diff --git a/.github/workflows/depscheck.yaml b/.github/workflows/depscheck.yaml index 620614f5093b..ca0ea25d18cf 100644 --- a/.github/workflows/depscheck.yaml +++ b/.github/workflows/depscheck.yaml @@ -17,7 +17,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 with: - go-version: '1.16.7' + go-version: '1.17.3' - run: bash scripts/gogetcookie.sh - run: make tools - run: make depscheck diff --git a/.github/workflows/gencheck.yaml b/.github/workflows/gencheck.yaml index 68f09b4c7f53..94af4afba7e3 100644 --- a/.github/workflows/gencheck.yaml +++ b/.github/workflows/gencheck.yaml @@ -15,7 +15,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 with: - go-version: '1.16.7' + go-version: '1.17.3' - run: bash scripts/gogetcookie.sh - run: make tools - run: make gencheck diff --git a/.github/workflows/golint.yaml b/.github/workflows/golint.yaml index cc975aec22d8..cd5536dc8b78 100644 --- a/.github/workflows/golint.yaml +++ b/.github/workflows/golint.yaml @@ -17,7 +17,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 with: - go-version: '1.16.7' + go-version: '1.17.3' - uses: golangci/golangci-lint-action@v2 with: version: 'v1.41.1' diff --git a/.github/workflows/gradually-deprecated.yaml b/.github/workflows/gradually-deprecated.yaml index 8e0f914a77b3..f506d1c415a3 100644 --- a/.github/workflows/gradually-deprecated.yaml +++ b/.github/workflows/gradually-deprecated.yaml @@ -17,5 +17,5 @@ jobs: fetch-depth: 0 - uses: actions/setup-go@v2 with: - go-version: '1.16.7' + go-version: '1.17.3' - run: ./scripts/run-gradually-deprecated.sh diff --git a/.github/workflows/link-milestone.yaml b/.github/workflows/link-milestone.yaml index f341fcbb6090..f7799b808035 100644 --- a/.github/workflows/link-milestone.yaml +++ b/.github/workflows/link-milestone.yaml @@ -19,7 +19,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 with: - go-version: '1.16.7' + go-version: '1.17.3' - run: | go install github.com/stephybun/link-milestone@latest link-milestone diff --git a/.github/workflows/tflint.yaml b/.github/workflows/tflint.yaml index cf00d871dbab..7f829c26db80 100644 --- a/.github/workflows/tflint.yaml +++ b/.github/workflows/tflint.yaml @@ -17,7 +17,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 with: - go-version: '1.16.7' + go-version: '1.17.3' - run: bash scripts/gogetcookie.sh - run: make tools - run: make tflint diff --git a/.github/workflows/thirty-two-bit.yaml b/.github/workflows/thirty-two-bit.yaml index 1b8b324b1545..a2c98de541ab 100644 --- a/.github/workflows/thirty-two-bit.yaml +++ b/.github/workflows/thirty-two-bit.yaml @@ -17,7 +17,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 with: - go-version: '1.16.7' + go-version: '1.17.3' - run: bash scripts/gogetcookie.sh - run: make tools - run: GOARCH=386 GOOS=linux go build -o 32bitbuild . 
diff --git a/.github/workflows/unit-test.yaml b/.github/workflows/unit-test.yaml index 34f39af46339..a7b5d4ab7861 100644 --- a/.github/workflows/unit-test.yaml +++ b/.github/workflows/unit-test.yaml @@ -17,7 +17,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 with: - go-version: '1.16.7' + go-version: '1.17.3' - run: bash scripts/gogetcookie.sh - run: make test env: diff --git a/.github/workflows/validate-examples.yaml b/.github/workflows/validate-examples.yaml index 997a210008d7..6fdf4ad15005 100644 --- a/.github/workflows/validate-examples.yaml +++ b/.github/workflows/validate-examples.yaml @@ -16,7 +16,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 with: - go-version: '1.16.7' + go-version: '1.17.3' - run: bash scripts/gogetcookie.sh - run: make tools - run: make validate-examples diff --git a/.github/workflows/website-lint.yaml b/.github/workflows/website-lint.yaml index 9261cb34bb24..21e0e4d851ab 100644 --- a/.github/workflows/website-lint.yaml +++ b/.github/workflows/website-lint.yaml @@ -16,7 +16,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 with: - go-version: '1.16.7' + go-version: '1.17.3' - run: bash scripts/gogetcookie.sh - run: make tools - run: make website-lint diff --git a/.go-version b/.go-version index 293bfa8f0689..021c75ee1246 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.16.7 +1.17.3 \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 1430765be3d6..912786a8cc94 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,27 +1,58 @@ -## 2.85.0 (Unreleased) +## 2.86.0 (Unreleased) + +IMPROVEMENTS: + +* compute: updating to use API Version `2021-07-01` [GH-14174] +* databricks: updating the embedded SDK to use the new Resource ID Parsers [GH-14157] +* datalake: updating the embedded SDK to use the new Resource ID Parsers [GH-14158] +* maps: updating the embedded SDK to use the new Resource ID Parsers [GH-14155] +* powerbi: updating the embedded SDK to use the new Resource ID Parsers [GH-14154] +* relay: updating the embedded SDK to use the new Resource ID Parsers [GH-14153] +* signalr: updating the embedded SDK to use the new Resource ID Parsers [GH-14150] +* storage: updating to use API Version `2021-04-01` [GH-14083] +* videoanalyzer: updating the embedded SDK to use the new Resource ID Parsers [GH-14135] +* `azurerm_orchestrated_virtual_machine_scale_set` - added support for VMSS Flex public preview [GH-14003] +* `azurerm_machine_learning_workspace` - support for the `encryption` block [GH-14120] + +BUG FIXES: + +* `azurerm_linux_virtual_machine_scale_set` - `source_image_reference.offer` and `source_image_reference.publisher` are now ForceNew [GH-14165] +* `azurerm_windows_virtual_machine_scale_set` - `source_image_reference.offer` and `source_image_reference.publisher` are now ForceNew [GH-14165] +* `azurerm_network_watcher_flow_log` - now locks on the network security group to prevent `AnotherOperationInProgress` errors [GH-14160] + +## 2.85.0 (November 12, 2021) FEATURES: -* **New Data Source:** `azurerm_batch_application` [GH-14043] +* **New Data Source:** `azurerm_batch_application` ([#14043](https://github.com/hashicorp/terraform-provider-azurerm/issues/14043)) +* **New Resource:** `azurerm_monitor_private_link_scope` ([#14098](https://github.com/hashicorp/terraform-provider-azurerm/issues/14098)) +* **New Resource:** `azurerm_mysql_flexible_server_firewall_rule` ([#14136](https://github.com/hashicorp/terraform-provider-azurerm/issues/14136)) +* **New Resource:** 
`azurerm_synapse_workspace_aad_admin` ([#13600](https://github.com/hashicorp/terraform-provider-azurerm/issues/13600)) IMPROVEMENTS: -* dependencies: upgrading to `v0.17.0` of `github.com/hashicorp/go-azure-helpers` [GH-14060] -* dependencies: upgrading to `v2.8.0` of `github.com/hashicorp/terraform-plugin-sdk` [GH-14060] -* `azurerm_application_insights` - support for the `internet_ingestion_enabled` and `internet_query_enabled` properties [GH-14035] -* `azurerm_security_center_subscription_pricing` - `resource_type` can now be set to `OpenSourceRelationalDatabases` [GH-14103] +* dependencies: upgrading to `v0.17.1` of `github.com/hashicorp/go-azure-helpers` ([#14141](https://github.com/hashicorp/terraform-provider-azurerm/issues/14141)) +* dependencies: upgrading to `v2.8.0` of `github.com/hashicorp/terraform-plugin-sdk` ([#14060](https://github.com/hashicorp/terraform-provider-azurerm/issues/14060)) +* `azurerm_application_insights` - support for the `internet_ingestion_enabled` and `internet_query_enabled` properties ([#14035](https://github.com/hashicorp/terraform-provider-azurerm/issues/14035)) +* `azurerm_backup_protected_vm` - support for the `exclude_disk_luns` and `include_disk_luns` properties ([#14097](https://github.com/hashicorp/terraform-provider-azurerm/issues/14097)) +* `azurerm_managed_disk_resource` - support for the `disk_iops_read_only` and `disk_mbps_read_only` properties ([#14025](https://github.com/hashicorp/terraform-provider-azurerm/issues/14025)) +* `azurerm_security_center_subscription_pricing` - `resource_type` can now be set to `OpenSourceRelationalDatabases` ([#14103](https://github.com/hashicorp/terraform-provider-azurerm/issues/14103)) +* `azurerm_storage_encryption_scope` - allow versionless `key_vault_key_id` ([#14085](https://github.com/hashicorp/terraform-provider-azurerm/issues/14085)) +* `azurerm_sql_managed_instance` - support for the `identity` block ([#14052](https://github.com/hashicorp/terraform-provider-azurerm/issues/14052)) +* `azurerm_virtual_network_gateway` - enable configuration of an active-active zone redundant gateway with P2S ([#14124](https://github.com/hashicorp/terraform-provider-azurerm/issues/14124)) BUG FIXES: -* Data Source: `azurerm_redis_cache` - parsing the `subnet_id` response value case-insensitively [GH-14108] -* Data Source: `azurerm_redis_cache` - ensuring that `shard_count` always has a value set [GH-14108] -* Data Source: `azurerm_consumption_budget_resource_group` - add missing `threshold_type` property in the schema [GH-14125] -* Data Source: `azurerm_consumption_budget_subscription` - add missing `threshold_type` property in the schema [GH-14125] -* `azurerm_api_management_certificate` - set `subject` property from correct field [GH-14026] -* `azurerm_app_service_virtual_network_swift_connection` - fixing a panic when checking for an existing resource during creation [GH-14070] -* `azurerm_redis_cache` - parsing the `subnet_id` response value case-insensitively [GH-14108] -* `azurerm_redis_cache` - ensuring that `shard_count` always has a value set [GH-14108] -* `azurerm_storage_blob` - ensuring that `cache_control` is sent during updates [GH-14100] +* Data Source: `azurerm_redis_cache` - parsing the `subnet_id` response value case-insensitively ([#14108](https://github.com/hashicorp/terraform-provider-azurerm/issues/14108)) +* Data Source: `azurerm_redis_cache` - ensuring that `shard_count` always has a value set ([#14108](https://github.com/hashicorp/terraform-provider-azurerm/issues/14108)) +* Data Source: 
`azurerm_consumption_budget_resource_group` - add missing `threshold_type` property in the schema ([#14125](https://github.com/hashicorp/terraform-provider-azurerm/issues/14125)) +* Data Source: `azurerm_consumption_budget_subscription` - add missing `threshold_type` property in the schema ([#14125](https://github.com/hashicorp/terraform-provider-azurerm/issues/14125)) +* `azurerm_api_management_certificate` - set `subject` property from correct field ([#14026](https://github.com/hashicorp/terraform-provider-azurerm/issues/14026)) +* `azurerm_app_service_virtual_network_swift_connection` - fixing a panic when checking for an existing resource during creation ([#14070](https://github.com/hashicorp/terraform-provider-azurerm/issues/14070)) +* `azurerm_frontdoor_resource` - route engines are no longer removed on update ([#14093](https://github.com/hashicorp/terraform-provider-azurerm/issues/14093)) +* `azurerm_redis_cache` - parsing the `subnet_id` response value case-insensitively ([#14108](https://github.com/hashicorp/terraform-provider-azurerm/issues/14108)) +* `azurerm_redis_cache` - ensuring that `shard_count` always has a value set ([#14108](https://github.com/hashicorp/terraform-provider-azurerm/issues/14108)) +* `azurerm_storage_blob` - ensuring that `cache_control` is sent during updates ([#14100](https://github.com/hashicorp/terraform-provider-azurerm/issues/14100)) ## 2.84.0 (November 05, 2021) diff --git a/README.md b/README.md index f0938e2d5e34..00c9f28ebd2b 100644 --- a/README.md +++ b/README.md @@ -61,7 +61,7 @@ Further [usage documentation is available on the Terraform website](https://regi ## Developer Requirements * [Terraform](https://www.terraform.io/downloads.html) version 0.12.x + (but 1.x is recommended) -* [Go](https://golang.org/doc/install) version 1.16.x (to build the provider plugin) +* [Go](https://golang.org/doc/install) version 1.17.x (to build the provider plugin) ### On Windows diff --git a/go.mod b/go.mod index ac44ccdc320a..578e4b42a9ab 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/google/go-cmp v0.5.6 github.com/google/uuid v1.1.2 github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-azure-helpers v0.17.0 + github.com/hashicorp/go-azure-helpers v0.17.1 github.com/hashicorp/go-getter v1.5.4 github.com/hashicorp/go-hclog v0.16.1 // indirect github.com/hashicorp/go-multierror v1.1.1 diff --git a/go.sum b/go.sum index def3d60c45c5..bd56bf86406c 100644 --- a/go.sum +++ b/go.sum @@ -249,8 +249,8 @@ github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-azure-helpers v0.12.0/go.mod h1:Zc3v4DNeX6PDdy7NljlYpnrdac1++qNW0I4U+ofGwpg= -github.com/hashicorp/go-azure-helpers v0.17.0 h1:HQF6kVNzofmwOhbRK/h/RWEVl/zu6kjB6mjoIxFL0A0= -github.com/hashicorp/go-azure-helpers v0.17.0/go.mod h1:L4ny2lW2muOESZR1XjbM59c3Pq+7SA9Yt1Nry5UfKZc= +github.com/hashicorp/go-azure-helpers v0.17.1 h1:qkvhSgyPzK3UwWQDocGLFmZ9iRjTj1Dv4CzeyhwfoIs= +github.com/hashicorp/go-azure-helpers v0.17.1/go.mod h1:L4ny2lW2muOESZR1XjbM59c3Pq+7SA9Yt1Nry5UfKZc= github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= diff 
--git a/helpers/azure/sku.go b/helpers/azure/sku.go index 09c3fb1625f4..f804cf90b3b1 100644 --- a/helpers/azure/sku.go +++ b/helpers/azure/sku.go @@ -4,6 +4,9 @@ import ( "fmt" "strconv" "strings" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" + "github.com/hashicorp/terraform-provider-azurerm/utils" ) func SplitSku(sku string) (string, int32, error) { @@ -20,3 +23,50 @@ func SplitSku(sku string) (string, int32, error) { return skuParts[0], int32(capacity), nil } + +func ExpandOrchestratedVirtualMachineScaleSetSku(input string, capacity int) (*compute.Sku, error) { + skuParts := strings.Split(input, "_") + + if len(skuParts) < 2 || strings.Contains(input, "__") || strings.Contains(input, " ") { + return nil, fmt.Errorf("'sku_name'(%q) is not formatted properly.", input) + } + + sku := &compute.Sku{ + Name: utils.String(input), + Capacity: utils.Int64(int64(capacity)), + Tier: utils.String("Standard"), + } + + return sku, nil +} + +func FlattenOrchestratedVirtualMachineScaleSetSku(input *compute.Sku) (*string, error) { + var skuName string + if input != nil && input.Name != nil { + if strings.HasPrefix(strings.ToLower(*input.Name), "standard") { + skuName = *input.Name + } else { + skuName = fmt.Sprintf("Standard_%s", *input.Name) + } + + return &skuName, nil + } + + return nil, fmt.Errorf("Sku struct 'name' is nil") +} + +func ValidateOrchestratedVirtualMachineScaleSetSku(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + skuParts := strings.Split(v, "_") + + if len(skuParts) < 2 || strings.Contains(v, "__") || strings.Contains(v, " ") { + errors = append(errors, fmt.Errorf("%q(%q) is not formatted properly.", key, v)) + } + + return +} diff --git a/internal/acceptance/testcase.go b/internal/acceptance/testcase.go index 637c1320c96a..06d6154f8af7 100644 --- a/internal/acceptance/testcase.go +++ b/internal/acceptance/testcase.go @@ -13,10 +13,11 @@ import ( "github.com/hashicorp/terraform-provider-azurerm/internal/provider" ) -// lintignore:AT001 func (td TestData) DataSourceTest(t *testing.T, steps []TestStep) { // DataSources don't need a check destroy - however since this is a wrapper function // and not matching the ignore pattern `XXX_data_source_test.go`, this needs to be explicitly opted out + + // lintignore:AT001 testCase := resource.TestCase{ PreCheck: func() { PreCheck(t) }, Steps: steps, @@ -24,10 +25,11 @@ func (td TestData) DataSourceTest(t *testing.T, steps []TestStep) { td.runAcceptanceTest(t, testCase) } -// lintignore:AT001 func (td TestData) DataSourceTestInSequence(t *testing.T, steps []TestStep) { // DataSources don't need a check destroy - however since this is a wrapper function // and not matching the ignore pattern `XXX_data_source_test.go`, this needs to be explicitly opted out + + // lintignore:AT001 testCase := resource.TestCase{ PreCheck: func() { PreCheck(t) }, Steps: steps, @@ -36,7 +38,6 @@ func (td TestData) DataSourceTestInSequence(t *testing.T, steps []TestStep) { td.runAcceptanceSequentialTest(t, testCase) } -// lintignore:AT001 func (td TestData) ResourceTest(t *testing.T, testResource types.TestResource, steps []TestStep) { testCase := resource.TestCase{ PreCheck: func() { PreCheck(t) }, @@ -54,8 +55,8 @@ func (td TestData) ResourceTest(t *testing.T, testResource types.TestResource, s // ResourceTestIgnoreCheckDestroyed skips the check to confirm the resource test has been 
destroyed. // This is done because certain resources can't actually be deleted. -// lintignore:AT001 func (td TestData) ResourceTestSkipCheckDestroyed(t *testing.T, steps []TestStep) { + // lintignore:AT001 testCase := resource.TestCase{ PreCheck: func() { PreCheck(t) }, Steps: steps, @@ -63,8 +64,8 @@ func (td TestData) ResourceTestSkipCheckDestroyed(t *testing.T, steps []TestStep td.runAcceptanceTest(t, testCase) } -// lintignore:AT001 func (td TestData) ResourceSequentialTestSkipCheckDestroyed(t *testing.T, steps []TestStep) { + // lintignore:AT001 testCase := resource.TestCase{ PreCheck: func() { PreCheck(t) }, Steps: steps, @@ -132,7 +133,7 @@ func (td TestData) providers() map[string]func() (*schema.Provider, error) { func (td TestData) externalProviders() map[string]resource.ExternalProvider { return map[string]resource.ExternalProvider{ "azuread": { - VersionConstraint: "=1.5.1", + VersionConstraint: "=2.8.0", Source: "registry.terraform.io/hashicorp/azuread", }, } diff --git a/internal/services/apimanagement/api_management_api_operation_policy_resource.go b/internal/services/apimanagement/api_management_api_operation_policy_resource.go index 6678fc78f4d8..86200f446ebc 100644 --- a/internal/services/apimanagement/api_management_api_operation_policy_resource.go +++ b/internal/services/apimanagement/api_management_api_operation_policy_resource.go @@ -23,8 +23,10 @@ func resourceApiManagementApiOperationPolicy() *pluginsdk.Resource { Read: resourceApiManagementAPIOperationPolicyRead, Update: resourceApiManagementAPIOperationPolicyCreateUpdate, Delete: resourceApiManagementAPIOperationPolicyDelete, - // TODO: replace this with an importer which validates the ID during import - Importer: pluginsdk.DefaultImporter(), + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.ApiOperationPolicyID(id) + return err + }), Timeouts: &pluginsdk.ResourceTimeout{ Create: pluginsdk.DefaultTimeout(30 * time.Minute), diff --git a/internal/services/apimanagement/api_management_api_operation_policy_resource_test.go b/internal/services/apimanagement/api_management_api_operation_policy_resource_test.go index 85f811ccba95..36c18eec1a65 100644 --- a/internal/services/apimanagement/api_management_api_operation_policy_resource_test.go +++ b/internal/services/apimanagement/api_management_api_operation_policy_resource_test.go @@ -6,10 +6,10 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" - "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/apimanagement/parse" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" ) @@ -94,18 +94,14 @@ func TestAccApiManagementAPIOperationPolicy_rawXml(t *testing.T) { } func (ApiManagementApiOperationPolicyResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := azure.ParseAzureResourceID(state.ID) + id, err := parse.ApiOperationPolicyID(state.ID) if err != nil { return nil, err } - resourceGroup := id.ResourceGroup - serviceName := id.Path["service"] - apiName := id.Path["apis"] - operationID := id.Path["operations"] - resp, err := 
clients.ApiManagement.ApiOperationPoliciesClient.Get(ctx, resourceGroup, serviceName, apiName, operationID, apimanagement.PolicyExportFormatXML) + resp, err := clients.ApiManagement.ApiOperationPoliciesClient.Get(ctx, id.ResourceGroup, id.ServiceName, id.ApiName, id.OperationName, apimanagement.PolicyExportFormatXML) if err != nil { - return nil, fmt.Errorf("reading ApiManagementApi Operation Policy (%s): %+v", id, err) + return nil, fmt.Errorf("reading %s: %+v", *id, err) } return utils.Bool(resp.ID != nil), nil diff --git a/internal/services/apimanagement/api_management_api_policy_resource.go b/internal/services/apimanagement/api_management_api_policy_resource.go index 933dc109439c..743f48a468a1 100644 --- a/internal/services/apimanagement/api_management_api_policy_resource.go +++ b/internal/services/apimanagement/api_management_api_policy_resource.go @@ -23,8 +23,10 @@ func resourceApiManagementApiPolicy() *pluginsdk.Resource { Read: resourceApiManagementAPIPolicyRead, Update: resourceApiManagementAPIPolicyCreateUpdate, Delete: resourceApiManagementAPIPolicyDelete, - // TODO: replace this with an importer which validates the ID during import - Importer: pluginsdk.DefaultImporter(), + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.ApiPolicyID(id) + return err + }), Timeouts: &pluginsdk.ResourceTimeout{ Create: pluginsdk.DefaultTimeout(30 * time.Minute), @@ -132,24 +134,21 @@ func resourceApiManagementAPIPolicyRead(d *pluginsdk.ResourceData, meta interfac if err != nil { return err } - resourceGroup := id.ResourceGroup - serviceName := id.ServiceName - apiName := id.ApiName - resp, err := client.Get(ctx, resourceGroup, serviceName, apiName, apimanagement.PolicyExportFormatXML) + resp, err := client.Get(ctx, id.ResourceGroup, id.ServiceName, id.ApiName, apimanagement.PolicyExportFormatXML) if err != nil { if utils.ResponseWasNotFound(resp.Response) { - log.Printf("[DEBUG] API Policy (Resource Group %q / API Management Service %q / API %q) was not found - removing from state!", resourceGroup, serviceName, apiName) + log.Printf("[DEBUG] %s was not found - removing from state!", *id) d.SetId("") return nil } - return fmt.Errorf("making Read request for API Policy (Resource Group %q / API Management Service %q / API %q): %+v", resourceGroup, serviceName, apiName, err) + return fmt.Errorf("making Read request for %s: %+v", *id, err) } - d.Set("resource_group_name", resourceGroup) - d.Set("api_management_name", serviceName) - d.Set("api_name", apiName) + d.Set("resource_group_name", id.ResourceGroup) + d.Set("api_management_name", id.ServiceName) + d.Set("api_name", id.ApiName) if properties := resp.PolicyContractProperties; properties != nil { policyContent := "" @@ -174,13 +173,10 @@ func resourceApiManagementAPIPolicyDelete(d *pluginsdk.ResourceData, meta interf if err != nil { return err } - resourceGroup := id.ResourceGroup - serviceName := id.ServiceName - apiName := id.ApiName - if resp, err := client.Delete(ctx, resourceGroup, serviceName, apiName, ""); err != nil { + if resp, err := client.Delete(ctx, id.ResourceGroup, id.ServiceName, id.ApiName, ""); err != nil { if !utils.ResponseWasNotFound(resp) { - return fmt.Errorf("deleting API Policy (Resource Group %q / API Management Service %q / API %q): %+v", resourceGroup, serviceName, apiName, err) + return fmt.Errorf("deleting %s: %+v", *id, err) } } diff --git a/internal/services/apimanagement/api_management_api_policy_resource_test.go 
b/internal/services/apimanagement/api_management_api_policy_resource_test.go index 80e90ba9d01b..b370b38e8fb7 100644 --- a/internal/services/apimanagement/api_management_api_policy_resource_test.go +++ b/internal/services/apimanagement/api_management_api_policy_resource_test.go @@ -6,10 +6,10 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" - "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/apimanagement/parse" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" ) @@ -99,17 +99,14 @@ func TestAccApiManagementAPIPolicy_customPolicy(t *testing.T) { } func (ApiManagementApiPolicyResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := azure.ParseAzureResourceID(state.ID) + id, err := parse.ApiPolicyID(state.ID) if err != nil { return nil, err } - resourceGroup := id.ResourceGroup - serviceName := id.Path["service"] - apiName := id.Path["apis"] - resp, err := clients.ApiManagement.ApiPoliciesClient.Get(ctx, resourceGroup, serviceName, apiName, apimanagement.PolicyExportFormatXML) + resp, err := clients.ApiManagement.ApiPoliciesClient.Get(ctx, id.ResourceGroup, id.ServiceName, id.ApiName, apimanagement.PolicyExportFormatXML) if err != nil { - return nil, fmt.Errorf("reading ApiManagementApi Policy (%s): %+v", id, err) + return nil, fmt.Errorf("reading %s: %+v", *id, err) } return utils.Bool(resp.ID != nil), nil diff --git a/internal/services/apimanagement/api_management_api_resource.go b/internal/services/apimanagement/api_management_api_resource.go index 356632730f5c..1f24ff5986b2 100644 --- a/internal/services/apimanagement/api_management_api_resource.go +++ b/internal/services/apimanagement/api_management_api_resource.go @@ -25,8 +25,10 @@ func resourceApiManagementApi() *pluginsdk.Resource { Read: resourceApiManagementApiRead, Update: resourceApiManagementApiCreateUpdate, Delete: resourceApiManagementApiDelete, - // TODO: replace this with an importer which validates the ID during import - Importer: pluginsdk.DefaultImporter(), + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.ApiID(id) + return err + }), Timeouts: &pluginsdk.ResourceTimeout{ Create: pluginsdk.DefaultTimeout(30 * time.Minute), @@ -266,15 +268,15 @@ func resourceApiManagementApi() *pluginsdk.Resource { func resourceApiManagementApiCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).ApiManagement.ApiClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() - resourceGroup := d.Get("resource_group_name").(string) - serviceName := d.Get("api_management_name").(string) - name := d.Get("name").(string) + id := parse.NewApiID(subscriptionId, d.Get("resource_group_name").(string), d.Get("api_management_name").(string), d.Get("name").(string)) + revision := d.Get("revision").(string) path := d.Get("path").(string) - apiId := fmt.Sprintf("%s;rev=%s", name, revision) + apiId := fmt.Sprintf("%s;rev=%s", id.Name, revision) version := 
d.Get("version").(string) versionSetId := d.Get("version_set_id").(string) displayName := d.Get("display_name").(string) @@ -291,10 +293,10 @@ func resourceApiManagementApiCreateUpdate(d *pluginsdk.ResourceData, meta interf } if d.IsNewResource() { - existing, err := client.Get(ctx, resourceGroup, serviceName, apiId) + existing, err := client.Get(ctx, id.ResourceGroup, id.ServiceName, apiId) if err != nil { if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("checking for presence of existing API %q (API Management Service %q / Resource Group %q): %s", name, serviceName, resourceGroup, err) + return fmt.Errorf("checking for presence of existing %s: %s", id, err) } } @@ -323,7 +325,7 @@ func resourceApiManagementApiCreateUpdate(d *pluginsdk.ResourceData, meta interf contentFormat := importV["content_format"].(string) contentValue := importV["content_value"].(string) - log.Printf("[DEBUG] Importing API Management API %q of type %q", name, contentFormat) + log.Printf("[DEBUG] Importing API Management API %q of type %q", id.Name, contentFormat) apiParams := apimanagement.APICreateOrUpdateParameter{ APICreateOrUpdateProperties: &apimanagement.APICreateOrUpdateProperties{ APIType: apiType, @@ -338,7 +340,7 @@ func resourceApiManagementApiCreateUpdate(d *pluginsdk.ResourceData, meta interf // `wsdl_selector` is necessary under format `wsdl` if len(wsdlSelectorVs) == 0 && contentFormat == string(apimanagement.Wsdl) { - return fmt.Errorf("`wsdl_selector` is required when content format is `wsdl` in API Management API %q", name) + return fmt.Errorf("`wsdl_selector` is required when content format is `wsdl` in API Management API %q", id.Name) } if len(wsdlSelectorVs) > 0 { @@ -356,13 +358,13 @@ func resourceApiManagementApiCreateUpdate(d *pluginsdk.ResourceData, meta interf apiParams.APICreateOrUpdateProperties.APIVersionSetID = utils.String(versionSetId) } - future, err := client.CreateOrUpdate(ctx, resourceGroup, serviceName, apiId, apiParams, "") + future, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.ServiceName, apiId, apiParams, "") if err != nil { - return fmt.Errorf("creating/updating API Management API %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("creating/updating %s: %+v", id, err) } if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("waiting on creating/updating API Management API %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("waiting on creating/updating %s: %+v", id, err) } } @@ -412,25 +414,16 @@ func resourceApiManagementApiCreateUpdate(d *pluginsdk.ResourceData, meta interf params.APICreateOrUpdateProperties.APIVersionSetID = utils.String(versionSetId) } - future, err := client.CreateOrUpdate(ctx, resourceGroup, serviceName, apiId, params, "") + future, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.ServiceName, apiId, params, "") if err != nil { - return fmt.Errorf("creating/updating API Management API %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("creating/updating %s: %+v", id, err) } if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("waiting on creating/updating API Management API %q (Resource Group %q): %+v", name, resourceGroup, err) - } - - read, err := client.Get(ctx, resourceGroup, serviceName, apiId) - if err != nil { - return fmt.Errorf("retrieving API %q / Revision %q (API Management Service %q / Resource Group %q): %+v", name, revision, serviceName, resourceGroup, 
err) + return fmt.Errorf("waiting on creating/updating %s: %+v", id, err) } - if read.ID == nil { - return fmt.Errorf("Cannot read ID for API %q / Revision %q (API Management Service %q / Resource Group %q)", name, revision, serviceName, resourceGroup) - } - - d.SetId(*read.ID) + d.SetId(id.ID()) return resourceApiManagementApiRead(d, meta) } @@ -444,31 +437,27 @@ func resourceApiManagementApiRead(d *pluginsdk.ResourceData, meta interface{}) e return err } - resourceGroup := id.ResourceGroup - serviceName := id.ServiceName - apiid := id.Name - - name := apiid + name := id.Name revision := "" - if strings.Contains(apiid, ";") { - name = strings.Split(apiid, ";")[0] - revision = strings.Split(apiid, "=")[1] + if strings.Contains(id.Name, ";") { + name = strings.Split(id.Name, ";")[0] + revision = strings.Split(id.Name, "=")[1] } - resp, err := client.Get(ctx, resourceGroup, serviceName, apiid) + resp, err := client.Get(ctx, id.ResourceGroup, id.ServiceName, id.Name) if err != nil { if utils.ResponseWasNotFound(resp.Response) { - log.Printf("[DEBUG] API %q Revision %q (API Management Service %q / Resource Group %q) does not exist - removing from state!", name, revision, serviceName, resourceGroup) + log.Printf("[DEBUG] API %q Revision %q (API Management Service %q / Resource Group %q) does not exist - removing from state!", name, revision, id.ServiceName, id.ResourceGroup) d.SetId("") return nil } - return fmt.Errorf("retrieving API %q / Revision %q (API Management Service %q / Resource Group %q): %+v", name, revision, serviceName, resourceGroup, err) + return fmt.Errorf("retrieving API %q / Revision %q (API Management Service %q / Resource Group %q): %+v", name, revision, id.ServiceName, id.ResourceGroup, err) } - d.Set("api_management_name", serviceName) + d.Set("api_management_name", id.ServiceName) d.Set("name", name) - d.Set("resource_group_name", resourceGroup) + d.Set("resource_group_name", id.ResourceGroup) if props := resp.APIContractProperties; props != nil { d.Set("description", props.Description) @@ -515,21 +504,17 @@ func resourceApiManagementApiDelete(d *pluginsdk.ResourceData, meta interface{}) return err } - resourceGroup := id.ResourceGroup - serviceName := id.ServiceName - apiid := id.Name - - name := apiid + name := id.Name revision := "" - if strings.Contains(apiid, ";") { - name = strings.Split(apiid, ";")[0] - revision = strings.Split(apiid, "=")[1] + if strings.Contains(id.Name, ";") { + name = strings.Split(id.Name, ";")[0] + revision = strings.Split(id.Name, "=")[1] } deleteRevisions := utils.Bool(true) - if resp, err := client.Delete(ctx, resourceGroup, serviceName, name, "", deleteRevisions); err != nil { + if resp, err := client.Delete(ctx, id.ResourceGroup, id.ServiceName, name, "", deleteRevisions); err != nil { if !utils.ResponseWasNotFound(resp) { - return fmt.Errorf("deleting API %q / Revision %q (API Management Service %q / Resource Group %q): %s", name, revision, serviceName, resourceGroup, err) + return fmt.Errorf("deleting API %q / Revision %q (API Management Service %q / Resource Group %q): %s", name, revision, id.ServiceName, id.ResourceGroup, err) } } diff --git a/internal/services/apimanagement/api_management_api_resource_test.go b/internal/services/apimanagement/api_management_api_resource_test.go index a0470746f48a..2eaff7b256d8 100644 --- a/internal/services/apimanagement/api_management_api_resource_test.go +++ b/internal/services/apimanagement/api_management_api_resource_test.go @@ -5,10 +5,10 @@ import ( "fmt" "testing" - 
"github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/apimanagement/parse" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" ) @@ -322,18 +322,14 @@ func TestAccApiManagementApi_createRevisionFromExistingRevision(t *testing.T) { } func (ApiManagementApiResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := azure.ParseAzureResourceID(state.ID) + id, err := parse.ApiID(state.ID) if err != nil { return nil, err } - resourceGroup := id.ResourceGroup - serviceName := id.Path["service"] - apiid := id.Path["apis"] - - resp, err := clients.ApiManagement.ApiClient.Get(ctx, resourceGroup, serviceName, apiid) + resp, err := clients.ApiManagement.ApiClient.Get(ctx, id.ResourceGroup, id.ServiceName, id.Name) if err != nil { - return nil, fmt.Errorf("reading ApiManagementApi (%s): %+v", id, err) + return nil, fmt.Errorf("reading %s: %+v", *id, err) } return utils.Bool(resp.ID != nil), nil diff --git a/internal/services/apimanagement/api_management_authorization_server_resource.go b/internal/services/apimanagement/api_management_authorization_server_resource.go index efc11704b887..6fffdba061c9 100644 --- a/internal/services/apimanagement/api_management_authorization_server_resource.go +++ b/internal/services/apimanagement/api_management_authorization_server_resource.go @@ -23,8 +23,10 @@ func resourceApiManagementAuthorizationServer() *pluginsdk.Resource { Read: resourceApiManagementAuthorizationServerRead, Update: resourceApiManagementAuthorizationServerCreateUpdate, Delete: resourceApiManagementAuthorizationServerDelete, - // TODO: replace this with an importer which validates the ID during import - Importer: pluginsdk.DefaultImporter(), + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.AuthorizationServerID(id) + return err + }), Timeouts: &pluginsdk.ResourceTimeout{ Create: pluginsdk.DefaultTimeout(30 * time.Minute), @@ -186,18 +188,17 @@ func resourceApiManagementAuthorizationServer() *pluginsdk.Resource { func resourceApiManagementAuthorizationServerCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).ApiManagement.AuthorizationServersClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() - resourceGroup := d.Get("resource_group_name").(string) - serviceName := d.Get("api_management_name").(string) - name := d.Get("name").(string) + id := parse.NewAuthorizationServerID(subscriptionId, d.Get("resource_group_name").(string), d.Get("api_management_name").(string), d.Get("name").(string)) if d.IsNewResource() { - existing, err := client.Get(ctx, resourceGroup, serviceName, name) + existing, err := client.Get(ctx, id.ResourceGroup, id.ServiceName, id.Name) if err != nil { if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("checking for presence of existing Authorization Server %q (API Management Service %q / Resource Group %q): %s", name, serviceName, resourceGroup, err) + return fmt.Errorf("checking for presence of existing %s: %s", id, err) } } @@ 
-261,20 +262,12 @@ func resourceApiManagementAuthorizationServerCreateUpdate(d *pluginsdk.ResourceD params.AuthorizationServerContractProperties.TokenEndpoint = utils.String(tokenEndpoint) } - if _, err := client.CreateOrUpdate(ctx, resourceGroup, serviceName, name, params, ""); err != nil { - return fmt.Errorf("creating/updating Authorization Server %q (API Management Service %q / Resource Group %q): %+v", name, serviceName, resourceGroup, err) + if _, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.ServiceName, id.Name, params, ""); err != nil { + return fmt.Errorf("creating/updating %s: %+v", id, err) } - read, err := client.Get(ctx, resourceGroup, serviceName, name) - if err != nil { - return fmt.Errorf("retrieving Authorization Server %q (API Management Service %q / Resource Group %q): %+v", name, serviceName, resourceGroup, err) - } - - if read.ID == nil { - return fmt.Errorf("Cannot read ID for Authorization Server %q (API Management Service %q / Resource Group %q)", name, serviceName, resourceGroup) - } + d.SetId(id.ID()) - d.SetId(*read.ID) return resourceApiManagementAuthorizationServerRead(d, meta) } @@ -288,24 +281,20 @@ func resourceApiManagementAuthorizationServerRead(d *pluginsdk.ResourceData, met return err } - resourceGroup := id.ResourceGroup - serviceName := id.ServiceName - name := id.Name - - resp, err := client.Get(ctx, resourceGroup, serviceName, name) + resp, err := client.Get(ctx, id.ResourceGroup, id.ServiceName, id.Name) if err != nil { if utils.ResponseWasNotFound(resp.Response) { - log.Printf("[DEBUG] Authorization Server %q (API Management Service %q / Resource Group %q) does not exist - removing from state!", name, serviceName, resourceGroup) + log.Printf("[DEBUG] %s does not exist - removing from state!", *id) d.SetId("") return nil } - return fmt.Errorf("retrieving Authorization Server %q (API Management Service %q / Resource Group %q): %+v", name, serviceName, resourceGroup, err) + return fmt.Errorf("retrieving %s: %+v", *id, err) } - d.Set("api_management_name", serviceName) - d.Set("name", name) - d.Set("resource_group_name", resourceGroup) + d.Set("api_management_name", id.ServiceName) + d.Set("name", id.Name) + d.Set("resource_group_name", id.ResourceGroup) if props := resp.AuthorizationServerContractProperties; props != nil { d.Set("authorization_endpoint", props.AuthorizationEndpoint) @@ -355,13 +344,9 @@ func resourceApiManagementAuthorizationServerDelete(d *pluginsdk.ResourceData, m return err } - resourceGroup := id.ResourceGroup - serviceName := id.ServiceName - name := id.Name - - if resp, err := client.Delete(ctx, resourceGroup, serviceName, name, ""); err != nil { + if resp, err := client.Delete(ctx, id.ResourceGroup, id.ServiceName, id.Name, ""); err != nil { if !utils.ResponseWasNotFound(resp) { - return fmt.Errorf("deleting Authorization Server %q (API Management Service %q / Resource Group %q): %s", name, serviceName, resourceGroup, err) + return fmt.Errorf("deleting %s: %s", *id, err) } } diff --git a/internal/services/apimanagement/api_management_authorization_server_resource_test.go b/internal/services/apimanagement/api_management_authorization_server_resource_test.go index 2a596c3ff033..6478944631a2 100644 --- a/internal/services/apimanagement/api_management_authorization_server_resource_test.go +++ b/internal/services/apimanagement/api_management_authorization_server_resource_test.go @@ -5,10 +5,10 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" 
"github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/apimanagement/parse" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" ) @@ -62,18 +62,14 @@ func TestAccApiManagementAuthorizationServer_complete(t *testing.T) { } func (ApiManagementAuthorizationServerResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := azure.ParseAzureResourceID(state.ID) + id, err := parse.AuthorizationServerID(state.ID) if err != nil { return nil, err } - resourceGroup := id.ResourceGroup - serviceName := id.Path["service"] - name := id.Path["authorizationServers"] - - resp, err := clients.ApiManagement.AuthorizationServersClient.Get(ctx, resourceGroup, serviceName, name) + resp, err := clients.ApiManagement.AuthorizationServersClient.Get(ctx, id.ResourceGroup, id.ServiceName, id.Name) if err != nil { - return nil, fmt.Errorf("reading ApiManagement Authorization Server (%s): %+v", id, err) + return nil, fmt.Errorf("reading %s: %+v", *id, err) } return utils.Bool(resp.ID != nil), nil diff --git a/internal/services/apimanagement/api_management_identity_provider_aadb2c_resource_test.go b/internal/services/apimanagement/api_management_identity_provider_aadb2c_resource_test.go index c338aea1632e..69b8f6efe43b 100644 --- a/internal/services/apimanagement/api_management_identity_provider_aadb2c_resource_test.go +++ b/internal/services/apimanagement/api_management_identity_provider_aadb2c_resource_test.go @@ -129,22 +129,25 @@ resource "azurerm_api_management" "test" { } resource "azuread_application" "test" { - name = "acctestAM-%[5]d" - oauth2_allow_implicit_flow = true - reply_urls = [azurerm_api_management.test.developer_portal_url] + display_name = "acctestAM-%[5]d" + web { + redirect_uris = [azurerm_api_management.test.developer_portal_url] + + implicit_grant { + access_token_issuance_enabled = true + } + } } resource "azuread_application_password" "test" { application_object_id = azuread_application.test.object_id - end_date_relative = "36h" - value = "P@55w0rD!%[7]s" } resource "azurerm_api_management_identity_provider_aadb2c" "test" { resource_group_name = azurerm_resource_group.test.name api_management_name = azurerm_api_management.test.name client_id = azuread_application.test.application_id - client_secret = "P@55w0rD!%[7]s" + client_secret = azuread_application_password.test.value allowed_tenant = "%[4]s.onmicrosoft.com" signin_tenant = "%[4]s.onmicrosoft.com" authority = "%[4]s.b2clogin.com" @@ -155,7 +158,7 @@ resource "azurerm_api_management_identity_provider_aadb2c" "test" { depends_on = [azuread_application_password.test] } -`, b2cConfig["tenant_id"], b2cConfig["client_id"], b2cConfig["client_secret"], b2cConfig["tenant_slug"], data.RandomInteger, data.Locations.Primary, data.RandomString) +`, b2cConfig["tenant_id"], b2cConfig["client_id"], b2cConfig["client_secret"], b2cConfig["tenant_slug"], data.RandomInteger, data.Locations.Primary) } func (r ApiManagementIdentityProviderAADB2CResource) requiresImport(data acceptance.TestData, b2cConfig map[string]string) string { diff --git a/internal/services/apimanagement/api_management_named_value_resource.go b/internal/services/apimanagement/api_management_named_value_resource.go index 
d2a88fdec9d8..3fd78255909b 100644 --- a/internal/services/apimanagement/api_management_named_value_resource.go +++ b/internal/services/apimanagement/api_management_named_value_resource.go @@ -24,8 +24,10 @@ func resourceApiManagementNamedValue() *pluginsdk.Resource { Read: resourceApiManagementNamedValueRead, Update: resourceApiManagementNamedValueCreateUpdate, Delete: resourceApiManagementNamedValueDelete, - // TODO: replace this with an importer which validates the ID during import - Importer: pluginsdk.DefaultImporter(), + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.NamedValueID(id) + return err + }), Timeouts: &pluginsdk.ResourceTimeout{ Create: pluginsdk.DefaultTimeout(30 * time.Minute), @@ -95,18 +97,17 @@ func resourceApiManagementNamedValue() *pluginsdk.Resource { func resourceApiManagementNamedValueCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).ApiManagement.NamedValueClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() - name := d.Get("name").(string) - resourceGroup := d.Get("resource_group_name").(string) - serviceName := d.Get("api_management_name").(string) + id := parse.NewNamedValueID(subscriptionId, d.Get("resource_group_name").(string), d.Get("api_management_name").(string), d.Get("name").(string)) if d.IsNewResource() { - existing, err := client.Get(ctx, resourceGroup, serviceName, name) + existing, err := client.Get(ctx, id.ResourceGroup, id.ServiceName, id.Name) if err != nil { if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf(" checking for presence of existing Property %q (API Management Service %q / Resource Group %q): %s", name, serviceName, resourceGroup, err) + return fmt.Errorf(" checking for presence of existing %s: %s", id, err) } } @@ -131,23 +132,16 @@ func resourceApiManagementNamedValueCreateUpdate(d *pluginsdk.ResourceData, meta parameters.NamedValueCreateContractProperties.Tags = utils.ExpandStringSlice(tags.([]interface{})) } - future, err := client.CreateOrUpdate(ctx, resourceGroup, serviceName, name, parameters, "") + future, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.ServiceName, id.Name, parameters, "") if err != nil { - return fmt.Errorf(" creating or updating Property %q (Resource Group %q / API Management Service %q): %+v", name, resourceGroup, serviceName, err) + return fmt.Errorf(" creating or updating %s: %+v", id, err) } if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("waiting on creating/updating Property %q (Resource Group %q / API Management Service %q): %+v", name, resourceGroup, serviceName, err) + return fmt.Errorf("waiting on creating/updating %s: %+v", id, err) } - resp, err := client.Get(ctx, resourceGroup, serviceName, name) - if err != nil { - return fmt.Errorf(" retrieving Property %q (Resource Group %q / API Management Service %q): %+v", name, resourceGroup, serviceName, err) - } - if resp.ID == nil { - return fmt.Errorf("Cannot read ID for Property %q (Resource Group %q / API Management Service %q)", name, resourceGroup, serviceName) - } - d.SetId(*resp.ID) + d.SetId(id.ID()) return resourceApiManagementNamedValueRead(d, meta) } @@ -161,24 +155,21 @@ func resourceApiManagementNamedValueRead(d *pluginsdk.ResourceData, meta interfa if err != nil { return err } - resourceGroup := id.ResourceGroup - serviceName := id.ServiceName - name := id.Name - 
resp, err := client.Get(ctx, resourceGroup, serviceName, name) + resp, err := client.Get(ctx, id.ResourceGroup, id.ServiceName, id.Name) if err != nil { if utils.ResponseWasNotFound(resp.Response) { - log.Printf("[DEBUG] Property %q (Resource Group %q / API Management Service %q) was not found - removing from state!", name, resourceGroup, serviceName) + log.Printf("[DEBUG] %s was not found - removing from state!", *id) d.SetId("") return nil } - return fmt.Errorf(" making Read request for Property %q (Resource Group %q / API Management Service %q): %+v", name, resourceGroup, serviceName, err) + return fmt.Errorf(" making Read request for %s: %+v", *id, err) } - d.Set("name", resp.Name) - d.Set("resource_group_name", resourceGroup) - d.Set("api_management_name", serviceName) + d.Set("name", id.Name) + d.Set("resource_group_name", id.ResourceGroup) + d.Set("api_management_name", id.ServiceName) if properties := resp.NamedValueContractProperties; properties != nil { d.Set("display_name", properties.DisplayName) @@ -205,13 +196,10 @@ func resourceApiManagementNamedValueDelete(d *pluginsdk.ResourceData, meta inter if err != nil { return err } - resourceGroup := id.ResourceGroup - serviceName := id.ServiceName - name := id.Name - if resp, err := client.Delete(ctx, resourceGroup, serviceName, name, ""); err != nil { + if resp, err := client.Delete(ctx, id.ResourceGroup, id.ServiceName, id.Name, ""); err != nil { if !utils.ResponseWasNotFound(resp) { - return fmt.Errorf(" deleting Property %q (Resource Group %q / API Management Service %q): %+v", name, resourceGroup, serviceName, err) + return fmt.Errorf(" deleting %s: %+v", *id, err) } } diff --git a/internal/services/apimanagement/api_management_named_value_resource_test.go b/internal/services/apimanagement/api_management_named_value_resource_test.go index 90df01fbf445..5daa66f8d8f5 100644 --- a/internal/services/apimanagement/api_management_named_value_resource_test.go +++ b/internal/services/apimanagement/api_management_named_value_resource_test.go @@ -5,10 +5,10 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/apimanagement/parse" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" ) @@ -113,17 +113,14 @@ func TestAccApiManagementNamedValue_update(t *testing.T) { } func (ApiManagementNamedValueResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := azure.ParseAzureResourceID(state.ID) + id, err := parse.NamedValueID(state.ID) if err != nil { return nil, err } - resourceGroup := id.ResourceGroup - serviceName := id.Path["service"] - name := id.Path["namedValues"] - resp, err := clients.ApiManagement.NamedValueClient.Get(ctx, resourceGroup, serviceName, name) + resp, err := clients.ApiManagement.NamedValueClient.Get(ctx, id.ResourceGroup, id.ServiceName, id.Name) if err != nil { - return nil, fmt.Errorf("reading ApiManagement Named Value (%s): %+v", id, err) + return nil, fmt.Errorf("reading %s: %+v", *id, err) } return utils.Bool(resp.ID != nil), nil diff --git a/internal/services/apimanagement/api_management_product_group_resource.go 
b/internal/services/apimanagement/api_management_product_group_resource.go index 53b26311c228..ce559e75a412 100644 --- a/internal/services/apimanagement/api_management_product_group_resource.go +++ b/internal/services/apimanagement/api_management_product_group_resource.go @@ -20,8 +20,10 @@ func resourceApiManagementProductGroup() *pluginsdk.Resource { Create: resourceApiManagementProductGroupCreate, Read: resourceApiManagementProductGroupRead, Delete: resourceApiManagementProductGroupDelete, - // TODO: replace this with an importer which validates the ID during import - Importer: pluginsdk.DefaultImporter(), + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.ProductGroupID(id) + return err + }), Timeouts: &pluginsdk.ResourceTimeout{ Create: pluginsdk.DefaultTimeout(30 * time.Minute), @@ -44,34 +46,28 @@ func resourceApiManagementProductGroup() *pluginsdk.Resource { func resourceApiManagementProductGroupCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).ApiManagement.ProductGroupsClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() - resourceGroup := d.Get("resource_group_name").(string) - serviceName := d.Get("api_management_name").(string) - groupName := d.Get("group_name").(string) - productId := d.Get("product_id").(string) + id := parse.NewProductGroupID(subscriptionId, d.Get("resource_group_name").(string), d.Get("api_management_name").(string), d.Get("product_id").(string), d.Get("group_name").(string)) - exists, err := client.CheckEntityExists(ctx, resourceGroup, serviceName, productId, groupName) + exists, err := client.CheckEntityExists(ctx, id.ResourceGroup, id.ServiceName, id.ProductName, id.GroupName) if err != nil { if !utils.ResponseWasNotFound(exists) { - return fmt.Errorf("checking for present of existing Product %q / Group %q (API Management Service %q / Resource Group %q): %+v", productId, groupName, serviceName, resourceGroup, err) + return fmt.Errorf("checking for present of existing %s: %+v", id, err) } } if !utils.ResponseWasNotFound(exists) { - // TODO: can we pull this from somewhere? 
- subscriptionId := meta.(*clients.Client).Account.SubscriptionId - resourceId := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ApiManagement/service/%s/products/%s/groups/%s", subscriptionId, resourceGroup, serviceName, productId, groupName) - return tf.ImportAsExistsError("azurerm_api_management_product_group", resourceId) + return tf.ImportAsExistsError("azurerm_api_management_product_group", id.ID()) } - resp, err := client.CreateOrUpdate(ctx, resourceGroup, serviceName, productId, groupName) - if err != nil { - return fmt.Errorf("adding Product %q to Group %q (API Management Service %q / Resource Group %q): %+v", productId, groupName, serviceName, resourceGroup, err) + if _, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.ServiceName, id.ProductName, id.GroupName); err != nil { + return fmt.Errorf("adding Product %q to Group %q (API Management Service %q / Resource Group %q): %+v", id.ProductName, id.GroupName, id.ServiceName, id.ResourceGroup, err) } - d.SetId(*resp.ID) + d.SetId(id.ID()) return resourceApiManagementProductGroupRead(d, meta) } @@ -85,26 +81,22 @@ func resourceApiManagementProductGroupRead(d *pluginsdk.ResourceData, meta inter if err != nil { return err } - resourceGroup := id.ResourceGroup - serviceName := id.ServiceName - groupName := id.GroupName - productId := id.ProductName - resp, err := client.CheckEntityExists(ctx, resourceGroup, serviceName, productId, groupName) + resp, err := client.CheckEntityExists(ctx, id.ResourceGroup, id.ServiceName, id.ProductName, id.GroupName) if err != nil { if utils.ResponseWasNotFound(resp) { - log.Printf("[DEBUG] Product %q was not found in Group %q (API Management Service %q / Resource Group %q) was not found - removing from state!", productId, groupName, serviceName, resourceGroup) + log.Printf("[DEBUG] %s was not found - removing from state!", *id) d.SetId("") return nil } - return fmt.Errorf("retrieving Product %q / Group %q (API Management Service %q / Resource Group %q): %+v", productId, groupName, serviceName, resourceGroup, err) + return fmt.Errorf("retrieving %s: %+v", *id, err) } - d.Set("group_name", groupName) - d.Set("product_id", productId) - d.Set("resource_group_name", resourceGroup) - d.Set("api_management_name", serviceName) + d.Set("group_name", id.GroupName) + d.Set("product_id", id.ProductName) + d.Set("resource_group_name", id.ResourceGroup) + d.Set("api_management_name", id.ServiceName) return nil } @@ -118,14 +110,10 @@ func resourceApiManagementProductGroupDelete(d *pluginsdk.ResourceData, meta int if err != nil { return err } - resourceGroup := id.ResourceGroup - serviceName := id.ServiceName - groupName := id.GroupName - productId := id.ProductName - if resp, err := client.Delete(ctx, resourceGroup, serviceName, productId, groupName); err != nil { + if resp, err := client.Delete(ctx, id.ResourceGroup, id.ServiceName, id.ProductName, id.GroupName); err != nil { if !utils.ResponseWasNotFound(resp) { - return fmt.Errorf("removing Product %q from Group %q (API Management Service %q / Resource Group %q): %+v", productId, groupName, serviceName, resourceGroup, err) + return fmt.Errorf("removing %s: %+v", *id, err) } } diff --git a/internal/services/apimanagement/api_management_product_group_resource_test.go b/internal/services/apimanagement/api_management_product_group_resource_test.go index 0b6a7735cf29..a927cfcf1bc1 100644 --- a/internal/services/apimanagement/api_management_product_group_resource_test.go +++ 
b/internal/services/apimanagement/api_management_product_group_resource_test.go @@ -5,10 +5,10 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/apimanagement/parse" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" ) @@ -47,17 +47,13 @@ func TestAccApiManagementProductGroup_requiresImport(t *testing.T) { } func (ApiManagementProductGroupResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := azure.ParseAzureResourceID(state.ID) + id, err := parse.ProductGroupID(state.ID) if err != nil { return nil, err } - resourceGroup := id.ResourceGroup - serviceName := id.Path["service"] - groupName := id.Path["groups"] - productId := id.Path["products"] - if _, err = clients.ApiManagement.ProductGroupsClient.CheckEntityExists(ctx, resourceGroup, serviceName, productId, groupName); err != nil { - return nil, fmt.Errorf("reading ApiManagement Product Group (%s): %+v", id, err) + if _, err = clients.ApiManagement.ProductGroupsClient.CheckEntityExists(ctx, id.ResourceGroup, id.ServiceName, id.ProductName, id.GroupName); err != nil { + return nil, fmt.Errorf("reading %s: %+v", *id, err) } return utils.Bool(true), nil diff --git a/internal/services/apimanagement/api_management_product_policy_resource.go b/internal/services/apimanagement/api_management_product_policy_resource.go index f4c78b51c40a..9bcfc7f06542 100644 --- a/internal/services/apimanagement/api_management_product_policy_resource.go +++ b/internal/services/apimanagement/api_management_product_policy_resource.go @@ -23,8 +23,10 @@ func resourceApiManagementProductPolicy() *pluginsdk.Resource { Read: resourceApiManagementProductPolicyRead, Update: resourceApiManagementProductPolicyCreateUpdate, Delete: resourceApiManagementProductPolicyDelete, - // TODO: replace this with an importer which validates the ID during import - Importer: pluginsdk.DefaultImporter(), + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.ProductPolicyID(id) + return err + }), Timeouts: &pluginsdk.ResourceTimeout{ Create: pluginsdk.DefaultTimeout(30 * time.Minute), diff --git a/internal/services/apimanagement/api_management_product_policy_resource_test.go b/internal/services/apimanagement/api_management_product_policy_resource_test.go index 171988dff28d..3887813f730d 100644 --- a/internal/services/apimanagement/api_management_product_policy_resource_test.go +++ b/internal/services/apimanagement/api_management_product_policy_resource_test.go @@ -6,10 +6,10 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" - "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/apimanagement/parse" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" ) @@ -79,17 
+79,14 @@ func TestAccApiManagementProductPolicy_update(t *testing.T) { } func (ApiManagementProductPolicyResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := azure.ParseAzureResourceID(state.ID) + id, err := parse.ProductPolicyID(state.ID) if err != nil { return nil, err } - resourceGroup := id.ResourceGroup - serviceName := id.Path["service"] - productID := id.Path["products"] - resp, err := clients.ApiManagement.ProductPoliciesClient.Get(ctx, resourceGroup, serviceName, productID, apimanagement.PolicyExportFormatXML) + resp, err := clients.ApiManagement.ProductPoliciesClient.Get(ctx, id.ResourceGroup, id.ServiceName, id.ProductName, apimanagement.PolicyExportFormatXML) if err != nil { - return nil, fmt.Errorf("reading ApiManagement Product Policy (%s): %+v", id, err) + return nil, fmt.Errorf("reading %s: %+v", *id, err) } return utils.Bool(resp.ID != nil), nil diff --git a/internal/services/apimanagement/api_management_product_resource.go b/internal/services/apimanagement/api_management_product_resource.go index c2ca60f95377..1244974cf8a6 100644 --- a/internal/services/apimanagement/api_management_product_resource.go +++ b/internal/services/apimanagement/api_management_product_resource.go @@ -23,8 +23,10 @@ func resourceApiManagementProduct() *pluginsdk.Resource { Read: resourceApiManagementProductRead, Update: resourceApiManagementProductCreateUpdate, Delete: resourceApiManagementProductDelete, - // TODO: replace this with an importer which validates the ID during import - Importer: pluginsdk.DefaultImporter(), + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.ProductID(id) + return err + }), Timeouts: &pluginsdk.ResourceTimeout{ Create: pluginsdk.DefaultTimeout(30 * time.Minute), @@ -81,14 +83,13 @@ func resourceApiManagementProduct() *pluginsdk.Resource { func resourceApiManagementProductCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).ApiManagement.ProductsClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() log.Printf("[INFO] preparing arguments for API Management Product creation.") - resourceGroup := d.Get("resource_group_name").(string) - serviceName := d.Get("api_management_name").(string) - productId := d.Get("product_id").(string) + id := parse.NewProductID(subscriptionId, d.Get("resource_group_name").(string), d.Get("api_management_name").(string), d.Get("product_id").(string)) displayName := d.Get("display_name").(string) description := d.Get("description").(string) @@ -99,10 +100,10 @@ func resourceApiManagementProductCreateUpdate(d *pluginsdk.ResourceData, meta in published := d.Get("published").(bool) if d.IsNewResource() { - existing, err := client.Get(ctx, resourceGroup, serviceName, productId) + existing, err := client.Get(ctx, id.ResourceGroup, id.ServiceName, id.Name) if err != nil { if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("checking for presence of existing Product %q (API Management Service %q / Resource Group %q): %s", productId, serviceName, resourceGroup, err) + return fmt.Errorf("checking for presence of existing %s: %s", id, err) } } @@ -134,20 +135,11 @@ func resourceApiManagementProductCreateUpdate(d *pluginsdk.ResourceData, meta in return fmt.Errorf("`subscription_required` must be true and `subscriptions_limit` must be greater than 0 to use 
`approval_required`") } - if _, err := client.CreateOrUpdate(ctx, resourceGroup, serviceName, productId, properties, ""); err != nil { - return fmt.Errorf("creating/updating Product %q (API Management Service %q / Resource Group %q): %+v", productId, serviceName, resourceGroup, err) + if _, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.ServiceName, id.Name, properties, ""); err != nil { + return fmt.Errorf("creating/updating %s: %+v", id, err) } - resp, err := client.Get(ctx, resourceGroup, serviceName, productId) - if err != nil { - return fmt.Errorf("retrieving Product %q (API Management Service %q / Resource Group %q): %+v", productId, serviceName, resourceGroup, err) - } - - if resp.ID == nil { - return fmt.Errorf("Cannot read ID for Product %q (API Management Service %q / Resource Group %q)", productId, serviceName, resourceGroup) - } - - d.SetId(*resp.ID) + d.SetId(id.ID()) return resourceApiManagementProductRead(d, meta) } @@ -162,24 +154,20 @@ func resourceApiManagementProductRead(d *pluginsdk.ResourceData, meta interface{ return err } - resourceGroup := id.ResourceGroup - serviceName := id.ServiceName - productId := id.Name - - resp, err := client.Get(ctx, resourceGroup, serviceName, productId) + resp, err := client.Get(ctx, id.ResourceGroup, id.ServiceName, id.Name) if err != nil { if utils.ResponseWasNotFound(resp.Response) { - log.Printf("Product %q was not found in API Management Service %q / Resource Group %q - removing from state!", productId, serviceName, resourceGroup) + log.Printf("%s was not found - removing from state!", *id) d.SetId("") return nil } - return fmt.Errorf("making Read request on Product %q (API Management Service %q / Resource Group %q): %+v", productId, serviceName, resourceGroup, err) + return fmt.Errorf("making Read request on %s: %+v", *id, err) } - d.Set("product_id", productId) - d.Set("api_management_name", serviceName) - d.Set("resource_group_name", resourceGroup) + d.Set("product_id", id.Name) + d.Set("api_management_name", id.ServiceName) + d.Set("resource_group_name", id.ResourceGroup) if props := resp.ProductContractProperties; props != nil { d.Set("approval_required", props.ApprovalRequired) @@ -203,16 +191,13 @@ func resourceApiManagementProductDelete(d *pluginsdk.ResourceData, meta interfac if err != nil { return err } - resourceGroup := id.ResourceGroup - serviceName := id.ServiceName - productId := id.Name - log.Printf("[DEBUG] Deleting Product %q (API Management Service %q / Resource Grouo %q)", productId, serviceName, resourceGroup) + log.Printf("[DEBUG] Deleting %s", *id) deleteSubscriptions := true - resp, err := client.Delete(ctx, resourceGroup, serviceName, productId, "", utils.Bool(deleteSubscriptions)) + resp, err := client.Delete(ctx, id.ResourceGroup, id.ServiceName, id.Name, "", utils.Bool(deleteSubscriptions)) if err != nil { if !utils.ResponseWasNotFound(resp) { - return fmt.Errorf("deleting Product %q (API Management Service %q / Resource Group %q): %+v", productId, serviceName, resourceGroup, err) + return fmt.Errorf("deleting %s: %+v", *id, err) } } diff --git a/internal/services/apimanagement/api_management_product_resource_test.go b/internal/services/apimanagement/api_management_product_resource_test.go index 764bbf38d3b9..d87b114318a3 100644 --- a/internal/services/apimanagement/api_management_product_resource_test.go +++ b/internal/services/apimanagement/api_management_product_resource_test.go @@ -6,10 +6,10 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" 
"github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/apimanagement/parse" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" ) @@ -156,17 +156,14 @@ func TestAccApiManagementProduct_approvalRequiredError(t *testing.T) { } func (ApiManagementProductResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := azure.ParseAzureResourceID(state.ID) + id, err := parse.ProductID(state.ID) if err != nil { return nil, err } - resourceGroup := id.ResourceGroup - serviceName := id.Path["service"] - productId := id.Path["products"] - resp, err := clients.ApiManagement.ProductsClient.Get(ctx, resourceGroup, serviceName, productId) + resp, err := clients.ApiManagement.ProductsClient.Get(ctx, id.ResourceGroup, id.ServiceName, id.Name) if err != nil { - return nil, fmt.Errorf("reading ApiManagement Product (%s): %+v", id, err) + return nil, fmt.Errorf("reading %s: %+v", *id, err) } return utils.Bool(resp.ID != nil), nil diff --git a/internal/services/apimanagement/api_management_property_resource.go b/internal/services/apimanagement/api_management_property_resource.go index d678ef5fd93c..d031c9156d53 100644 --- a/internal/services/apimanagement/api_management_property_resource.go +++ b/internal/services/apimanagement/api_management_property_resource.go @@ -27,8 +27,10 @@ func resourceApiManagementProperty() *pluginsdk.Resource { DeprecationMessage: "This resource has been superseded by `azurerm_api_management_named_value` to reflects changes in the API/SDK and will be removed in version 3.0 of the provider.", - // TODO: replace this with an importer which validates the ID during import - Importer: pluginsdk.DefaultImporter(), + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.PropertyID(id) + return err + }), Timeouts: &pluginsdk.ResourceTimeout{ Create: pluginsdk.DefaultTimeout(30 * time.Minute), @@ -80,18 +82,17 @@ func resourceApiManagementProperty() *pluginsdk.Resource { func resourceApiManagementPropertyCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).ApiManagement.NamedValueClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() - name := d.Get("name").(string) - resourceGroup := d.Get("resource_group_name").(string) - serviceName := d.Get("api_management_name").(string) + id := parse.NewPropertyID(subscriptionId, d.Get("resource_group_name").(string), d.Get("api_management_name").(string), d.Get("name").(string)) if d.IsNewResource() { - existing, err := client.Get(ctx, resourceGroup, serviceName, name) + existing, err := client.Get(ctx, id.ResourceGroup, id.ServiceName, id.NamedValueName) if err != nil { if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("checking for presence of existing Property %q (API Management Service %q / Resource Group %q): %s", name, serviceName, resourceGroup, err) + return fmt.Errorf("checking for presence of %s: %s", id, err) } } @@ -112,25 +113,16 @@ func resourceApiManagementPropertyCreateUpdate(d *pluginsdk.ResourceData, meta i parameters.NamedValueCreateContractProperties.Tags = 
utils.ExpandStringSlice(tags.([]interface{})) } - future, err := client.CreateOrUpdate(ctx, resourceGroup, serviceName, name, parameters, "") + future, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.ServiceName, id.NamedValueName, parameters, "") if err != nil { - return fmt.Errorf(" creating or updating Property %q (Resource Group %q / API Management Service %q): %+v", name, resourceGroup, serviceName, err) + return fmt.Errorf(" creating or updating Property %s: %+v", id, err) } if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("waiting on creating/updating Property %q (Resource Group %q / API Management Service %q): %+v", name, resourceGroup, serviceName, err) - } - - resp, err := client.Get(ctx, resourceGroup, serviceName, name) - if err != nil { - return fmt.Errorf("retrieving Property %q (Resource Group %q / API Management Service %q): %+v", name, resourceGroup, serviceName, err) - } - - if resp.ID == nil { - return fmt.Errorf("Cannot read ID for Property %q (Resource Group %q / API Management Service %q)", name, resourceGroup, serviceName) + return fmt.Errorf("waiting on creating/updating %s: %+v", id, err) } - d.SetId(*resp.ID) + d.SetId(id.ID()) return resourceApiManagementPropertyRead(d, meta) } @@ -144,24 +136,21 @@ func resourceApiManagementPropertyRead(d *pluginsdk.ResourceData, meta interface if err != nil { return err } - resourceGroup := id.ResourceGroup - serviceName := id.ServiceName - name := id.NamedValueName - resp, err := client.Get(ctx, resourceGroup, serviceName, name) + resp, err := client.Get(ctx, id.ResourceGroup, id.ServiceName, id.NamedValueName) if err != nil { if utils.ResponseWasNotFound(resp.Response) { - log.Printf("[DEBUG] Property %q (Resource Group %q / API Management Service %q) was not found - removing from state!", name, resourceGroup, serviceName) + log.Printf("[DEBUG] %s was not found - removing from state!", *id) d.SetId("") return nil } - return fmt.Errorf("making Read request for Property %q (Resource Group %q / API Management Service %q): %+v", name, resourceGroup, serviceName, err) + return fmt.Errorf("making Read request for %s: %+v", *id, err) } - d.Set("name", resp.Name) - d.Set("resource_group_name", resourceGroup) - d.Set("api_management_name", serviceName) + d.Set("name", id.NamedValueName) + d.Set("resource_group_name", id.ResourceGroup) + d.Set("api_management_name", id.ServiceName) if properties := resp.NamedValueContractProperties; properties != nil { d.Set("display_name", properties.DisplayName) @@ -184,13 +173,10 @@ func resourceApiManagementPropertyDelete(d *pluginsdk.ResourceData, meta interfa if err != nil { return err } - resourceGroup := id.ResourceGroup - serviceName := id.ServiceName - name := id.NamedValueName - if resp, err := client.Delete(ctx, resourceGroup, serviceName, name, ""); err != nil { + if resp, err := client.Delete(ctx, id.ResourceGroup, id.ServiceName, id.NamedValueName, ""); err != nil { if !utils.ResponseWasNotFound(resp) { - return fmt.Errorf("deleting Property %q (Resource Group %q / API Management Service %q): %+v", name, resourceGroup, serviceName, err) + return fmt.Errorf("deleting %s: %+v", *id, err) } } diff --git a/internal/services/apimanagement/api_management_property_resource_test.go b/internal/services/apimanagement/api_management_property_resource_test.go index 2d47f986d340..5fd11e5e718c 100644 --- a/internal/services/apimanagement/api_management_property_resource_test.go +++ 
b/internal/services/apimanagement/api_management_property_resource_test.go @@ -5,10 +5,10 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/apimanagement/parse" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" ) @@ -66,17 +66,14 @@ func TestAccApiManagementProperty_update(t *testing.T) { } func (ApiManagementPropertyResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := azure.ParseAzureResourceID(state.ID) + id, err := parse.PropertyID(state.ID) if err != nil { return nil, err } - resourceGroup := id.ResourceGroup - serviceName := id.Path["service"] - name := id.Path["namedValues"] - resp, err := clients.ApiManagement.NamedValueClient.Get(ctx, resourceGroup, serviceName, name) + resp, err := clients.ApiManagement.NamedValueClient.Get(ctx, id.ResourceGroup, id.ServiceName, id.NamedValueName) if err != nil { - return nil, fmt.Errorf("reading ApiManagement Property (%s): %+v", id, err) + return nil, fmt.Errorf("reading %s: %+v", *id, err) } return utils.Bool(resp.ID != nil), nil diff --git a/internal/services/apimanagement/api_management_user_resource.go b/internal/services/apimanagement/api_management_user_resource.go index 8d52998e69a9..8bba35318c92 100644 --- a/internal/services/apimanagement/api_management_user_resource.go +++ b/internal/services/apimanagement/api_management_user_resource.go @@ -23,8 +23,10 @@ func resourceApiManagementUser() *pluginsdk.Resource { Read: resourceApiManagementUserRead, Update: resourceApiManagementUserCreateUpdate, Delete: resourceApiManagementUserDelete, - // TODO: replace this with an importer which validates the ID during import - Importer: pluginsdk.DefaultImporter(), + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.UserID(id) + return err + }), Timeouts: &pluginsdk.ResourceTimeout{ Create: pluginsdk.DefaultTimeout(45 * time.Minute), @@ -95,14 +97,13 @@ func resourceApiManagementUser() *pluginsdk.Resource { func resourceApiManagementUserCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).ApiManagement.UsersClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() log.Printf("[INFO] preparing arguments for API Management User creation.") - resourceGroup := d.Get("resource_group_name").(string) - serviceName := d.Get("api_management_name").(string) - userId := d.Get("user_id").(string) + id := parse.NewUserID(subscriptionId, d.Get("resource_group_name").(string), d.Get("api_management_name").(string), d.Get("user_id").(string)) firstName := d.Get("first_name").(string) lastName := d.Get("last_name").(string) @@ -112,10 +113,10 @@ func resourceApiManagementUserCreateUpdate(d *pluginsdk.ResourceData, meta inter password := d.Get("password").(string) if d.IsNewResource() { - existing, err := client.Get(ctx, resourceGroup, serviceName, userId) + existing, err := client.Get(ctx, id.ResourceGroup, id.ServiceName, id.Name) if err != nil { if !utils.ResponseWasNotFound(existing.Response) { - return 
fmt.Errorf("checking for presence of existing User %q (API Management Service %q / Resource Group %q): %s", userId, serviceName, resourceGroup, err) + return fmt.Errorf("checking for presence of existing %s: %s", id, err) } } @@ -147,20 +148,11 @@ func resourceApiManagementUserCreateUpdate(d *pluginsdk.ResourceData, meta inter } notify := utils.Bool(false) - if _, err := client.CreateOrUpdate(ctx, resourceGroup, serviceName, userId, properties, notify, ""); err != nil { - return fmt.Errorf("creating/updating User %q (API Management Service %q / Resource Group %q): %+v", userId, serviceName, resourceGroup, err) + if _, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.ServiceName, id.Name, properties, notify, ""); err != nil { + return fmt.Errorf("creating/updating %s: %+v", id, err) } - resp, err := client.Get(ctx, resourceGroup, serviceName, userId) - if err != nil { - return fmt.Errorf("retrieving User %q (API Management Service %q / Resource Group %q): %+v", userId, serviceName, resourceGroup, err) - } - - if resp.ID == nil { - return fmt.Errorf("Cannot read ID for User %q (API Management Service %q / Resource Group %q)", userId, serviceName, resourceGroup) - } - - d.SetId(*resp.ID) + d.SetId(id.ID()) return resourceApiManagementUserRead(d, meta) } @@ -175,24 +167,20 @@ func resourceApiManagementUserRead(d *pluginsdk.ResourceData, meta interface{}) return err } - resourceGroup := id.ResourceGroup - serviceName := id.ServiceName - userId := id.Name - - resp, err := client.Get(ctx, resourceGroup, serviceName, userId) + resp, err := client.Get(ctx, id.ResourceGroup, id.ServiceName, id.Name) if err != nil { if utils.ResponseWasNotFound(resp.Response) { - log.Printf("User %q was not found in API Management Service %q / Resource Group %q - removing from state!", userId, serviceName, resourceGroup) + log.Printf("%s was not found - removing from state!", *id) d.SetId("") return nil } - return fmt.Errorf("making Read request on User %q (API Management Service %q / Resource Group %q): %+v", userId, serviceName, resourceGroup, err) + return fmt.Errorf("making Read request on %s: %+v", *id, err) } - d.Set("user_id", userId) - d.Set("api_management_name", serviceName) - d.Set("resource_group_name", resourceGroup) + d.Set("user_id", id.Name) + d.Set("api_management_name", id.ServiceName) + d.Set("resource_group_name", id.ResourceGroup) if props := resp.UserContractProperties; props != nil { d.Set("first_name", props.FirstName) @@ -214,17 +202,14 @@ func resourceApiManagementUserDelete(d *pluginsdk.ResourceData, meta interface{} if err != nil { return err } - resourceGroup := id.ResourceGroup - serviceName := id.ServiceName - userId := id.Name - log.Printf("[DEBUG] Deleting User %q (API Management Service %q / Resource Grouo %q)", userId, serviceName, resourceGroup) + log.Printf("[DEBUG] Deleting %s", *id) deleteSubscriptions := utils.Bool(true) notify := utils.Bool(false) - resp, err := client.Delete(ctx, resourceGroup, serviceName, userId, "", deleteSubscriptions, notify, apimanagement.DeveloperPortal) + resp, err := client.Delete(ctx, id.ResourceGroup, id.ServiceName, id.Name, "", deleteSubscriptions, notify, apimanagement.DeveloperPortal) if err != nil { if !utils.ResponseWasNotFound(resp) { - return fmt.Errorf("deleting User %q (API Management Service %q / Resource Group %q): %+v", userId, serviceName, resourceGroup, err) + return fmt.Errorf("deleting %s: %+v", *id, err) } } diff --git a/internal/services/apimanagement/api_management_user_resource_test.go 
b/internal/services/apimanagement/api_management_user_resource_test.go index 14e51e6bfcc9..fe0ed9f5c88d 100644 --- a/internal/services/apimanagement/api_management_user_resource_test.go +++ b/internal/services/apimanagement/api_management_user_resource_test.go @@ -5,10 +5,10 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/apimanagement/parse" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" ) @@ -178,17 +178,14 @@ func TestAccApiManagementUser_complete(t *testing.T) { } func (ApiManagementUserResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := azure.ParseAzureResourceID(state.ID) + id, err := parse.UserID(state.ID) if err != nil { return nil, err } - resourceGroup := id.ResourceGroup - serviceName := id.Path["service"] - userId := id.Path["users"] - resp, err := clients.ApiManagement.UsersClient.Get(ctx, resourceGroup, serviceName, userId) + resp, err := clients.ApiManagement.UsersClient.Get(ctx, id.ResourceGroup, id.ServiceName, id.Name) if err != nil { - return nil, fmt.Errorf("reading ApiManagement User (%s): %+v", id, err) + return nil, fmt.Errorf("reading %s: %+v", *id, err) } return utils.Bool(resp.ID != nil), nil diff --git a/internal/services/authorization/role_assignment_resource_test.go b/internal/services/authorization/role_assignment_resource_test.go index e566ebb267d9..cd8db471e9f8 100644 --- a/internal/services/authorization/role_assignment_resource_test.go +++ b/internal/services/authorization/role_assignment_resource_test.go @@ -519,7 +519,7 @@ data "azurerm_subscription" "current" { } resource "azuread_application" "test" { - name = "acctestspa-%d" + display_name = "acctestspa-%d" } resource "azuread_service_principal" "test" { @@ -547,7 +547,7 @@ data "azurerm_subscription" "current" { } resource "azuread_application" "test" { - name = "acctestspa-%d" + display_name = "acctestspa-%d" } resource "azuread_service_principal" "test" { @@ -576,7 +576,8 @@ data "azurerm_subscription" "current" { } resource "azuread_group" "test" { - name = "acctestspa-%d" + display_name = "acctestspa-%d" + security_enabled = true } resource "azurerm_role_assignment" "test" { diff --git a/internal/services/azurestackhci/stack_hci_cluster_resource_test.go b/internal/services/azurestackhci/stack_hci_cluster_resource_test.go index 788eae0c9ead..4006703f3c0a 100644 --- a/internal/services/azurestackhci/stack_hci_cluster_resource_test.go +++ b/internal/services/azurestackhci/stack_hci_cluster_resource_test.go @@ -180,7 +180,7 @@ provider "azuread" {} data "azurerm_client_config" "current" {} resource "azuread_application" "test" { - name = "acctestspa-%d" + display_name = "acctestspa-%d" } resource "azurerm_resource_group" "test" { diff --git a/internal/services/compute/availability_set_resource.go b/internal/services/compute/availability_set_resource.go index 2818362338ff..cb39796bcd8a 100644 --- a/internal/services/compute/availability_set_resource.go +++ b/internal/services/compute/availability_set_resource.go @@ -7,7 +7,7 @@ import ( "strings" "time" - 
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" diff --git a/internal/services/compute/client/client.go b/internal/services/compute/client/client.go index 6c8c8de9bccd..fa5e6d228931 100644 --- a/internal/services/compute/client/client.go +++ b/internal/services/compute/client/client.go @@ -1,7 +1,7 @@ package client import ( - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" "github.com/Azure/azure-sdk-for-go/services/marketplaceordering/mgmt/2015-06-01/marketplaceordering" "github.com/hashicorp/terraform-provider-azurerm/internal/common" ) diff --git a/internal/services/compute/dedicated_host_group_resource.go b/internal/services/compute/dedicated_host_group_resource.go index 0907b66af224..39b67a1691e1 100644 --- a/internal/services/compute/dedicated_host_group_resource.go +++ b/internal/services/compute/dedicated_host_group_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" diff --git a/internal/services/compute/dedicated_host_resource.go b/internal/services/compute/dedicated_host_resource.go index e707bde00ab5..0778f8a70042 100644 --- a/internal/services/compute/dedicated_host_resource.go +++ b/internal/services/compute/dedicated_host_resource.go @@ -6,7 +6,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" diff --git a/internal/services/compute/disk_access_resource.go b/internal/services/compute/disk_access_resource.go index b4aff8ecdad1..703c4dc38ab9 100644 --- a/internal/services/compute/disk_access_resource.go +++ b/internal/services/compute/disk_access_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" diff --git a/internal/services/compute/disk_encryption_set_resource.go b/internal/services/compute/disk_encryption_set_resource.go index 5ff069be6e74..3ba4b1a5733a 100644 --- a/internal/services/compute/disk_encryption_set_resource.go +++ b/internal/services/compute/disk_encryption_set_resource.go @@ -6,7 +6,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" 
"github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" diff --git a/internal/services/compute/encryption_settings.go b/internal/services/compute/encryption_settings.go index 944348b306b1..a050c9deb3b7 100644 --- a/internal/services/compute/encryption_settings.go +++ b/internal/services/compute/encryption_settings.go @@ -1,7 +1,7 @@ package compute import ( - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" ) diff --git a/internal/services/compute/helpers.go b/internal/services/compute/helpers.go index ea5aabe1703b..10f3ae4b6671 100644 --- a/internal/services/compute/helpers.go +++ b/internal/services/compute/helpers.go @@ -1,7 +1,7 @@ package compute import ( - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" "github.com/hashicorp/terraform-provider-azurerm/utils" ) diff --git a/internal/services/compute/image_data_source.go b/internal/services/compute/image_data_source.go index 0453c9ad7cd3..87e44b70693c 100644 --- a/internal/services/compute/image_data_source.go +++ b/internal/services/compute/image_data_source.go @@ -7,7 +7,7 @@ import ( "sort" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" "github.com/hashicorp/terraform-provider-azurerm/internal/tags" diff --git a/internal/services/compute/image_resource.go b/internal/services/compute/image_resource.go index 6f3fd2ce56ec..021ed938ea3d 100644 --- a/internal/services/compute/image_resource.go +++ b/internal/services/compute/image_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" diff --git a/internal/services/compute/image_resource_test.go b/internal/services/compute/image_resource_test.go index 232da941503e..021c0c84a01b 100644 --- a/internal/services/compute/image_resource_test.go +++ b/internal/services/compute/image_resource_test.go @@ -280,7 +280,8 @@ func (ImageResource) generalizeVirtualMachine(data acceptance.TestData) func(con } log.Printf("[DEBUG] Deallocating VM..") - future, err := client.Compute.VMClient.Deallocate(ctx, id.ResourceGroup, id.Name) + // Upgrading to the 2021-07-01 exposed a new hibernate parameter in the GET method + future, err := client.Compute.VMClient.Deallocate(ctx, id.ResourceGroup, id.Name, utils.Bool(false)) if err != nil { return fmt.Errorf("Bad: deallocating vm: %+v", err) } @@ -322,7 +323,8 @@ func (ImageResource) virtualMachineScaleSetExists(ctx context.Context, client *c return err } - resp, err := client.Compute.VMScaleSetClient.Get(ctx, id.ResourceGroup, id.Name) + // Upgrading to the 2021-07-01 exposed a new expand parameter in the GET method + resp, err := client.Compute.VMScaleSetClient.Get(ctx, 
id.ResourceGroup, id.Name, "") if err != nil { if utils.ResponseWasNotFound(resp.Response) { return fmt.Errorf("%s does not exist", *id) diff --git a/internal/services/compute/images_data_source.go b/internal/services/compute/images_data_source.go index 3aa59d71344b..18ee48a104c3 100644 --- a/internal/services/compute/images_data_source.go +++ b/internal/services/compute/images_data_source.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" "github.com/hashicorp/terraform-provider-azurerm/internal/location" diff --git a/internal/services/compute/linux_virtual_machine_resource.go b/internal/services/compute/linux_virtual_machine_resource.go index 503974598e14..82e92cb27378 100644 --- a/internal/services/compute/linux_virtual_machine_resource.go +++ b/internal/services/compute/linux_virtual_machine_resource.go @@ -7,7 +7,7 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" @@ -1039,7 +1039,8 @@ func resourceLinuxVirtualMachineUpdate(d *pluginsdk.ResourceData, meta interface if shouldDeallocate { if !hasEphemeralOSDisk { log.Printf("[DEBUG] Deallocating Linux Virtual Machine %q (Resource Group %q)..", id.Name, id.ResourceGroup) - future, err := client.Deallocate(ctx, id.ResourceGroup, id.Name) + // Upgrade to 2021-07-01 added a hibernate parameter to this call defaulting to false + future, err := client.Deallocate(ctx, id.ResourceGroup, id.Name, utils.Bool(false)) if err != nil { return fmt.Errorf("Deallocating Linux Virtual Machine %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) } diff --git a/internal/services/compute/linux_virtual_machine_scale_set_resource.go b/internal/services/compute/linux_virtual_machine_scale_set_resource.go index 5536f99b999a..16bced689f06 100644 --- a/internal/services/compute/linux_virtual_machine_scale_set_resource.go +++ b/internal/services/compute/linux_virtual_machine_scale_set_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" azValidate "github.com/hashicorp/terraform-provider-azurerm/helpers/validate" @@ -282,7 +282,8 @@ func resourceLinuxVirtualMachineScaleSetCreate(d *pluginsdk.ResourceData, meta i resourceGroup := d.Get("resource_group_name").(string) name := d.Get("name").(string) - exists, err := client.Get(ctx, resourceGroup, name) + // Upgrading to the 2021-07-01 exposed a new expand parameter to the GET method + exists, err := client.Get(ctx, resourceGroup, name, compute.ExpandTypesForGetVMScaleSetsUserData) if err != nil { if !utils.ResponseWasNotFound(exists.Response) { return fmt.Errorf("checking for existing Linux Virtual Machine Scale Set %q (Resource Group %q): %+v", name, resourceGroup, err) @@ -345,16 +346,16 @@ func resourceLinuxVirtualMachineScaleSetCreate(d 
*pluginsdk.ResourceData, meta i rollingUpgradePolicy := ExpandVirtualMachineScaleSetRollingUpgradePolicy(rollingUpgradePolicyRaw) if upgradeMode != compute.UpgradeModeAutomatic && len(automaticOSUpgradePolicyRaw) > 0 { - return fmt.Errorf("An `automatic_os_upgrade_policy` block cannot be specified when `upgrade_mode` is not set to `Automatic`") + return fmt.Errorf("an `automatic_os_upgrade_policy` block cannot be specified when `upgrade_mode` is not set to `Automatic`") } shouldHaveRollingUpgradePolicy := upgradeMode == compute.UpgradeModeAutomatic || upgradeMode == compute.UpgradeModeRolling if !shouldHaveRollingUpgradePolicy && len(rollingUpgradePolicyRaw) > 0 { - return fmt.Errorf("A `rolling_upgrade_policy` block cannot be specified when `upgrade_mode` is set to %q", string(upgradeMode)) + return fmt.Errorf("a `rolling_upgrade_policy` block cannot be specified when `upgrade_mode` is set to %q", string(upgradeMode)) } shouldHaveRollingUpgradePolicy = upgradeMode == compute.UpgradeModeRolling if shouldHaveRollingUpgradePolicy && len(rollingUpgradePolicyRaw) == 0 { - return fmt.Errorf("A `rolling_upgrade_policy` block must be specified when `upgrade_mode` is set to %q", string(upgradeMode)) + return fmt.Errorf("a `rolling_upgrade_policy` block must be specified when `upgrade_mode` is set to %q", string(upgradeMode)) } secretsRaw := d.Get("secret").([]interface{}) @@ -461,16 +462,16 @@ func resourceLinuxVirtualMachineScaleSetCreate(d *pluginsdk.ResourceData, meta i // Azure API: "Authentication using either SSH or by user name and password must be enabled in Linux profile." if disablePasswordAuthentication && virtualMachineProfile.OsProfile.AdminPassword == nil && len(sshKeys) == 0 { - return fmt.Errorf("At least one SSH key must be specified if `disable_password_authentication` is enabled") + return fmt.Errorf("at least one SSH key must be specified if `disable_password_authentication` is enabled") } if evictionPolicyRaw, ok := d.GetOk("eviction_policy"); ok { if virtualMachineProfile.Priority != compute.VirtualMachinePriorityTypesSpot { - return fmt.Errorf("An `eviction_policy` can only be specified when `priority` is set to `Spot`") + return fmt.Errorf("an `eviction_policy` can only be specified when `priority` is set to `Spot`") } virtualMachineProfile.EvictionPolicy = compute.VirtualMachineEvictionPolicyTypes(evictionPolicyRaw.(string)) } else if priority == compute.VirtualMachinePriorityTypesSpot { - return fmt.Errorf("An `eviction_policy` must be specified when `priority` is set to `Spot`") + return fmt.Errorf("an `eviction_policy` must be specified when `priority` is set to `Spot`") } if v, ok := d.GetOk("terminate_notification"); ok { @@ -501,6 +502,10 @@ func resourceLinuxVirtualMachineScaleSetCreate(d *pluginsdk.ResourceData, meta i SinglePlacementGroup: utils.Bool(d.Get("single_placement_group").(bool)), VirtualMachineProfile: &virtualMachineProfile, UpgradePolicy: &upgradePolicy, + // OrchestrationMode needs to be hardcoded to Uniform, for the + // standard VMSS resource, since virtualMachineProfile is now supported + // in both VMSS and Orchestrated VMSS... 
+ OrchestrationMode: compute.OrchestrationModeUniform, ScaleInPolicy: &compute.ScaleInPolicy{ Rules: &[]compute.VirtualMachineScaleSetScaleInRules{compute.VirtualMachineScaleSetScaleInRules(scaleInPolicy)}, }, @@ -539,7 +544,8 @@ func resourceLinuxVirtualMachineScaleSetCreate(d *pluginsdk.ResourceData, meta i log.Printf("[DEBUG] Virtual Machine Scale Set %q (Resource Group %q) was created", name, resourceGroup) log.Printf("[DEBUG] Retrieving Virtual Machine Scale Set %q (Resource Group %q)..", name, resourceGroup) - resp, err := client.Get(ctx, resourceGroup, name) + // Upgrading to the 2021-07-01 exposed a new expand parameter to the GET method + resp, err := client.Get(ctx, resourceGroup, name, compute.ExpandTypesForGetVMScaleSetsUserData) if err != nil { return fmt.Errorf("retrieving Linux Virtual Machine Scale Set %q (Resource Group %q): %+v", name, resourceGroup, err) } @@ -565,7 +571,8 @@ func resourceLinuxVirtualMachineScaleSetUpdate(d *pluginsdk.ResourceData, meta i updateInstances := false // retrieve - existing, err := client.Get(ctx, id.ResourceGroup, id.Name) + // Upgrading to the 2021-07-01 exposed a new expand parameter to the GET method + existing, err := client.Get(ctx, id.ResourceGroup, id.Name, compute.ExpandTypesForGetVMScaleSetsUserData) if err != nil { return fmt.Errorf("retrieving Linux Virtual Machine Scale Set %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) } @@ -867,7 +874,8 @@ func resourceLinuxVirtualMachineScaleSetRead(d *pluginsdk.ResourceData, meta int return err } - resp, err := client.Get(ctx, id.ResourceGroup, id.Name) + // Upgrading to the 2021-07-01 exposed a new expand parameter to the GET method + resp, err := client.Get(ctx, id.ResourceGroup, id.Name, compute.ExpandTypesForGetVMScaleSetsUserData) if err != nil { if utils.ResponseWasNotFound(resp.Response) { log.Printf("[DEBUG] Linux Virtual Machine Scale Set %q was not found in Resource Group %q - removing from state!", id.Name, id.ResourceGroup) @@ -1074,7 +1082,8 @@ func resourceLinuxVirtualMachineScaleSetDelete(d *pluginsdk.ResourceData, meta i return err } - resp, err := client.Get(ctx, id.ResourceGroup, id.Name) + // Upgrading to the 2021-07-01 exposed a new expand parameter to the GET method + resp, err := client.Get(ctx, id.ResourceGroup, id.Name, compute.ExpandTypesForGetVMScaleSetsUserData) if err != nil { if utils.ResponseWasNotFound(resp.Response) { return nil diff --git a/internal/services/compute/linux_virtual_machine_scale_set_resource_test.go b/internal/services/compute/linux_virtual_machine_scale_set_resource_test.go index 8e41e1793221..8af0494a94b8 100644 --- a/internal/services/compute/linux_virtual_machine_scale_set_resource_test.go +++ b/internal/services/compute/linux_virtual_machine_scale_set_resource_test.go @@ -20,7 +20,8 @@ func (r LinuxVirtualMachineScaleSetResource) Exists(ctx context.Context, clients return nil, err } - resp, err := clients.Compute.VMScaleSetClient.Get(ctx, id.ResourceGroup, id.Name) + // Upgrading to the 2021-07-01 exposed a new expand parameter in the GET method + resp, err := clients.Compute.VMScaleSetClient.Get(ctx, id.ResourceGroup, id.Name, "") if err != nil { return nil, fmt.Errorf("retrieving Compute Linux Virtual Machine Scale Set %q: %+v", id, err) } diff --git a/internal/services/compute/managed_disk_resource.go b/internal/services/compute/managed_disk_resource.go index eb60521f7d36..61176290fbd6 100644 --- a/internal/services/compute/managed_disk_resource.go +++ b/internal/services/compute/managed_disk_resource.go @@ -6,7 +6,7 @@ 
import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" @@ -147,6 +147,20 @@ func resourceManagedDisk() *pluginsdk.Resource { ValidateFunc: validation.IntAtLeast(1), }, + "disk_iops_read_only": { + Type: pluginsdk.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(1), + }, + + "disk_mbps_read_only": { + Type: pluginsdk.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(1), + }, + "disk_encryption_set_id": { Type: pluginsdk.TypeString, Optional: true, @@ -229,6 +243,7 @@ func resourceManagedDiskCreate(d *pluginsdk.ResourceData, meta interface{}) erro createOption := compute.DiskCreateOption(d.Get("create_option").(string)) storageAccountType := d.Get("storage_account_type").(string) osType := d.Get("os_type").(string) + maxShares := d.Get("max_shares").(int) t := d.Get("tags").(map[string]interface{}) zones := azure.ExpandZones(d.Get("zones").([]interface{})) @@ -249,8 +264,8 @@ func resourceManagedDiskCreate(d *pluginsdk.ResourceData, meta interface{}) erro props.DiskSizeGB = &diskSize } - if v, ok := d.GetOk("max_shares"); ok { - props.MaxShares = utils.Int32(int32(v.(int))) + if maxShares != 0 { + props.MaxShares = utils.Int32(int32(maxShares)) } if storageAccountType == string(compute.StorageAccountTypesUltraSSDLRS) { @@ -266,11 +281,27 @@ func resourceManagedDiskCreate(d *pluginsdk.ResourceData, meta interface{}) erro props.DiskMBpsReadWrite = &diskMBps } + if v, ok := d.GetOk("disk_iops_read_only"); ok { + if maxShares == 0 { + return fmt.Errorf("[ERROR] disk_iops_read_only is only available for UltraSSD disks with shared disk enabled") + } + + props.DiskIOPSReadOnly = utils.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("disk_mbps_read_only"); ok { + if maxShares == 0 { + return fmt.Errorf("[ERROR] disk_mbps_read_only is only available for UltraSSD disks with shared disk enabled") + } + + props.DiskMBpsReadOnly = utils.Int64(int64(v.(int))) + } + if v, ok := d.GetOk("logical_sector_size"); ok { props.CreationData.LogicalSectorSize = utils.Int32(int32(v.(int))) } - } else if d.HasChange("disk_iops_read_write") || d.HasChange("disk_mbps_read_write") || d.HasChange("logical_sector_size") { - return fmt.Errorf("[ERROR] disk_iops_read_write, disk_mbps_read_write and logical_sector_size are only available for UltraSSD disks") + } else if d.HasChange("disk_iops_read_write") || d.HasChange("disk_mbps_read_write") || d.HasChange("disk_iops_read_only") || d.HasChange("disk_mbps_read_only") || d.HasChange("logical_sector_size") { + return fmt.Errorf("[ERROR] disk_iops_read_write, disk_mbps_read_write, disk_iops_read_only, disk_mbps_read_only and logical_sector_size are only available for UltraSSD disks") } if createOption == compute.DiskCreateOptionImport { @@ -398,6 +429,7 @@ func resourceManagedDiskUpdate(d *pluginsdk.ResourceData, meta interface{}) erro name := d.Get("name").(string) resourceGroup := d.Get("resource_group_name").(string) + maxShares := d.Get("max_shares").(int) storageAccountType := d.Get("storage_account_type").(string) shouldShutDown := false @@ -415,9 +447,7 @@ func resourceManagedDiskUpdate(d *pluginsdk.ResourceData, meta interface{}) erro } if d.HasChange("max_shares") { - v := 
d.Get("max_shares") - maxShares := int32(v.(int)) - diskUpdate.MaxShares = &maxShares + diskUpdate.MaxShares = utils.Int32(int32(maxShares)) var skuName compute.DiskStorageAccountTypes for _, v := range compute.PossibleDiskStorageAccountTypesValues() { if strings.EqualFold(storageAccountType, string(v)) { @@ -468,8 +498,27 @@ func resourceManagedDiskUpdate(d *pluginsdk.ResourceData, meta interface{}) erro diskMBps := int64(v.(int)) diskUpdate.DiskMBpsReadWrite = &diskMBps } - } else if d.HasChange("disk_iops_read_write") || d.HasChange("disk_mbps_read_write") { - return fmt.Errorf("[ERROR] disk_iops_read_write and disk_mbps_read_write are only available for UltraSSD disks") + + if d.HasChange("disk_iops_read_only") { + if maxShares == 0 { + return fmt.Errorf("[ERROR] disk_iops_read_only is only available for UltraSSD disks with shared disk enabled") + } + + v := d.Get("disk_iops_read_only") + diskUpdate.DiskIOPSReadOnly = utils.Int64(int64(v.(int))) + } + + if d.HasChange("disk_mbps_read_only") { + if maxShares == 0 { + return fmt.Errorf("[ERROR] disk_mbps_read_only is only available for UltraSSD disks with shared disk enabled") + } + + v := d.Get("disk_mbps_read_only") + diskUpdate.DiskMBpsReadOnly = utils.Int64(int64(v.(int))) + } + + } else if d.HasChange("disk_iops_read_write") || d.HasChange("disk_mbps_read_write") || d.HasChange("disk_iops_read_only") || d.HasChange("disk_mbps_read_only") { + return fmt.Errorf("[ERROR] disk_iops_read_write, disk_mbps_read_write, disk_iops_read_only and disk_mbps_read_only are only available for UltraSSD disks") } if d.HasChange("os_type") { @@ -585,7 +634,8 @@ func resourceManagedDiskUpdate(d *pluginsdk.ResourceData, meta interface{}) erro // De-allocate if shouldDeallocate { log.Printf("[DEBUG] Deallocating Virtual Machine %q (Resource Group %q)..", virtualMachine.Name, virtualMachine.ResourceGroup) - deAllocFuture, err := vmClient.Deallocate(ctx, virtualMachine.ResourceGroup, virtualMachine.Name) + // Upgrading to 2021-07-01 exposed a new hibernate paramater to the Deallocate method + deAllocFuture, err := vmClient.Deallocate(ctx, virtualMachine.ResourceGroup, virtualMachine.Name, utils.Bool(false)) if err != nil { return fmt.Errorf("Deallocating to Virtual Machine %q (Resource Group %q): %+v", virtualMachine.Name, virtualMachine.ResourceGroup, err) } @@ -687,6 +737,8 @@ func resourceManagedDiskRead(d *pluginsdk.ResourceData, meta interface{}) error d.Set("disk_size_gb", props.DiskSizeGB) d.Set("disk_iops_read_write", props.DiskIOPSReadWrite) d.Set("disk_mbps_read_write", props.DiskMBpsReadWrite) + d.Set("disk_iops_read_only", props.DiskIOPSReadOnly) + d.Set("disk_mbps_read_only", props.DiskMBpsReadOnly) d.Set("os_type", props.OsType) d.Set("tier", props.Tier) d.Set("max_shares", props.MaxShares) diff --git a/internal/services/compute/managed_disk_resource_test.go b/internal/services/compute/managed_disk_resource_test.go index 8d9beb317b40..7a4e53ad8cc4 100644 --- a/internal/services/compute/managed_disk_resource_test.go +++ b/internal/services/compute/managed_disk_resource_test.go @@ -5,7 +5,7 @@ import ( "fmt" "testing" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" @@ -475,6 +475,27 @@ func 
TestAccAzureRMManagedDisk_create_withTrustedLaunchEnabled(t *testing.T) { }) } +func TestAccAzureRMManagedDisk_update_withIOpsReadOnlyAndMBpsReadOnly(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_managed_disk", "test") + r := ManagedDiskResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.create_withIOpsReadOnlyAndMBpsReadOnly(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.update_withIOpsReadOnlyAndMBpsReadOnly(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + }) +} + func (ManagedDiskResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.ManagedDiskID(state.ID) if err != nil { @@ -1469,3 +1490,65 @@ resource "azurerm_managed_disk" "test" { } `, data.Locations.Primary, data.RandomInteger, data.Locations.Primary, data.RandomInteger) } + +func (ManagedDiskResource) create_withIOpsReadOnlyAndMBpsReadOnly(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_managed_disk" "test" { + name = "acctestd-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + storage_account_type = "UltraSSD_LRS" + create_option = "Empty" + disk_size_gb = "4" + disk_iops_read_only = "101" + disk_mbps_read_only = "10" + max_shares = "2" + zones = ["1"] + + tags = { + environment = "acctest" + cost-center = "ops" + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger) +} + +func (ManagedDiskResource) update_withIOpsReadOnlyAndMBpsReadOnly(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_managed_disk" "test" { + name = "acctestd-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + storage_account_type = "UltraSSD_LRS" + create_option = "Empty" + disk_size_gb = "4" + disk_iops_read_only = "102" + disk_mbps_read_only = "11" + max_shares = "2" + zones = ["1"] + + tags = { + environment = "acctest" + cost-center = "ops" + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger) +} diff --git a/internal/services/compute/migration/legacy_vmss.go b/internal/services/compute/migration/legacy_vmss.go index 4966d08b7354..18390cc77894 100644 --- a/internal/services/compute/migration/legacy_vmss.go +++ b/internal/services/compute/migration/legacy_vmss.go @@ -700,7 +700,7 @@ func (LegacyVMSSV0ToV1) UpgradeFunc() pluginsdk.StateUpgraderFunc { resGroup := rawState["resource_group_name"].(string) name := rawState["name"].(string) - read, err := client.Get(ctx, resGroup, name) + read, err := client.Get(ctx, resGroup, name, "") if err != nil { return rawState, err } diff --git a/internal/services/compute/network_interface.go b/internal/services/compute/network_interface.go index 247c1cc9bea4..2f2338d064fe 100644 --- a/internal/services/compute/network_interface.go +++ b/internal/services/compute/network_interface.go @@ -3,7 +3,7 @@ package compute import ( "context" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" + 
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network" "github.com/hashicorp/terraform-provider-azurerm/internal/services/network/parse" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" diff --git a/internal/services/compute/orchestrated_virtual_machine_scale_set.go b/internal/services/compute/orchestrated_virtual_machine_scale_set.go new file mode 100644 index 000000000000..7fe520781b77 --- /dev/null +++ b/internal/services/compute/orchestrated_virtual_machine_scale_set.go @@ -0,0 +1,1973 @@ +package compute + +import ( + "fmt" + "regexp" + "strings" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" + "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" + azValidate "github.com/hashicorp/terraform-provider-azurerm/helpers/validate" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/compute/validate" + msiparse "github.com/hashicorp/terraform-provider-azurerm/internal/services/msi/parse" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" + "github.com/hashicorp/terraform-provider-azurerm/utils" +) + +func OrchestratedVirtualMachineScaleSetOSProfileSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "custom_data": { + Type: pluginsdk.TypeString, + Optional: true, + Sensitive: true, + ValidateFunc: validation.StringIsBase64, + }, + "windows_configuration": OrchestratedVirtualMachineScaleSetWindowsConfigurationSchema(), + "linux_configuration": OrchestratedVirtualMachineScaleSetLinuxConfigurationSchema(), + }, + }, + } +} + +func OrchestratedVirtualMachineScaleSetWindowsConfigurationSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "admin_username": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateAdminUsernameWindows, + }, + + "admin_password": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + Sensitive: true, + DiffSuppressFunc: adminPasswordDiffSuppressFunc, + ValidateFunc: validatePasswordComplexityWindows, + }, + + "computer_name_prefix": computerPrefixWindowsSchema(), + + // I am only commenting this out as this is going to be supported in the next release of the API in October 2021 + // "additional_unattend_content": additionalUnattendContentSchema(), + + "enable_automatic_updates": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: true, + }, + + "provision_vm_agent": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + + "secret": windowsSecretSchema(), + + "timezone": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validate.VirtualMachineTimeZone(), + }, + + "winrm_listener": winRmListenerSchema(), + }, + }, + } +} + +func OrchestratedVirtualMachineScaleSetLinuxConfigurationSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "admin_username": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateAdminUsernameLinux, + }, + + "admin_password": 
{ + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + DiffSuppressFunc: adminPasswordDiffSuppressFunc, + ValidateFunc: validatePasswordComplexityLinux, + }, + + "admin_ssh_key": SSHKeysSchema(false), + "computer_name_prefix": computerPrefixLinuxSchema(), + + "disable_password_authentication": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: true, + }, + + "provision_vm_agent": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + + "secret": linuxSecretSchema(), + }, + }, + } +} + +// func OrchestratedVirtualMachineScaleSetExtensionsSchema() *pluginsdk.Schema { +// return &pluginsdk.Schema{ +// Type: pluginsdk.TypeSet, +// Optional: true, +// Computed: true, +// Elem: &pluginsdk.Resource{ +// Schema: map[string]*pluginsdk.Schema{ +// "name": { +// Type: pluginsdk.TypeString, +// Required: true, +// ValidateFunc: validation.StringIsNotEmpty, +// }, + +// "publisher": { +// Type: pluginsdk.TypeString, +// Required: true, +// ValidateFunc: validation.StringIsNotEmpty, +// }, + +// "type": { +// Type: pluginsdk.TypeString, +// Required: true, +// ValidateFunc: validation.StringIsNotEmpty, +// }, + +// "type_handler_version": { +// Type: pluginsdk.TypeString, +// Required: true, +// ValidateFunc: validation.StringIsNotEmpty, +// }, + +// "auto_upgrade_minor_version": { +// Type: pluginsdk.TypeBool, +// Optional: true, +// Default: true, +// }, + +// "force_update_tag": { +// Type: pluginsdk.TypeString, +// Optional: true, +// }, + +// "protected_settings": { +// Type: pluginsdk.TypeString, +// Optional: true, +// Sensitive: true, +// ValidateFunc: validation.StringIsJSON, +// }, + +// "provision_after_extensions": { +// Type: pluginsdk.TypeList, +// Optional: true, +// Elem: &pluginsdk.Schema{ +// Type: pluginsdk.TypeString, +// }, +// }, + +// "settings": { +// Type: pluginsdk.TypeString, +// Optional: true, +// ValidateFunc: validation.StringIsJSON, +// DiffSuppressFunc: pluginsdk.SuppressJsonDiff, +// }, +// }, +// }, +// } +// } + +func OrchestratedVirtualMachineScaleSetIdentitySchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "type": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + string(compute.ResourceIdentityTypeUserAssigned), + string(compute.ResourceIdentityTypeNone), + }, false), + }, + + "identity_ids": { + Type: pluginsdk.TypeSet, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + "principal_id": { + Type: pluginsdk.TypeString, + Computed: true, + }, + }, + }, + } +} + +func OrchestratedVirtualMachineScaleSetNetworkInterfaceSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "ip_configuration": orchestratedVirtualMachineScaleSetIPConfigurationSchema(), + + "dns_servers": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, + + "enable_accelerated_networking": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: false, + }, + + "enable_ip_forwarding": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: false, + 
}, + + "network_security_group_id": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: azure.ValidateResourceIDOrEmpty, + }, + + "primary": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + } +} + +func OrchestratedVirtualMachineScaleSetNetworkInterfaceSchemaForDataSource() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, + Computed: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "ip_configuration": orchestratedVirtualMachineScaleSetIPConfigurationSchemaForDataSource(), + + "dns_servers": { + Type: pluginsdk.TypeList, + Computed: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + "enable_accelerated_networking": { + Type: pluginsdk.TypeBool, + Computed: true, + }, + "enable_ip_forwarding": { + Type: pluginsdk.TypeBool, + Computed: true, + }, + "network_security_group_id": { + Type: pluginsdk.TypeString, + Computed: true, + }, + "primary": { + Type: pluginsdk.TypeBool, + Computed: true, + }, + }, + }, + } +} + +func orchestratedVirtualMachineScaleSetIPConfigurationSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, + Required: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + // Optional + "application_gateway_backend_address_pool_ids": { + Type: pluginsdk.TypeSet, + Optional: true, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, + }, + + "application_security_group_ids": { + Type: pluginsdk.TypeSet, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: azure.ValidateResourceID, + }, + Set: pluginsdk.HashString, + MaxItems: 20, + }, + + "load_balancer_backend_address_pool_ids": { + Type: pluginsdk.TypeSet, + Optional: true, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, + }, + + // Removed per service team this attribute will never be used in VMSS Flex + // "load_balancer_inbound_nat_rules_ids": { + // Type: pluginsdk.TypeSet, + // Optional: true, + // Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + // Set: pluginsdk.HashString, + // }, + + "primary": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: false, + }, + + "public_ip_address": orchestratedVirtualMachineScaleSetPublicIPAddressSchema(), + + "subnet_id": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: azure.ValidateResourceID, + }, + + "version": { + Type: pluginsdk.TypeString, + Optional: true, + Default: string(compute.IPVersionIPv4), + ValidateFunc: validation.StringInSlice([]string{ + string(compute.IPVersionIPv4), + string(compute.IPVersionIPv6), + }, false), + }, + }, + }, + } +} + +func orchestratedVirtualMachineScaleSetIPConfigurationSchemaForDataSource() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, + Computed: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "application_gateway_backend_address_pool_ids": { + Type: pluginsdk.TypeList, + Computed: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + "application_security_group_ids": { + Type: pluginsdk.TypeList, + Computed: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + 
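Throughout this file each schema helper has a `ForDataSource` twin that marks every attribute `Computed` and drops the validators, because a data source only reads values back from Azure rather than accepting user input. A minimal sketch of that convention, using a hypothetical field name rather than anything from this patch:

```go
// Hypothetical illustration of the resource vs. data-source schema pairing used in this file.
func exampleLabelSchema() *pluginsdk.Schema {
	return &pluginsdk.Schema{
		Type:         pluginsdk.TypeString,
		Optional:     true,                        // settable by the practitioner
		ValidateFunc: validation.StringIsNotEmpty, // validated before the API call
	}
}

func exampleLabelSchemaForDataSource() *pluginsdk.Schema {
	return &pluginsdk.Schema{
		Type:     pluginsdk.TypeString,
		Computed: true, // read-only: populated from the Azure response, no validation needed
	}
}
```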
"load_balancer_backend_address_pool_ids": { + Type: pluginsdk.TypeList, + Computed: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + // Removed per service team this attribute will never be used in VMSS Flex + // "load_balancer_inbound_nat_rules_ids": { + // Type: pluginsdk.TypeList, + // Computed: true, + // Elem: &pluginsdk.Schema{ + // Type: pluginsdk.TypeString, + // }, + // }, + + "primary": { + Type: pluginsdk.TypeBool, + Computed: true, + }, + + "public_ip_address": virtualMachineScaleSetPublicIPAddressSchemaForDataSource(), + + "subnet_id": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "version": { + Type: pluginsdk.TypeString, + Computed: true, + }, + }, + }, + } +} + +func orchestratedVirtualMachineScaleSetPublicIPAddressSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + // Optional + "domain_name_label": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validate.OrchestratedDomainNameLabel, + }, + "idle_timeout_in_minutes": { + Type: pluginsdk.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntBetween(4, 32), + }, + "ip_tag": { + Type: pluginsdk.TypeList, + Optional: true, + ForceNew: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "tag": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "type": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, + }, + }, + // TODO: preview feature + // $ az feature register --namespace Microsoft.Network --name AllowBringYourOwnPublicIpAddress + // $ az provider register -n Microsoft.Network + "public_ip_prefix_id": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: azure.ValidateResourceIDOrEmpty, + }, + }, + }, + } +} + +func computerPrefixWindowsSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + Optional: true, + + // Computed since we reuse the VM name if one's not specified + Computed: true, + ForceNew: true, + ValidateFunc: validate.WindowsComputerNamePrefix, + } +} + +func computerPrefixLinuxSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + Optional: true, + + // Computed since we reuse the VM name if one's not specified + Computed: true, + ForceNew: true, + ValidateFunc: validate.LinuxComputerNamePrefix, + } +} + +func OrchestratedVirtualMachineScaleSetDataDiskSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + // TODO: does this want to be a Set? 
+ Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "caching": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + string(compute.CachingTypesNone), + string(compute.CachingTypesReadOnly), + string(compute.CachingTypesReadWrite), + }, false), + }, + + "create_option": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + string(compute.DiskCreateOptionTypesEmpty), + string(compute.DiskCreateOptionTypesFromImage), + }, false), + Default: string(compute.DiskCreateOptionTypesEmpty), + }, + + "disk_encryption_set_id": { + Type: pluginsdk.TypeString, + Optional: true, + // whilst the API allows updating this value, it's never actually set at Azure's end + // presumably this'll take effect once key rotation is supported a few months post-GA? + // however for now let's make this ForceNew since it can't be (successfully) updated + ForceNew: true, + ValidateFunc: validate.DiskEncryptionSetID, + }, + + "disk_size_gb": { + Type: pluginsdk.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 32767), + }, + + "lun": { + Type: pluginsdk.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(0, 2000), // TODO: confirm upper bounds + }, + + "storage_account_type": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + string(compute.StorageAccountTypesPremiumLRS), + string(compute.StorageAccountTypesStandardLRS), + string(compute.StorageAccountTypesStandardSSDLRS), + string(compute.StorageAccountTypesUltraSSDLRS), + }, false), + }, + + "write_accelerator_enabled": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: false, + }, + + // TODO 3.0 - change this to ultra_ssd_disk_iops_read_write + "disk_iops_read_write": { + Type: pluginsdk.TypeInt, + Optional: true, + Computed: true, + }, + + // TODO 3.0 - change this to ultra_ssd_disk_iops_read_write + "disk_mbps_read_write": { + Type: pluginsdk.TypeInt, + Optional: true, + Computed: true, + }, + }, + }, + } +} + +func OrchestratedVirtualMachineScaleSetOSDiskSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "caching": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + string(compute.CachingTypesNone), + string(compute.CachingTypesReadOnly), + string(compute.CachingTypesReadWrite), + }, false), + }, + "storage_account_type": { + Type: pluginsdk.TypeString, + Required: true, + // whilst this appears in the Update block the API returns this when changing: + // Changing property 'osDisk.managedDisk.storageAccountType' is not allowed + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + // note: OS Disks don't support Ultra SSDs + string(compute.StorageAccountTypesPremiumLRS), + string(compute.StorageAccountTypesStandardLRS), + string(compute.StorageAccountTypesStandardSSDLRS), + }, false), + }, + + "diff_disk_settings": { + Type: pluginsdk.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "option": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + string(compute.DiffDiskOptionsLocal), + }, false), + }, + }, + }, + }, + + "disk_encryption_set_id": { + Type: 
pluginsdk.TypeString, + Optional: true, + // whilst the API allows updating this value, it's never actually set at Azure's end + // presumably this'll take effect once key rotation is supported a few months post-GA? + // however for now let's make this ForceNew since it can't be (successfully) updated + ForceNew: true, + ValidateFunc: validate.DiskEncryptionSetID, + }, + + "disk_size_gb": { + Type: pluginsdk.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntBetween(0, 4095), + }, + + "write_accelerator_enabled": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + } +} + +func OrchestratedVirtualMachineScaleSetTerminateNotificationSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "enabled": { + Type: pluginsdk.TypeBool, + Required: true, + }, + "timeout": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: azValidate.ISO8601Duration, + Default: "PT5M", + }, + }, + }, + } +} + +func OrchestratedVirtualMachineScaleSetAutomaticRepairsPolicySchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "enabled": { + Type: pluginsdk.TypeBool, + Required: true, + }, + "grace_period": { + Type: pluginsdk.TypeString, + Optional: true, + Default: "PT30M", + // this field actually has a range from 30m to 90m, is there a function that can do this validation? + ValidateFunc: azValidate.ISO8601Duration, + }, + }, + }, + } +} + +func FlattenOrchestratedVirtualMachineScaleSetOSProfile(input *compute.VirtualMachineScaleSetOSProfile, d *pluginsdk.ResourceData) []interface{} { + if input == nil { + return []interface{}{} + } + + output := make(map[string]interface{}) + + if input.CustomData != nil { + output["custom_data"] = *input.CustomData + } else { + // look up the current custom data + output["custom_data"] = utils.Base64EncodeIfNot(d.Get("os_profile.0.custom_data").(string)) + } + + if winConfig := input.WindowsConfiguration; winConfig != nil { + output["windows_configuration"] = flattenOrchestratedVirtualMachineScaleSetWindowsConfiguration(input, d) + } + + if linConfig := input.LinuxConfiguration; linConfig != nil { + output["linux_configuration"] = flattenOrchestratedVirtualMachineScaleSetLinuxConfiguration(input, d) + } + + return []interface{}{output} +} + +func validateAdminUsernameWindows(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + // **Disallowed values:** + invalidUserNames := []string{ + " ", "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", + "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", + "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5", + } + + for _, str := range invalidUserNames { + if strings.EqualFold(v, str) { + errors = append(errors, fmt.Errorf("%q can not be one of %v, got %q", key, invalidUserNames, v)) + return warnings, errors + } + } + + // Cannot end in "." 
+ if strings.HasSuffix(input.(string), ".") { + errors = append(errors, fmt.Errorf("%q can not end with a '.', got %q", key, v)) + return warnings, errors + } + + if len(v) < 1 || len(v) > 20 { + errors = append(errors, fmt.Errorf("%q must be between 1 and 20 characters in length, got %q(%d characters)", key, v, len(v))) + return warnings, errors + } + + return +} + +func validateAdminUsernameLinux(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + // **Disallowed values:** + invalidUserNames := []string{ + " ", "abrt", "adm", "admin", "audio", "backup", "bin", "cdrom", "cgred", "console", "crontab", "daemon", "dbus", "dialout", "dip", + "disk", "fax", "floppy", "ftp", "fuse", "games", "gnats", "gopher", "haldaemon", "halt", "irc", "kmem", "landscape", "libuuid", "list", + "lock", "lp", "mail", "maildrop", "man", "mem", "messagebus", "mlocate", "modem", "netdev", "news", "nfsnobody", "nobody", "nogroup", + "ntp", "operator", "oprofile", "plugdev", "polkituser", "postdrop", "postfix", "proxy", "public", "qpidd", "root", "rpc", "rpcuser", + "sasl", "saslauth", "shadow", "shutdown", "slocate", "src", "ssh", "sshd", "staff", "stapdev", "stapusr", "sudo", "sync", "sys", "syslog", + "tape", "tcpdump", "test", "trusted", "tty", "users", "utempter", "utmp", "uucp", "uuidd", "vcsa", "video", "voice", "wheel", "whoopsie", + "www", "www-data", "wwwrun", "xok", + } + + for _, str := range invalidUserNames { + if strings.EqualFold(v, str) { + errors = append(errors, fmt.Errorf("%q can not be one of %s, got %q", key, azure.QuotedStringSlice(invalidUserNames), v)) + return warnings, errors + } + } + + if len(v) < 1 || len(v) > 64 { + errors = append(errors, fmt.Errorf("%q must be between 1 and 64 characters in length, got %q(%d characters)", key, v, len(v))) + return warnings, errors + } + + return +} + +func validatePasswordComplexityWindows(input interface{}, key string) (warnings []string, errors []error) { + return validatePasswordComplexity(input, key, 8, 123) +} + +func validatePasswordComplexityLinux(input interface{}, key string) (warnings []string, errors []error) { + return validatePasswordComplexity(input, key, 6, 72) +} + +func validatePasswordComplexity(input interface{}, key string, min int, max int) (warnings []string, errors []error) { + password, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return warnings, errors + } + + complexityMatch := 0 + re := regexp.MustCompile(`[a-z]{1,}`) + if re != nil && re.MatchString(password) { + complexityMatch++ + } + + re = regexp.MustCompile(`[A-Z]{1,}`) + if re != nil && re.MatchString(password) { + complexityMatch++ + } + + re = regexp.MustCompile(`[0-9]{1,}`) + if re != nil && re.MatchString(password) { + complexityMatch++ + } + + re = regexp.MustCompile(`[\W_]{1,}`) + if re != nil && re.MatchString(password) { + complexityMatch++ + } + + if complexityMatch < 3 { + errors = append(errors, fmt.Errorf("%q did not meet minimum password complexity requirements. A password must contain at least 3 of the 4 following conditions: a lower case character, a upper case character, a digit and/or a special character. Got %q", key, password)) + return warnings, errors + } + + if len(password) < min || len(password) > max { + errors = append(errors, fmt.Errorf("%q must be at least 6 characters long and less than 72 characters long. 
Got %q(%d characters)", key, password, len(password))) + return warnings, errors + } + + // NOTE: I realize that some of these will not pass the above complexity checks, but they are in the API so I am checking + // the same values that the API is... + disallowedValues := []string{ + "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22", "iloveyou!", + } + + for _, str := range disallowedValues { + if password == str { + errors = append(errors, fmt.Errorf("%q can not be one of %s, got %q", key, azure.QuotedStringSlice(disallowedValues), password)) + return warnings, errors + } + } + + return warnings, errors +} + +func expandOrchestratedVirtualMachineScaleSetOsProfileWithWindowsConfiguration(input map[string]interface{}, customData string) *compute.VirtualMachineScaleSetOSProfile { + osProfile := compute.VirtualMachineScaleSetOSProfile{} + winConfig := compute.WindowsConfiguration{} + + if len(input) > 0 { + osProfile.CustomData = utils.String(customData) + osProfile.AdminUsername = utils.String(input["admin_username"].(string)) + osProfile.AdminPassword = utils.String(input["admin_password"].(string)) + + if computerPrefix := input["computer_name_prefix"].(string); computerPrefix != "" { + osProfile.ComputerNamePrefix = utils.String(computerPrefix) + } + + if secrets := input["secret"].([]interface{}); len(secrets) > 0 { + osProfile.Secrets = expandWindowsSecrets(secrets) + } + + // I am only commenting this out as this is going to be supported in the next release of the API in October 2021 + // winConfig.AdditionalUnattendContent = expandWindowsConfigurationAdditionalUnattendContent(input["additional_unattend_content"].([]interface{})) + winConfig.EnableAutomaticUpdates = utils.Bool(input["enable_automatic_updates"].(bool)) + winConfig.ProvisionVMAgent = utils.Bool(input["provision_vm_agent"].(bool)) + winConfig.TimeZone = utils.String(input["timezone"].(string)) + winRmListenersRaw := input["winrm_listener"].(*pluginsdk.Set).List() + winConfig.WinRM = expandWinRMListener(winRmListenersRaw) + } + + osProfile.WindowsConfiguration = &winConfig + + return &osProfile +} + +func expandOrchestratedVirtualMachineScaleSetOsProfileWithLinuxConfiguration(input map[string]interface{}, customData string) *compute.VirtualMachineScaleSetOSProfile { + osProfile := compute.VirtualMachineScaleSetOSProfile{} + linConfig := compute.LinuxConfiguration{} + + if len(input) > 0 { + osProfile.CustomData = utils.String(customData) + osProfile.AdminUsername = utils.String(input["admin_username"].(string)) + + if adminPassword := input["admin_password"].(string); adminPassword != "" { + osProfile.AdminPassword = utils.String(adminPassword) + } + + if computerPrefix := input["computer_name_prefix"].(string); computerPrefix != "" { + osProfile.ComputerNamePrefix = utils.String(computerPrefix) + } + + if secrets := input["secret"].([]interface{}); len(secrets) > 0 { + osProfile.Secrets = expandLinuxSecrets(secrets) + } + + if sshPublicKeys := ExpandSSHKeys(input["admin_ssh_key"].(*pluginsdk.Set).List()); len(sshPublicKeys) > 0 { + if linConfig.SSH == nil { + linConfig.SSH = &compute.SSHConfiguration{} + } + linConfig.SSH.PublicKeys = &sshPublicKeys + } + + linConfig.DisablePasswordAuthentication = utils.Bool(input["disable_password_authentication"].(bool)) + linConfig.ProvisionVMAgent = utils.Bool(input["provision_vm_agent"].(bool)) + } + + osProfile.LinuxConfiguration = &linConfig + + return &osProfile +} + +// I am only commenting this out as this is 
going to be supported in the next release of the API in October 2021 +// func expandWindowsConfigurationAdditionalUnattendContent(input []interface{}) *[]compute.AdditionalUnattendContent { +// output := make([]compute.AdditionalUnattendContent, 0) + +// for _, v := range input { +// raw := v.(map[string]interface{}) + +// output = append(output, compute.AdditionalUnattendContent{ +// SettingName: compute.SettingNames(raw["setting"].(string)), +// Content: utils.String(raw["content"].(string)), + +// // no other possible values +// PassName: compute.PassNamesOobeSystem, +// ComponentName: compute.ComponentNamesMicrosoftWindowsShellSetup, +// }) +// } + +// return &output +// } + +func ExpandOrchestratedVirtualMachineScaleSetIdentity(input []interface{}) (*compute.VirtualMachineScaleSetIdentity, error) { + if len(input) == 0 { + // TODO: Does this want to be this, or nil? + return &compute.VirtualMachineScaleSetIdentity{ + Type: compute.ResourceIdentityTypeNone, + }, nil + } + + raw := input[0].(map[string]interface{}) + + identity := compute.VirtualMachineScaleSetIdentity{ + Type: compute.ResourceIdentityType(raw["type"].(string)), + } + + identityIdsRaw := raw["identity_ids"].(*pluginsdk.Set).List() + identityIds := make(map[string]*compute.VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue) + for _, v := range identityIdsRaw { + identityIds[v.(string)] = &compute.VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue{} + } + + if len(identityIds) > 0 { + if identity.Type != compute.ResourceIdentityTypeUserAssigned && identity.Type != compute.ResourceIdentityTypeSystemAssignedUserAssigned { + return nil, fmt.Errorf("`identity_ids` can only be specified when `type` includes `UserAssigned`") + } + + identity.UserAssignedIdentities = identityIds + } + + return &identity, nil +} + +func ExpandOrchestratedVirtualMachineScaleSetNetworkInterface(input []interface{}) (*[]compute.VirtualMachineScaleSetNetworkConfiguration, error) { + output := make([]compute.VirtualMachineScaleSetNetworkConfiguration, 0) + + for _, v := range input { + raw := v.(map[string]interface{}) + + dnsServers := utils.ExpandStringSlice(raw["dns_servers"].([]interface{})) + + ipConfigurations := make([]compute.VirtualMachineScaleSetIPConfiguration, 0) + ipConfigurationsRaw := raw["ip_configuration"].([]interface{}) + for _, configV := range ipConfigurationsRaw { + configRaw := configV.(map[string]interface{}) + ipConfiguration, err := expandOrchestratedVirtualMachineScaleSetIPConfiguration(configRaw) + if err != nil { + return nil, err + } + + ipConfigurations = append(ipConfigurations, *ipConfiguration) + } + + config := compute.VirtualMachineScaleSetNetworkConfiguration{ + Name: utils.String(raw["name"].(string)), + VirtualMachineScaleSetNetworkConfigurationProperties: &compute.VirtualMachineScaleSetNetworkConfigurationProperties{ + DNSSettings: &compute.VirtualMachineScaleSetNetworkConfigurationDNSSettings{ + DNSServers: dnsServers, + }, + EnableAcceleratedNetworking: utils.Bool(raw["enable_accelerated_networking"].(bool)), + EnableIPForwarding: utils.Bool(raw["enable_ip_forwarding"].(bool)), + IPConfigurations: &ipConfigurations, + Primary: utils.Bool(raw["primary"].(bool)), + }, + } + + if nsgId := raw["network_security_group_id"].(string); nsgId != "" { + config.VirtualMachineScaleSetNetworkConfigurationProperties.NetworkSecurityGroup = &compute.SubResource{ + ID: utils.String(nsgId), + } + } + + output = append(output, config) + } + + return &output, nil +} + +func 
expandOrchestratedVirtualMachineScaleSetIPConfiguration(raw map[string]interface{}) (*compute.VirtualMachineScaleSetIPConfiguration, error) { + applicationGatewayBackendAddressPoolIdsRaw := raw["application_gateway_backend_address_pool_ids"].(*pluginsdk.Set).List() + applicationGatewayBackendAddressPoolIds := expandIDsToSubResources(applicationGatewayBackendAddressPoolIdsRaw) + + applicationSecurityGroupIdsRaw := raw["application_security_group_ids"].(*pluginsdk.Set).List() + applicationSecurityGroupIds := expandIDsToSubResources(applicationSecurityGroupIdsRaw) + + loadBalancerBackendAddressPoolIdsRaw := raw["load_balancer_backend_address_pool_ids"].(*pluginsdk.Set).List() + loadBalancerBackendAddressPoolIds := expandIDsToSubResources(loadBalancerBackendAddressPoolIdsRaw) + + // Removed per service team this attribute will never be used in VMSS Flex + // loadBalancerInboundNatPoolIdsRaw := raw["load_balancer_inbound_nat_rules_ids"].(*pluginsdk.Set).List() + // loadBalancerInboundNatPoolIds := expandIDsToSubResources(loadBalancerInboundNatPoolIdsRaw) + + primary := raw["primary"].(bool) + version := compute.IPVersion(raw["version"].(string)) + if primary && version == compute.IPVersionIPv6 { + return nil, fmt.Errorf("an IPv6 Primary IP Configuration is unsupported - instead add a IPv4 IP Configuration as the Primary and make the IPv6 IP Configuration the secondary") + } + + ipConfiguration := compute.VirtualMachineScaleSetIPConfiguration{ + Name: utils.String(raw["name"].(string)), + VirtualMachineScaleSetIPConfigurationProperties: &compute.VirtualMachineScaleSetIPConfigurationProperties{ + Primary: utils.Bool(primary), + PrivateIPAddressVersion: version, + ApplicationGatewayBackendAddressPools: applicationGatewayBackendAddressPoolIds, + ApplicationSecurityGroups: applicationSecurityGroupIds, + LoadBalancerBackendAddressPools: loadBalancerBackendAddressPoolIds, + // Removed per service team this attribute will never be used in VMSS Flex + // LoadBalancerInboundNatPools: loadBalancerInboundNatPoolIds, + }, + } + + if subnetId := raw["subnet_id"].(string); subnetId != "" { + ipConfiguration.VirtualMachineScaleSetIPConfigurationProperties.Subnet = &compute.APIEntityReference{ + ID: utils.String(subnetId), + } + } + + publicIPConfigsRaw := raw["public_ip_address"].([]interface{}) + if len(publicIPConfigsRaw) > 0 { + publicIPConfigRaw := publicIPConfigsRaw[0].(map[string]interface{}) + publicIPAddressConfig := expandOrchestratedVirtualMachineScaleSetPublicIPAddress(publicIPConfigRaw) + ipConfiguration.VirtualMachineScaleSetIPConfigurationProperties.PublicIPAddressConfiguration = publicIPAddressConfig + } + + return &ipConfiguration, nil +} + +func expandOrchestratedVirtualMachineScaleSetPublicIPAddress(raw map[string]interface{}) *compute.VirtualMachineScaleSetPublicIPAddressConfiguration { + ipTagsRaw := raw["ip_tag"].([]interface{}) + ipTags := make([]compute.VirtualMachineScaleSetIPTag, 0) + for _, ipTagV := range ipTagsRaw { + ipTagRaw := ipTagV.(map[string]interface{}) + ipTags = append(ipTags, compute.VirtualMachineScaleSetIPTag{ + Tag: utils.String(ipTagRaw["tag"].(string)), + IPTagType: utils.String(ipTagRaw["type"].(string)), + }) + } + + publicIPAddressConfig := compute.VirtualMachineScaleSetPublicIPAddressConfiguration{ + Name: utils.String(raw["name"].(string)), + VirtualMachineScaleSetPublicIPAddressConfigurationProperties: &compute.VirtualMachineScaleSetPublicIPAddressConfigurationProperties{ + IPTags: &ipTags, + }, + } + + if domainNameLabel := raw["domain_name_label"].(string); 
domainNameLabel != "" { + dns := &compute.VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings{ + DomainNameLabel: utils.String(domainNameLabel), + } + publicIPAddressConfig.VirtualMachineScaleSetPublicIPAddressConfigurationProperties.DNSSettings = dns + } + + if idleTimeout := raw["idle_timeout_in_minutes"].(int); idleTimeout > 0 { + publicIPAddressConfig.VirtualMachineScaleSetPublicIPAddressConfigurationProperties.IdleTimeoutInMinutes = utils.Int32(int32(raw["idle_timeout_in_minutes"].(int))) + } + + if publicIPPrefixID := raw["public_ip_prefix_id"].(string); publicIPPrefixID != "" { + publicIPAddressConfig.VirtualMachineScaleSetPublicIPAddressConfigurationProperties.PublicIPPrefix = &compute.SubResource{ + ID: utils.String(publicIPPrefixID), + } + } + + return &publicIPAddressConfig +} + +func ExpandOrchestratedVirtualMachineScaleSetNetworkInterfaceUpdate(input []interface{}) (*[]compute.VirtualMachineScaleSetUpdateNetworkConfiguration, error) { + output := make([]compute.VirtualMachineScaleSetUpdateNetworkConfiguration, 0) + + for _, v := range input { + raw := v.(map[string]interface{}) + + dnsServers := utils.ExpandStringSlice(raw["dns_servers"].([]interface{})) + + ipConfigurations := make([]compute.VirtualMachineScaleSetUpdateIPConfiguration, 0) + ipConfigurationsRaw := raw["ip_configuration"].([]interface{}) + for _, configV := range ipConfigurationsRaw { + configRaw := configV.(map[string]interface{}) + ipConfiguration, err := expandOrchestratedVirtualMachineScaleSetIPConfigurationUpdate(configRaw) + if err != nil { + return nil, err + } + + ipConfigurations = append(ipConfigurations, *ipConfiguration) + } + + config := compute.VirtualMachineScaleSetUpdateNetworkConfiguration{ + Name: utils.String(raw["name"].(string)), + VirtualMachineScaleSetUpdateNetworkConfigurationProperties: &compute.VirtualMachineScaleSetUpdateNetworkConfigurationProperties{ + DNSSettings: &compute.VirtualMachineScaleSetNetworkConfigurationDNSSettings{ + DNSServers: dnsServers, + }, + EnableAcceleratedNetworking: utils.Bool(raw["enable_accelerated_networking"].(bool)), + EnableIPForwarding: utils.Bool(raw["enable_ip_forwarding"].(bool)), + IPConfigurations: &ipConfigurations, + Primary: utils.Bool(raw["primary"].(bool)), + }, + } + + if nsgId := raw["network_security_group_id"].(string); nsgId != "" { + config.VirtualMachineScaleSetUpdateNetworkConfigurationProperties.NetworkSecurityGroup = &compute.SubResource{ + ID: utils.String(nsgId), + } + } + + output = append(output, config) + } + + return &output, nil +} + +func expandOrchestratedVirtualMachineScaleSetIPConfigurationUpdate(raw map[string]interface{}) (*compute.VirtualMachineScaleSetUpdateIPConfiguration, error) { + applicationGatewayBackendAddressPoolIdsRaw := raw["application_gateway_backend_address_pool_ids"].(*pluginsdk.Set).List() + applicationGatewayBackendAddressPoolIds := expandIDsToSubResources(applicationGatewayBackendAddressPoolIdsRaw) + + applicationSecurityGroupIdsRaw := raw["application_security_group_ids"].(*pluginsdk.Set).List() + applicationSecurityGroupIds := expandIDsToSubResources(applicationSecurityGroupIdsRaw) + + loadBalancerBackendAddressPoolIdsRaw := raw["load_balancer_backend_address_pool_ids"].(*pluginsdk.Set).List() + loadBalancerBackendAddressPoolIds := expandIDsToSubResources(loadBalancerBackendAddressPoolIdsRaw) + + // Removed per service team this attribute will never be used in VMSS Flex + // loadBalancerInboundNatPoolIdsRaw := raw["load_balancer_inbound_nat_rules_ids"].(*pluginsdk.Set).List() + // 
loadBalancerInboundNatPoolIds := expandIDsToSubResources(loadBalancerInboundNatPoolIdsRaw) + + primary := raw["primary"].(bool) + version := compute.IPVersion(raw["version"].(string)) + + if primary && version == compute.IPVersionIPv6 { + return nil, fmt.Errorf("an IPv6 Primary IP Configuration is unsupported - instead add a IPv4 IP Configuration as the Primary and make the IPv6 IP Configuration the secondary") + } + + ipConfiguration := compute.VirtualMachineScaleSetUpdateIPConfiguration{ + Name: utils.String(raw["name"].(string)), + VirtualMachineScaleSetUpdateIPConfigurationProperties: &compute.VirtualMachineScaleSetUpdateIPConfigurationProperties{ + Primary: utils.Bool(primary), + PrivateIPAddressVersion: version, + ApplicationGatewayBackendAddressPools: applicationGatewayBackendAddressPoolIds, + ApplicationSecurityGroups: applicationSecurityGroupIds, + LoadBalancerBackendAddressPools: loadBalancerBackendAddressPoolIds, + // Removed per service team this attribute will never be used in VMSS Flex + // LoadBalancerInboundNatPools: loadBalancerInboundNatPoolIds, + }, + } + + if subnetId := raw["subnet_id"].(string); subnetId != "" { + ipConfiguration.VirtualMachineScaleSetUpdateIPConfigurationProperties.Subnet = &compute.APIEntityReference{ + ID: utils.String(subnetId), + } + } + + publicIPConfigsRaw := raw["public_ip_address"].([]interface{}) + if len(publicIPConfigsRaw) > 0 { + publicIPConfigRaw := publicIPConfigsRaw[0].(map[string]interface{}) + publicIPAddressConfig := expandOrchestratedVirtualMachineScaleSetPublicIPAddressUpdate(publicIPConfigRaw) + ipConfiguration.VirtualMachineScaleSetUpdateIPConfigurationProperties.PublicIPAddressConfiguration = publicIPAddressConfig + } + + return &ipConfiguration, nil +} + +func expandOrchestratedVirtualMachineScaleSetPublicIPAddressUpdate(raw map[string]interface{}) *compute.VirtualMachineScaleSetUpdatePublicIPAddressConfiguration { + publicIPAddressConfig := compute.VirtualMachineScaleSetUpdatePublicIPAddressConfiguration{ + Name: utils.String(raw["name"].(string)), + VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties: &compute.VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties{}, + } + + if domainNameLabel := raw["domain_name_label"].(string); domainNameLabel != "" { + dns := &compute.VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings{ + DomainNameLabel: utils.String(domainNameLabel), + } + publicIPAddressConfig.VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties.DNSSettings = dns + } + + if idleTimeout := raw["idle_timeout_in_minutes"].(int); idleTimeout > 0 { + publicIPAddressConfig.VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties.IdleTimeoutInMinutes = utils.Int32(int32(raw["idle_timeout_in_minutes"].(int))) + } + + return &publicIPAddressConfig +} + +func ExpandOrchestratedVirtualMachineScaleSetDataDisk(input []interface{}, ultraSSDEnabled bool) (*[]compute.VirtualMachineScaleSetDataDisk, error) { + disks := make([]compute.VirtualMachineScaleSetDataDisk, 0) + + for _, v := range input { + raw := v.(map[string]interface{}) + + disk := compute.VirtualMachineScaleSetDataDisk{ + Caching: compute.CachingTypes(raw["caching"].(string)), + DiskSizeGB: utils.Int32(int32(raw["disk_size_gb"].(int))), + Lun: utils.Int32(int32(raw["lun"].(int))), + ManagedDisk: &compute.VirtualMachineScaleSetManagedDiskParameters{ + StorageAccountType: compute.StorageAccountTypes(raw["storage_account_type"].(string)), + }, + WriteAcceleratorEnabled: 
utils.Bool(raw["write_accelerator_enabled"].(bool)), + CreateOption: compute.DiskCreateOptionTypes(raw["create_option"].(string)), + } + + if id := raw["disk_encryption_set_id"].(string); id != "" { + disk.ManagedDisk.DiskEncryptionSet = &compute.DiskEncryptionSetParameters{ + ID: utils.String(id), + } + } + + if iops := raw["disk_iops_read_write"].(int); iops != 0 { + if !ultraSSDEnabled { + return nil, fmt.Errorf("`disk_iops_read_write` are only available for UltraSSD disks") + } + disk.DiskIOPSReadWrite = utils.Int64(int64(iops)) + } + + if mbps := raw["disk_mbps_read_write"].(int); mbps != 0 { + if !ultraSSDEnabled { + return nil, fmt.Errorf("`disk_mbps_read_write` are only available for UltraSSD disks") + } + disk.DiskMBpsReadWrite = utils.Int64(int64(mbps)) + } + + disks = append(disks, disk) + } + + return &disks, nil +} + +func ExpandOrchestratedVirtualMachineScaleSetOSDisk(input []interface{}, osType compute.OperatingSystemTypes) *compute.VirtualMachineScaleSetOSDisk { + raw := input[0].(map[string]interface{}) + disk := compute.VirtualMachineScaleSetOSDisk{ + Caching: compute.CachingTypes(raw["caching"].(string)), + ManagedDisk: &compute.VirtualMachineScaleSetManagedDiskParameters{ + StorageAccountType: compute.StorageAccountTypes(raw["storage_account_type"].(string)), + }, + WriteAcceleratorEnabled: utils.Bool(raw["write_accelerator_enabled"].(bool)), + + // these have to be hard-coded so there's no point exposing them + CreateOption: compute.DiskCreateOptionTypesFromImage, + OsType: osType, + } + + if diskEncryptionSetId := raw["disk_encryption_set_id"].(string); diskEncryptionSetId != "" { + disk.ManagedDisk.DiskEncryptionSet = &compute.DiskEncryptionSetParameters{ + ID: utils.String(diskEncryptionSetId), + } + } + + if osDiskSize := raw["disk_size_gb"].(int); osDiskSize > 0 { + disk.DiskSizeGB = utils.Int32(int32(osDiskSize)) + } + + if diffDiskSettingsRaw := raw["diff_disk_settings"].([]interface{}); len(diffDiskSettingsRaw) > 0 { + diffDiskRaw := diffDiskSettingsRaw[0].(map[string]interface{}) + disk.DiffDiskSettings = &compute.DiffDiskSettings{ + Option: compute.DiffDiskOptions(diffDiskRaw["option"].(string)), + } + } + + return &disk +} + +func ExpandOrchestratedVirtualMachineScaleSetOSDiskUpdate(input []interface{}) *compute.VirtualMachineScaleSetUpdateOSDisk { + raw := input[0].(map[string]interface{}) + disk := compute.VirtualMachineScaleSetUpdateOSDisk{ + Caching: compute.CachingTypes(raw["caching"].(string)), + ManagedDisk: &compute.VirtualMachineScaleSetManagedDiskParameters{ + StorageAccountType: compute.StorageAccountTypes(raw["storage_account_type"].(string)), + }, + WriteAcceleratorEnabled: utils.Bool(raw["write_accelerator_enabled"].(bool)), + } + + if diskEncryptionSetId := raw["disk_encryption_set_id"].(string); diskEncryptionSetId != "" { + disk.ManagedDisk.DiskEncryptionSet = &compute.DiskEncryptionSetParameters{ + ID: utils.String(diskEncryptionSetId), + } + } + + if osDiskSize := raw["disk_size_gb"].(int); osDiskSize > 0 { + disk.DiskSizeGB = utils.Int32(int32(osDiskSize)) + } + + return &disk +} + +func ExpandOrchestratedVirtualMachineScaleSetScheduledEventsProfile(input []interface{}) *compute.ScheduledEventsProfile { + if len(input) == 0 { + return nil + } + + raw := input[0].(map[string]interface{}) + enabled := raw["enabled"].(bool) + timeout := raw["timeout"].(string) + + return &compute.ScheduledEventsProfile{ + TerminateNotificationProfile: &compute.TerminateNotificationProfile{ + Enable: &enabled, + NotBeforeTimeout: &timeout, + }, + } +} + +func 
ExpandOrchestratedVirtualMachineScaleSetAutomaticRepairsPolicy(input []interface{}) *compute.AutomaticRepairsPolicy { + if len(input) == 0 { + return nil + } + + raw := input[0].(map[string]interface{}) + + return &compute.AutomaticRepairsPolicy{ + Enabled: utils.Bool(raw["enabled"].(bool)), + GracePeriod: utils.String(raw["grace_period"].(string)), + } +} + +// func expandOrchestratedVirtualMachineScaleSetExtensions(input []interface{}) (extensionProfile *compute.VirtualMachineScaleSetExtensionProfile, err error) { +// extensionProfile = &compute.VirtualMachineScaleSetExtensionProfile{} +// if len(input) == 0 { +// return nil, nil +// } + +// extensions := make([]compute.VirtualMachineScaleSetExtension, 0) +// for _, v := range input { +// extensionRaw := v.(map[string]interface{}) +// extension := compute.VirtualMachineScaleSetExtension{ +// Name: utils.String(extensionRaw["name"].(string)), +// } +// extensionType := extensionRaw["type"].(string) + +// extensionProps := compute.VirtualMachineScaleSetExtensionProperties{ +// Publisher: utils.String(extensionRaw["publisher"].(string)), +// Type: &extensionType, +// TypeHandlerVersion: utils.String(extensionRaw["type_handler_version"].(string)), +// AutoUpgradeMinorVersion: utils.Bool(extensionRaw["auto_upgrade_minor_version"].(bool)), +// ProvisionAfterExtensions: utils.ExpandStringSlice(extensionRaw["provision_after_extensions"].([]interface{})), +// } + +// // Leaving this here as it is going to be in the GA API +// // if extensionType == "ApplicationHealthLinux" || extensionType == "ApplicationHealthWindows" { +// // hasHealthExtension = true +// // } + +// if forceUpdateTag := extensionRaw["force_update_tag"]; forceUpdateTag != nil { +// extensionProps.ForceUpdateTag = utils.String(forceUpdateTag.(string)) +// } + +// if val, ok := extensionRaw["settings"]; ok && val.(string) != "" { +// settings, err := pluginsdk.ExpandJsonFromString(val.(string)) +// if err != nil { +// return nil, fmt.Errorf("failed to parse JSON from `settings`: %+v", err) +// } +// extensionProps.Settings = settings +// } + +// if val, ok := extensionRaw["protected_settings"]; ok && val.(string) != "" { +// protectedSettings, err := pluginsdk.ExpandJsonFromString(val.(string)) +// if err != nil { +// return nil, fmt.Errorf("failed to parse JSON from `protected_settings`: %+v", err) +// } +// extensionProps.ProtectedSettings = protectedSettings +// } + +// extension.VirtualMachineScaleSetExtensionProperties = &extensionProps +// extensions = append(extensions, extension) +// } +// extensionProfile.Extensions = &extensions + +// return extensionProfile, nil +// } + +// func flattenOrchestratedVirtualMachineScaleSetExtensions(input *compute.VirtualMachineScaleSetExtensionProfile, d *pluginsdk.ResourceData) ([]map[string]interface{}, error) { +// result := make([]map[string]interface{}, 0) +// if input == nil || input.Extensions == nil { +// return result, nil +// } + +// for k, v := range *input.Extensions { +// name := "" +// if v.Name != nil { +// name = *v.Name +// } + +// autoUpgradeMinorVersion := false +// forceUpdateTag := "" +// provisionAfterExtension := make([]interface{}, 0) +// protectedSettings := "" +// extPublisher := "" +// extSettings := "" +// extType := "" +// extTypeVersion := "" + +// if props := v.VirtualMachineScaleSetExtensionProperties; props != nil { +// if props.Publisher != nil { +// extPublisher = *props.Publisher +// } + +// if props.Type != nil { +// extType = *props.Type +// } + +// if props.TypeHandlerVersion != nil { +// 
extTypeVersion = *props.TypeHandlerVersion +// } + +// if props.AutoUpgradeMinorVersion != nil { +// autoUpgradeMinorVersion = *props.AutoUpgradeMinorVersion +// } + +// if props.ForceUpdateTag != nil { +// forceUpdateTag = *props.ForceUpdateTag +// } + +// if props.ProvisionAfterExtensions != nil { +// provisionAfterExtension = utils.FlattenStringSlice(props.ProvisionAfterExtensions) +// } + +// if props.Settings != nil { +// extSettingsRaw, err := pluginsdk.FlattenJsonToString(props.Settings.(map[string]interface{})) +// if err != nil { +// return nil, err +// } +// extSettings = extSettingsRaw +// } +// } +// // protected_settings isn't returned, so we attempt to get it from config otherwise set to empty string +// if protectedSettingsFromConfig, ok := d.GetOk(fmt.Sprintf("extension.%d.protected_settings", k)); ok { +// if protectedSettingsFromConfig.(string) != "" && protectedSettingsFromConfig.(string) != "{}" { +// protectedSettings = protectedSettingsFromConfig.(string) +// } +// } + +// result = append(result, map[string]interface{}{ +// "name": name, +// "auto_upgrade_minor_version": autoUpgradeMinorVersion, +// "force_update_tag": forceUpdateTag, +// "provision_after_extensions": provisionAfterExtension, +// "protected_settings": protectedSettings, +// "publisher": extPublisher, +// "settings": extSettings, +// "type": extType, +// "type_handler_version": extTypeVersion, +// }) +// } +// return result, nil +// } + +func FlattenOrchestratedVirtualMachineScaleSetIPConfiguration(input compute.VirtualMachineScaleSetIPConfiguration) map[string]interface{} { + var name, subnetId string + if input.Name != nil { + name = *input.Name + } + if input.Subnet != nil && input.Subnet.ID != nil { + subnetId = *input.Subnet.ID + } + + var primary bool + if input.Primary != nil { + primary = *input.Primary + } + + var publicIPAddresses []interface{} + if input.PublicIPAddressConfiguration != nil { + publicIPAddresses = append(publicIPAddresses, FlattenOrchestratedVirtualMachineScaleSetPublicIPAddress(*input.PublicIPAddressConfiguration)) + } + + applicationGatewayBackendAddressPoolIds := flattenSubResourcesToIDs(input.ApplicationGatewayBackendAddressPools) + applicationSecurityGroupIds := flattenSubResourcesToIDs(input.ApplicationSecurityGroups) + loadBalancerBackendAddressPoolIds := flattenSubResourcesToIDs(input.LoadBalancerBackendAddressPools) + // Removed per service team this attribute will never be used in VMSS Flex + // loadBalancerInboundNatRuleIds := flattenSubResourcesToIDs(input.LoadBalancerInboundNatPools) + + return map[string]interface{}{ + "name": name, + "primary": primary, + "public_ip_address": publicIPAddresses, + "subnet_id": subnetId, + "version": string(input.PrivateIPAddressVersion), + "application_gateway_backend_address_pool_ids": applicationGatewayBackendAddressPoolIds, + "application_security_group_ids": applicationSecurityGroupIds, + "load_balancer_backend_address_pool_ids": loadBalancerBackendAddressPoolIds, + // Removed per service team this attribute will never be used in VMSS Flex + // "load_balancer_inbound_nat_rules_ids": loadBalancerInboundNatRuleIds, + } +} + +func FlattenOrchestratedVirtualMachineScaleSetPublicIPAddress(input compute.VirtualMachineScaleSetPublicIPAddressConfiguration) map[string]interface{} { + ipTags := make([]interface{}, 0) + if input.IPTags != nil { + for _, rawTag := range *input.IPTags { + var tag, tagType string + + if rawTag.IPTagType != nil { + tagType = *rawTag.IPTagType + } + + if rawTag.Tag != nil { + tag = *rawTag.Tag + } + + 
ipTags = append(ipTags, map[string]interface{}{ + "tag": tag, + "type": tagType, + }) + } + } + + var domainNameLabel, name, publicIPPrefixId string + if input.DNSSettings != nil && input.DNSSettings.DomainNameLabel != nil { + domainNameLabel = *input.DNSSettings.DomainNameLabel + } + if input.Name != nil { + name = *input.Name + } + if input.PublicIPPrefix != nil && input.PublicIPPrefix.ID != nil { + publicIPPrefixId = *input.PublicIPPrefix.ID + } + + var idleTimeoutInMinutes int + if input.IdleTimeoutInMinutes != nil { + idleTimeoutInMinutes = int(*input.IdleTimeoutInMinutes) + } + + return map[string]interface{}{ + "name": name, + "domain_name_label": domainNameLabel, + "idle_timeout_in_minutes": idleTimeoutInMinutes, + "ip_tag": ipTags, + "public_ip_prefix_id": publicIPPrefixId, + } +} + +func flattenOrchestratedVirtualMachineScaleSetWindowsConfiguration(input *compute.VirtualMachineScaleSetOSProfile, d *pluginsdk.ResourceData) []interface{} { + if input == nil { + return []interface{}{} + } + + output := make(map[string]interface{}) + winConfig := input.WindowsConfiguration + + if v := input.AdminUsername; v != nil { + output["admin_username"] = *v + } + + if v := d.Get("os_profile").([]interface{}); len(v) > 0 { + osProfile := v[0].(map[string]interface{}) + if winConfigRaw := osProfile["windows_configuration"].([]interface{}); len(winConfigRaw) > 0 { + winCfg := winConfigRaw[0].(map[string]interface{}) + output["admin_password"] = winCfg["admin_password"].(string) + } + } + + if v := input.ComputerNamePrefix; v != nil { + output["computer_name_prefix"] = *v + } + + // I am only commenting this out as this is going to be supported in the next release of the API in October 2021 + // if v := winConfig.AdditionalUnattendContent; v != nil { + // output["additional_unattend_content"] = flattenWindowsConfigurationAdditionalUnattendContent(winConfig) + // } + + if v := winConfig.EnableAutomaticUpdates; v != nil { + output["enable_automatic_updates"] = *v + } + + if v := winConfig.ProvisionVMAgent; v != nil { + output["provision_vm_agent"] = *v + } + + if v := input.Secrets; v != nil { + output["secret"] = flattenWindowsSecrets(v) + } + + if v := winConfig.WinRM; v != nil { + output["winrm_listener"] = flattenWinRMListener(winConfig.WinRM) + } + + if v := winConfig.TimeZone; v != nil { + output["timezone"] = v + } + + return []interface{}{output} +} + +func flattenOrchestratedVirtualMachineScaleSetLinuxConfiguration(input *compute.VirtualMachineScaleSetOSProfile, d *pluginsdk.ResourceData) []interface{} { + if input == nil { + return []interface{}{} + } + + output := make(map[string]interface{}) + linConfig := input.LinuxConfiguration + + if v := input.AdminUsername; v != nil { + output["admin_username"] = *v + } + + if v := d.Get("os_profile").([]interface{}); len(v) > 0 { + osProfile := v[0].(map[string]interface{}) + if linConfigRaw := osProfile["linux_configuration"].([]interface{}); len(linConfigRaw) > 0 { + linCfg := linConfigRaw[0].(map[string]interface{}) + output["admin_password"] = linCfg["admin_password"].(string) + } + } + + // if v := input.AdminPassword; v != nil { + // output["admin_password"] = *v + // } + + if v := linConfig.SSH; v != nil { + if sshKeys, _ := FlattenSSHKeys(v); sshKeys != nil { + output["admin_ssh_key"] = pluginsdk.NewSet(SSHKeySchemaHash, *sshKeys) + } + } + + if v := input.ComputerNamePrefix; v != nil { + output["computer_name_prefix"] = *v + } + + if v := linConfig.DisablePasswordAuthentication; v != nil { + output["disable_password_authentication"] = *v 
+ } + + if v := linConfig.ProvisionVMAgent; v != nil { + output["provision_vm_agent"] = *v + } + + if v := input.Secrets; v != nil { + output["secret"] = flattenLinuxSecrets(v) + } + + return []interface{}{output} +} + +func FlattenOrchestratedVirtualMachineScaleSetNetworkInterface(input *[]compute.VirtualMachineScaleSetNetworkConfiguration) []interface{} { + if input == nil { + return []interface{}{} + } + + results := make([]interface{}, 0) + for _, v := range *input { + var name, networkSecurityGroupId string + if v.Name != nil { + name = *v.Name + } + if v.NetworkSecurityGroup != nil && v.NetworkSecurityGroup.ID != nil { + networkSecurityGroupId = *v.NetworkSecurityGroup.ID + } + + var enableAcceleratedNetworking, enableIPForwarding, primary bool + if v.EnableAcceleratedNetworking != nil { + enableAcceleratedNetworking = *v.EnableAcceleratedNetworking + } + if v.EnableIPForwarding != nil { + enableIPForwarding = *v.EnableIPForwarding + } + if v.Primary != nil { + primary = *v.Primary + } + + var dnsServers []interface{} + if settings := v.DNSSettings; settings != nil { + dnsServers = utils.FlattenStringSlice(v.DNSSettings.DNSServers) + } + + var ipConfigurations []interface{} + if v.IPConfigurations != nil { + for _, configRaw := range *v.IPConfigurations { + config := FlattenOrchestratedVirtualMachineScaleSetIPConfiguration(configRaw) + ipConfigurations = append(ipConfigurations, config) + } + } + + results = append(results, map[string]interface{}{ + "name": name, + "dns_servers": dnsServers, + "enable_accelerated_networking": enableAcceleratedNetworking, + "enable_ip_forwarding": enableIPForwarding, + "ip_configuration": ipConfigurations, + "network_security_group_id": networkSecurityGroupId, + "primary": primary, + }) + } + + return results +} + +func FlattenOrchestratedVirtualMachineScaleSetIdentity(input *compute.VirtualMachineScaleSetIdentity) ([]interface{}, error) { + if input == nil || input.Type == compute.ResourceIdentityTypeNone { + return []interface{}{}, nil + } + + identityIds := make([]string, 0) + if input.UserAssignedIdentities != nil { + for key := range input.UserAssignedIdentities { + parsedId, err := msiparse.UserAssignedIdentityIDInsensitively(key) + if err != nil { + return nil, err + } + identityIds = append(identityIds, parsedId.ID()) + } + } + + principalId := "" + if input.PrincipalID != nil { + principalId = *input.PrincipalID + } + + return []interface{}{ + map[string]interface{}{ + "type": string(input.Type), + "identity_ids": identityIds, + "principal_id": principalId, + }, + }, nil +} + +func FlattenOrchestratedVirtualMachineScaleSetDataDisk(input *[]compute.VirtualMachineScaleSetDataDisk) []interface{} { + if input == nil { + return []interface{}{} + } + + output := make([]interface{}, 0) + + for _, v := range *input { + diskSizeGb := 0 + if v.DiskSizeGB != nil && *v.DiskSizeGB != 0 { + diskSizeGb = int(*v.DiskSizeGB) + } + + lun := 0 + if v.Lun != nil { + lun = int(*v.Lun) + } + + storageAccountType := "" + diskEncryptionSetId := "" + if v.ManagedDisk != nil { + storageAccountType = string(v.ManagedDisk.StorageAccountType) + if v.ManagedDisk.DiskEncryptionSet != nil && v.ManagedDisk.DiskEncryptionSet.ID != nil { + diskEncryptionSetId = *v.ManagedDisk.DiskEncryptionSet.ID + } + } + + writeAcceleratorEnabled := false + if v.WriteAcceleratorEnabled != nil { + writeAcceleratorEnabled = *v.WriteAcceleratorEnabled + } + + iops := 0 + if v.DiskIOPSReadWrite != nil { + iops = int(*v.DiskIOPSReadWrite) + } + + mbps := 0 + if v.DiskMBpsReadWrite != nil { + mbps 
= int(*v.DiskMBpsReadWrite) + } + + output = append(output, map[string]interface{}{ + "caching": string(v.Caching), + "create_option": string(v.CreateOption), + "lun": lun, + "disk_encryption_set_id": diskEncryptionSetId, + "disk_size_gb": diskSizeGb, + "storage_account_type": storageAccountType, + "write_accelerator_enabled": writeAcceleratorEnabled, + "disk_iops_read_write": iops, + "disk_mbps_read_write": mbps, + }) + } + + return output +} + +// I am only commenting this out as this is going to be supported in the next release of the API in October 2021 +// func flattenWindowsConfigurationAdditionalUnattendContent(input *compute.WindowsConfiguration) []interface{} { +// if input == nil { +// return []interface{}{} +// } + +// output := make([]interface{}, 0) +// for _, v := range *input.AdditionalUnattendContent { +// // content isn't returned by the API since it's sensitive data so we need to look it up later +// // where we can pull it out of the state file. +// output = append(output, map[string]interface{}{ +// "content": "", +// "setting": string(v.SettingName), +// }) +// } + +// return output +// } + +func FlattenOrchestratedVirtualMachineScaleSetOSDisk(input *compute.VirtualMachineScaleSetOSDisk) []interface{} { + if input == nil { + return []interface{}{} + } + + diffDiskSettings := make([]interface{}, 0) + if input.DiffDiskSettings != nil { + diffDiskSettings = append(diffDiskSettings, map[string]interface{}{ + "option": string(input.DiffDiskSettings.Option), + }) + } + + diskSizeGb := 0 + if input.DiskSizeGB != nil && *input.DiskSizeGB != 0 { + diskSizeGb = int(*input.DiskSizeGB) + } + + storageAccountType := "" + diskEncryptionSetId := "" + if input.ManagedDisk != nil { + storageAccountType = string(input.ManagedDisk.StorageAccountType) + if input.ManagedDisk.DiskEncryptionSet != nil && input.ManagedDisk.DiskEncryptionSet.ID != nil { + diskEncryptionSetId = *input.ManagedDisk.DiskEncryptionSet.ID + } + } + + writeAcceleratorEnabled := false + if input.WriteAcceleratorEnabled != nil { + writeAcceleratorEnabled = *input.WriteAcceleratorEnabled + } + + return []interface{}{ + map[string]interface{}{ + "caching": string(input.Caching), + "disk_size_gb": diskSizeGb, + "diff_disk_settings": diffDiskSettings, + "storage_account_type": storageAccountType, + "write_accelerator_enabled": writeAcceleratorEnabled, + "disk_encryption_set_id": diskEncryptionSetId, + }, + } +} + +func FlattenOrchestratedVirtualMachineScaleSetScheduledEventsProfile(input *compute.ScheduledEventsProfile) []interface{} { + // if enabled is set to false, there will be no ScheduledEventsProfile in response, to avoid plan non empty when + // a user explicitly set enabled to false, we need to assign a default block to this field + + enabled := false + if input != nil && input.TerminateNotificationProfile != nil && input.TerminateNotificationProfile.Enable != nil { + enabled = *input.TerminateNotificationProfile.Enable + } + + timeout := "PT5M" + if input != nil && input.TerminateNotificationProfile != nil && input.TerminateNotificationProfile.NotBeforeTimeout != nil { + timeout = *input.TerminateNotificationProfile.NotBeforeTimeout + } + + return []interface{}{ + map[string]interface{}{ + "enabled": enabled, + "timeout": timeout, + }, + } +} + +func FlattenOrchestratedVirtualMachineScaleSetAutomaticRepairsPolicy(input *compute.AutomaticRepairsPolicy) []interface{} { + // if enabled is set to false, there will be no AutomaticRepairsPolicy in response, to avoid plan non empty when + // a user explicitly set 
enabled to false, we need to assign a default block to this field + + enabled := false + if input != nil && input.Enabled != nil { + enabled = *input.Enabled + } + + gracePeriod := "PT30M" + if input != nil && input.GracePeriod != nil { + gracePeriod = *input.GracePeriod + } + + return []interface{}{ + map[string]interface{}{ + "enabled": enabled, + "grace_period": gracePeriod, + }, + } +} diff --git a/internal/services/compute/orchestrated_virtual_machine_scale_set_resource.go b/internal/services/compute/orchestrated_virtual_machine_scale_set_resource.go index 2c34714429c4..f748ba9f739d 100644 --- a/internal/services/compute/orchestrated_virtual_machine_scale_set_resource.go +++ b/internal/services/compute/orchestrated_virtual_machine_scale_set_resource.go @@ -3,15 +3,16 @@ package compute import ( "fmt" "log" + "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" "github.com/hashicorp/terraform-provider-azurerm/internal/location" "github.com/hashicorp/terraform-provider-azurerm/internal/services/compute/parse" - "github.com/hashicorp/terraform-provider-azurerm/internal/services/compute/validate" + computeValidate "github.com/hashicorp/terraform-provider-azurerm/internal/services/compute/validate" "github.com/hashicorp/terraform-provider-azurerm/internal/tags" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/suppress" @@ -22,9 +23,9 @@ import ( func resourceOrchestratedVirtualMachineScaleSet() *pluginsdk.Resource { return &pluginsdk.Resource{ - Create: resourceOrchestratedVirtualMachineScaleSetCreateUpdate, + Create: resourceOrchestratedVirtualMachineScaleSetCreate, Read: resourceOrchestratedVirtualMachineScaleSetRead, - Update: resourceOrchestratedVirtualMachineScaleSetCreateUpdate, + Update: resourceOrchestratedVirtualMachineScaleSetUpdate, Delete: resourceOrchestratedVirtualMachineScaleSetDelete, Importer: pluginsdk.ImporterValidatingResourceIdThen(func(id string) error { @@ -33,71 +34,180 @@ func resourceOrchestratedVirtualMachineScaleSet() *pluginsdk.Resource { }, importOrchestratedVirtualMachineScaleSet), Timeouts: &pluginsdk.ResourceTimeout{ - Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Create: pluginsdk.DefaultTimeout(60 * time.Minute), Read: pluginsdk.DefaultTimeout(5 * time.Minute), - Update: pluginsdk.DefaultTimeout(30 * time.Minute), - Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + Update: pluginsdk.DefaultTimeout(60 * time.Minute), + Delete: pluginsdk.DefaultTimeout(60 * time.Minute), }, + // TODO: remove support the the legacy Orchestrated Virtual Machine Scale Set in 3.0 + // TODO: exposing requireGuestProvisionSignal once it's available + // https://github.com/Azure/azure-rest-api-specs/pull/7246 + Schema: map[string]*pluginsdk.Schema{ "name": { Type: pluginsdk.TypeString, Required: true, ForceNew: true, - ValidateFunc: validate.VirtualMachineName, + ValidateFunc: computeValidate.VirtualMachineName, }, "resource_group_name": azure.SchemaResourceGroupName(), "location": azure.SchemaLocation(), + "network_interface": OrchestratedVirtualMachineScaleSetNetworkInterfaceSchema(), + + "os_disk": OrchestratedVirtualMachineScaleSetOSDiskSchema(), + + "instances": { + Type: 
pluginsdk.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntBetween(0, 1000), + }, + + // For sku I will create a format like: tier_sku name. + // NOTE: all of the exposed vm sku tier's are Standard so this will continue to be hardcoded + // Examples: Standard_HC44rs_4, Standard_D48_v3_6, Standard_M64s_20, Standard_HB120-96rs_v3_8 + "sku_name": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: azure.ValidateOrchestratedVirtualMachineScaleSetSku, + }, + + "os_profile": OrchestratedVirtualMachineScaleSetOSProfileSchema(), + + // Optional + "automatic_instance_repair": OrchestratedVirtualMachineScaleSetAutomaticRepairsPolicySchema(), + + "boot_diagnostics": bootDiagnosticsSchema(), + + "data_disk": OrchestratedVirtualMachineScaleSetDataDiskSchema(), + + "encryption_at_host_enabled": { + Type: pluginsdk.TypeBool, + Optional: true, + }, + + "eviction_policy": { + // only applicable when `priority` is set to `Spot` + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + string(compute.VirtualMachineEvictionPolicyTypesDeallocate), + string(compute.VirtualMachineEvictionPolicyTypesDelete), + }, false), + }, + + // Due to bug in RP extensions cannot curretntly be supported in Terraform ETA for full support is mid Jan 2022 + // "extension": OrchestratedVirtualMachineScaleSetExtensionsSchema(), + + // "extensions_time_budget": { + // Type: pluginsdk.TypeString, + // Optional: true, + // Default: "PT1H30M", + // ValidateFunc: validate.ISO8601DurationBetween("PT15M", "PT2H"), + // }, + + "identity": OrchestratedVirtualMachineScaleSetIdentitySchema(), + + "license_type": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + "None", + "Windows_Client", + "Windows_Server", + }, false), + DiffSuppressFunc: func(_, old, new string, _ *pluginsdk.ResourceData) bool { + if old == "None" && new == "" || old == "" && new == "None" { + return true + } + + return false + }, + }, + + "max_bid_price": { + Type: pluginsdk.TypeFloat, + Optional: true, + Default: -1, + ValidateFunc: computeValidate.SpotMaxPrice, + }, + + "plan": planSchema(), + "platform_fault_domain_count": { Type: pluginsdk.TypeInt, Required: true, ForceNew: true, - // The range of this value varies in different locations - ValidateFunc: validation.IntBetween(0, 5), + }, + + "priority": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + Default: string(compute.VirtualMachinePriorityTypesRegular), + ValidateFunc: validation.StringInSlice([]string{ + string(compute.VirtualMachinePriorityTypesRegular), + string(compute.VirtualMachinePriorityTypesSpot), + }, false), }, "proximity_placement_group_id": { Type: pluginsdk.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validate.ProximityPlacementGroupID, - // the Compute/VM API is broken and returns the Resource Group name in UPPERCASE :shrug:, github issue: https://github.com/Azure/azure-rest-api-specs/issues/10016 + ValidateFunc: azure.ValidateResourceID, + // the Compute API is broken and returns the Resource Group name in UPPERCASE :shrug:, github issue: https://github.com/Azure/azure-rest-api-specs/issues/10016 DiffSuppressFunc: suppress.CaseDifference, }, - "single_placement_group": { + // removing single_placement_group since it has been retired as of version 2019-12-01 for Flex VMSS + "source_image_id": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: azure.ValidateResourceID, + }, + + 
"source_image_reference": sourceImageReferenceSchema(false), + + "zone_balance": { Type: pluginsdk.TypeBool, Optional: true, ForceNew: true, Default: false, }, - // the VMO mode can only be deployed into one zone for now, and its zone will also be assigned to all its VM instances - "zones": azure.SchemaSingleZone(), + "termination_notification": OrchestratedVirtualMachineScaleSetTerminateNotificationSchema(), + + "zones": azure.SchemaZones(), + "tags": tags.Schema(), + + // Computed "unique_id": { Type: pluginsdk.TypeString, Computed: true, }, - - "tags": tags.Schema(), }, } } -func resourceOrchestratedVirtualMachineScaleSetCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { +func resourceOrchestratedVirtualMachineScaleSetCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMScaleSetClient - ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() + isLegacy := true resourceGroup := d.Get("resource_group_name").(string) name := d.Get("name").(string) if d.IsNewResource() { - existing, err := client.Get(ctx, resourceGroup, name) + // Upgrading to the 2021-07-01 exposed a new expand parameter to the GET method + existing, err := client.Get(ctx, resourceGroup, name, compute.ExpandTypesForGetVMScaleSetsUserData) if err != nil { if !utils.ResponseWasNotFound(existing.Response) { return fmt.Errorf("checking for existing Orchestrated Virtual Machine Scale Set %q (Resource Group %q): %+v", name, resourceGroup, err) @@ -109,14 +219,31 @@ func resourceOrchestratedVirtualMachineScaleSetCreateUpdate(d *pluginsdk.Resourc } } + location := azure.NormalizeLocation(d.Get("location").(string)) + t := d.Get("tags").(map[string]interface{}) + zones := azure.ExpandZones(d.Get("zones").([]interface{})) + props := compute.VirtualMachineScaleSet{ - Location: utils.String(location.Normalize(d.Get("location").(string))), - Tags: tags.Expand(d.Get("tags").(map[string]interface{})), + Location: utils.String(location), + Tags: tags.Expand(t), VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{ PlatformFaultDomainCount: utils.Int32(int32(d.Get("platform_fault_domain_count").(int))), - SinglePlacementGroup: utils.Bool(d.Get("single_placement_group").(bool)), + SinglePlacementGroup: utils.Bool(false), + // OrchestrationMode needs to be hardcoded to Uniform, for the + // standard VMSS resource, since virtualMachineProfile is now supported + // in both VMSS and Orchestrated VMSS... 
+ OrchestrationMode: compute.OrchestrationModeFlexible, }, - Zones: azure.ExpandZones(d.Get("zones").([]interface{})), + Zones: zones, + } + + virtualMachineProfile := compute.VirtualMachineScaleSetVMProfile{ + StorageProfile: &compute.VirtualMachineScaleSetStorageProfile{}, + } + + networkProfile := &compute.VirtualMachineScaleSetNetworkProfile{ + // 2020-11-01 is the only valid value for this value and is only valid for VMSS in Orchestration Mode flex + NetworkAPIVersion: compute.NetworkAPIVersionTwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne, } if v, ok := d.GetOk("proximity_placement_group_id"); ok { @@ -125,28 +252,578 @@ func resourceOrchestratedVirtualMachineScaleSetCreateUpdate(d *pluginsdk.Resourc } } + instances := d.Get("instances").(int) + if v, ok := d.GetOk("sku_name"); ok { + isLegacy = false + sku, err := azure.ExpandOrchestratedVirtualMachineScaleSetSku(v.(string), instances) + if err != nil { + return fmt.Errorf("expanding 'sku_name': %+v", err) + } + props.Sku = sku + } + + osType := compute.OperatingSystemTypesWindows + var winConfigRaw []interface{} + var linConfigRaw []interface{} + var vmssOsProfile *compute.VirtualMachineScaleSetOSProfile + osProfileRaw := d.Get("os_profile").([]interface{}) + + if len(osProfileRaw) > 0 { + osProfile := osProfileRaw[0].(map[string]interface{}) + winConfigRaw = osProfile["windows_configuration"].([]interface{}) + linConfigRaw = osProfile["linux_configuration"].([]interface{}) + customData := "" + + // Pass custom data if it is defined in the config file + if v := osProfile["custom_data"]; v != nil { + customData = v.(string) + } + + if len(winConfigRaw) > 0 { + winConfig := winConfigRaw[0].(map[string]interface{}) + vmssOsProfile = expandOrchestratedVirtualMachineScaleSetOsProfileWithWindowsConfiguration(winConfig, customData) + + // if the Computer Prefix Name was not defined use the computer name + if vmssOsProfile.ComputerNamePrefix == nil || len(*vmssOsProfile.ComputerNamePrefix) == 0 { + // validate that the computer name is a valid Computer Prefix Name + _, errs := computeValidate.WindowsComputerNamePrefix(name, "computer_name_prefix") + if len(errs) > 0 { + return fmt.Errorf("unable to assume default computer name prefix %s. Please adjust the %q, or specify an explicit %q", errs[0], "name", "computer_name_prefix") + } + vmssOsProfile.ComputerNamePrefix = utils.String(name) + } + } + + if len(linConfigRaw) > 0 { + osType = compute.OperatingSystemTypesLinux + linConfig := linConfigRaw[0].(map[string]interface{}) + vmssOsProfile = expandOrchestratedVirtualMachineScaleSetOsProfileWithLinuxConfiguration(linConfig, customData) + + // if the Computer Prefix Name was not defined use the computer name + if len(*vmssOsProfile.ComputerNamePrefix) == 0 { + // validate that the computer name is a valid Computer Prefix Name + _, errs := computeValidate.LinuxComputerNamePrefix(name, "computer_name_prefix") + if len(errs) > 0 { + return fmt.Errorf("unable to assume default computer name prefix %s. 
Please adjust the %q, or specify an explicit %q", errs[0], "name", "computer_name_prefix") + } + vmssOsProfile.ComputerNamePrefix = utils.String(name) + } + } + + virtualMachineProfile.OsProfile = vmssOsProfile + } + + if v, ok := d.GetOk("boot_diagnostics"); ok { + virtualMachineProfile.DiagnosticsProfile = expandBootDiagnostics(v.([]interface{})) + } + + if v, ok := d.GetOk("priority"); ok { + virtualMachineProfile.Priority = compute.VirtualMachinePriorityTypes(v.(string)) + } + + if v, ok := d.GetOk("os_disk"); ok { + virtualMachineProfile.StorageProfile.OsDisk = ExpandOrchestratedVirtualMachineScaleSetOSDisk(v.([]interface{}), osType) + } + + if v, ok := d.GetOk("source_image_reference"); ok { + sourceImageId := "" + if sid, ok := d.GetOk("source_image_id"); ok { + sourceImageId = sid.(string) + } + sourceImageReference, err := expandSourceImageReference(v.([]interface{}), sourceImageId) + if err != nil { + return err + } + virtualMachineProfile.StorageProfile.ImageReference = sourceImageReference + } + + if v, ok := d.GetOk("data_disk"); ok { + ultraSSDEnabled := false // Currently not supported in orchestrated VMSS + dataDisks, err := ExpandVirtualMachineScaleSetDataDisk(v.([]interface{}), ultraSSDEnabled) + if err != nil { + return fmt.Errorf("expanding `data_disk`: %+v", err) + } + virtualMachineProfile.StorageProfile.DataDisks = dataDisks + } + + if v, ok := d.GetOk("network_interface"); ok { + networkInterfaces, err := ExpandOrchestratedVirtualMachineScaleSetNetworkInterface(v.([]interface{})) + if err != nil { + return fmt.Errorf("expanding `network_interface`: %+v", err) + } + + networkProfile.NetworkInterfaceConfigurations = networkInterfaces + virtualMachineProfile.NetworkProfile = networkProfile + } + + // if v, ok := d.GetOk("extension"); ok { + // extensionProfile, err := expandOrchestratedVirtualMachineScaleSetExtensions(v.(*pluginsdk.Set).List()) + // if err != nil { + // return err + // } + // virtualMachineProfile.ExtensionProfile = extensionProfile + // } + + // if v, ok := d.GetOk("extensions_time_budget"); ok { + // if virtualMachineProfile.ExtensionProfile == nil { + // virtualMachineProfile.ExtensionProfile = &compute.VirtualMachineScaleSetExtensionProfile{} + // } + // virtualMachineProfile.ExtensionProfile.ExtensionsTimeBudget = utils.String(v.(string)) + // } + + if v, ok := d.Get("max_bid_price").(float64); ok && v > 0 { + if virtualMachineProfile.Priority != compute.VirtualMachinePriorityTypesSpot { + return fmt.Errorf("`max_bid_price` can only be configured when `priority` is set to `Spot`") + } + + virtualMachineProfile.BillingProfile = &compute.BillingProfile{ + MaxPrice: utils.Float(v), + } + } + + if v, ok := d.GetOk("encryption_at_host_enabled"); ok { + virtualMachineProfile.SecurityProfile = &compute.SecurityProfile{ + EncryptionAtHost: utils.Bool(v.(bool)), + } + } + + if v, ok := d.GetOk("eviction_policy"); ok { + if virtualMachineProfile.Priority != compute.VirtualMachinePriorityTypesSpot { + return fmt.Errorf("an `eviction_policy` can only be specified when `priority` is set to `Spot`") + } + virtualMachineProfile.EvictionPolicy = compute.VirtualMachineEvictionPolicyTypes(v.(string)) + } else if virtualMachineProfile.Priority == compute.VirtualMachinePriorityTypesSpot { + return fmt.Errorf("an `eviction_policy` must be specified when `priority` is set to `Spot`") + } + + if v, ok := d.GetOk("license_type"); ok { + virtualMachineProfile.LicenseType = utils.String(v.(string)) + } + + if v, ok := d.GetOk("termination_notification"); ok { + 
virtualMachineProfile.ScheduledEventsProfile = ExpandVirtualMachineScaleSetScheduledEventsProfile(v.([]interface{})) + } + + // Only include the virtual machine profile if this is not a legacy configuration + if !isLegacy { + if v, ok := d.GetOk("plan"); ok { + props.Plan = expandPlan(v.([]interface{})) + } + + if v, ok := d.GetOk("identity"); ok { + identity, err := ExpandVirtualMachineScaleSetIdentity(v.([]interface{})) + if err != nil { + return fmt.Errorf("expanding `identity`: %+v", err) + } + props.Identity = identity + } + + if v, ok := d.GetOk("automatic_instance_repair"); ok { + props.VirtualMachineScaleSetProperties.AutomaticRepairsPolicy = ExpandOrchestratedVirtualMachineScaleSetAutomaticRepairsPolicy(v.([]interface{})) + } + + if v, ok := d.GetOk("zone_balance"); ok && v.(bool) { + if len(*zones) == 0 { + return fmt.Errorf("`zone_balance` can only be set to `true` when zones are specified") + } + + props.VirtualMachineScaleSetProperties.ZoneBalance = utils.Bool(v.(bool)) + } + + props.VirtualMachineScaleSetProperties.VirtualMachineProfile = &virtualMachineProfile + } + + log.Printf("[DEBUG] Creating Orchestrated Virtual Machine Scale Set %q (Resource Group %q)..", name, resourceGroup) future, err := client.CreateOrUpdate(ctx, resourceGroup, name, props) if err != nil { return fmt.Errorf("creating Orchestrated Virtual Machine Scale Set %q (Resource Group %q): %+v", name, resourceGroup, err) } + log.Printf("[DEBUG] Waiting for Orchestrated Virtual Machine Scale Set %q (Resource Group %q) to be created..", name, resourceGroup) if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("waiting for creation of Orchestrated Virtual Machine Scale Set %q (Resource Group %q): %+v", name, resourceGroup, err) + // If it is a Retryable Error, re-issue the PUT up to 10 times until either the error goes away or the retry limit has been reached + if strings.Contains(err.Error(), "RetryableError") { + log.Printf("[DEBUG] Retryable error hit for Orchestrated Virtual Machine Scale Set %q (Resource Group %q) to be created..", name, resourceGroup) + errCount := 1 + + for { + log.Printf("[DEBUG] Retrying PUT %d for Orchestrated Virtual Machine Scale Set %q (Resource Group %q)..", errCount, name, resourceGroup) + future, err := client.CreateOrUpdate(ctx, resourceGroup, name, props) + if err != nil { + return fmt.Errorf("creating Orchestrated Virtual Machine Scale Set %q (Resource Group %q) after %d retries: %+v", name, resourceGroup, errCount, err) + } + + err = future.WaitForCompletionRef(ctx, client.Client) + if err != nil && strings.Contains(err.Error(), "RetryableError") { + if errCount == 10 { + return fmt.Errorf("waiting for creation of Orchestrated Virtual Machine Scale Set %q (Resource Group %q) after %d retries: %+v", name, resourceGroup, errCount, err) + } + errCount++ + } else { + if err != nil { + // Hit an error while retrying that is not retryable anymore... + return fmt.Errorf("hit unretryable error waiting for creation of Orchestrated Virtual Machine Scale Set %q (Resource Group %q) after %d retries: %+v", name, resourceGroup, errCount, err) + } else { + // err is nil and the create finally succeeded - continue with the rest of the create function... + break + } + } + } + } else { + // Not a retryable error...
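+ // surface it to the caller unchanged instead of retrying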
+ return fmt.Errorf("waiting for creation of Orchestrated Virtual Machine Scale Set %q (Resource Group %q): %+v", name, resourceGroup, err) + } } - resp, err := client.Get(ctx, resourceGroup, name) + log.Printf("[DEBUG] Orchestrated Virtual Machine Scale Set %q (Resource Group %q) was created", name, resourceGroup) + log.Printf("[DEBUG] Retrieving Orchestrated Virtual Machine Scale Set %q (Resource Group %q)..", name, resourceGroup) + + // Upgrading to the 2021-07-01 exposed a new expand parameter in the GET method + resp, err := client.Get(ctx, resourceGroup, name, compute.ExpandTypesForGetVMScaleSetsUserData) if err != nil { return fmt.Errorf("retrieving Orchestrated Virtual Machine Scale Set %q (Resource Group %q): %+v", name, resourceGroup, err) } if resp.ID == nil || *resp.ID == "" { - return fmt.Errorf("retrieving Orchestrated Virtual Machine Scale Set %q (Resource Group %q): ID was empty", name, resourceGroup) + return fmt.Errorf("retrieving Orchestrated Virtual Machine Scale Set %q (Resource Group %q): ID was nil", name, resourceGroup) } d.SetId(*resp.ID) return resourceOrchestratedVirtualMachineScaleSetRead(d, meta) } +func resourceOrchestratedVirtualMachineScaleSetUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Compute.VMScaleSetClient + ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.VirtualMachineScaleSetID(d.Id()) + if err != nil { + return err + } + + isLegacy := true + updateInstances := false + + // retrieve + // Upgrading to the 2021-07-01 exposed a new expand parameter in the GET method + existing, err := client.Get(ctx, id.ResourceGroup, id.Name, compute.ExpandTypesForGetVMScaleSetsUserData) + if err != nil { + return fmt.Errorf("retrieving Orchestrated Virtual Machine Scale Set %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) + } + if existing.Sku != nil { + isLegacy = false + } + if existing.VirtualMachineScaleSetProperties == nil { + return fmt.Errorf("retrieving Orchestrated Virtual Machine Scale Set %q (Resource Group %q): `properties` was nil", id.Name, id.ResourceGroup) + } + + if !isLegacy { + if existing.VirtualMachineScaleSetProperties.VirtualMachineProfile == nil { + return fmt.Errorf("retrieving Orchestrated Virtual Machine Scale Set %q (Resource Group %q): `properties.virtualMachineProfile` was nil", id.Name, id.ResourceGroup) + } + if existing.VirtualMachineScaleSetProperties.VirtualMachineProfile.StorageProfile == nil { + return fmt.Errorf("retrieving Orchestrated Virtual Machine Scale Set %q (Resource Group %q): `properties.virtualMachineProfile.storageProfile` was nil", id.Name, id.ResourceGroup) + } + } + + updateProps := compute.VirtualMachineScaleSetUpdateProperties{} + update := compute.VirtualMachineScaleSetUpdate{} + osType := compute.OperatingSystemTypesWindows + + if !isLegacy { + updateProps = compute.VirtualMachineScaleSetUpdateProperties{ + VirtualMachineProfile: &compute.VirtualMachineScaleSetUpdateVMProfile{ + // if an image reference has been configured previously (it has to be), it is better to include it in this + // update request to avoid cases where the API complains that ImageReference is null + // issue tracking: https://github.com/Azure/azure-rest-api-specs/issues/10322 + StorageProfile: &compute.VirtualMachineScaleSetUpdateStorageProfile{ + ImageReference: existing.VirtualMachineScaleSetProperties.VirtualMachineProfile.StorageProfile.ImageReference, + }, + }, + // Currently not supported in
orchestrated VMSS + // if an upgrade policy's been configured previously (which it will have) it must be threaded through + // this doesn't matter for Manual - but breaks when updating anything on a Automatic and Rolling Mode Scale Set + // UpgradePolicy: existing.VirtualMachineScaleSetProperties.UpgradePolicy, + } + + priority := compute.VirtualMachinePriorityTypes(d.Get("priority").(string)) + if d.HasChange("max_bid_price") { + if priority != compute.VirtualMachinePriorityTypesSpot { + return fmt.Errorf("`max_bid_price` can only be configured when `priority` is set to `Spot`") + } + + updateProps.VirtualMachineProfile.BillingProfile = &compute.BillingProfile{ + MaxPrice: utils.Float(d.Get("max_bid_price").(float64)), + } + } + + osProfileRaw := d.Get("os_profile").([]interface{}) + vmssOsProfile := compute.VirtualMachineScaleSetUpdateOSProfile{} + windowsConfig := compute.WindowsConfiguration{} + linuxConfig := compute.LinuxConfiguration{} + + if len(osProfileRaw) > 0 { + osProfile := osProfileRaw[0].(map[string]interface{}) + winConfigRaw := osProfile["windows_configuration"].([]interface{}) + linConfigRaw := osProfile["linux_configuration"].([]interface{}) + + if d.HasChange("os_profile.0.custom_data") { + updateInstances = true + + // customData can only be sent if it's a base64 encoded string, + // so it's not possible to remove this without tainting the resource + vmssOsProfile.CustomData = utils.String(osProfile["custom_data"].(string)) + } + + if len(winConfigRaw) > 0 { + winConfig := winConfigRaw[0].(map[string]interface{}) + + if d.HasChange("os_profile.0.windows_configuration.0.enable_automatic_updates") || + d.HasChange("os_profile.0.windows_configuration.0.provision_vm_agent") || + d.HasChange("os_profile.0.windows_configuration.0.timezone") || + d.HasChange("os_profile.0.windows_configuration.0.secret") || + d.HasChange("os_profile.0.windows_configuration.0.winrm_listener") { + updateInstances = true + } + + if d.HasChange("os_profile.0.windows_configuration.0.enable_automatic_updates") { + windowsConfig.EnableAutomaticUpdates = utils.Bool(winConfig["enable_automatic_updates"].(bool)) + } + + if d.HasChange("os_profile.0.windows_configuration.0.provision_vm_agent") { + windowsConfig.ProvisionVMAgent = utils.Bool(winConfig["provision_vm_agent"].(bool)) + } + + if d.HasChange("os_profile.0.windows_configuration.0.timezone") { + windowsConfig.TimeZone = utils.String(winConfig["timezone"].(string)) + } + + if d.HasChange("os_profile.0.windows_configuration.0.secret") { + vmssOsProfile.Secrets = expandWindowsSecrets(winConfig["secret"].([]interface{})) + } + + if d.HasChange("os_profile.0.windows_configuration.0.winrm_listener") { + winRmListenersRaw := winConfig["winrm_listener"].(*pluginsdk.Set).List() + vmssOsProfile.WindowsConfiguration.WinRM = expandWinRMListener(winRmListenersRaw) + } + + vmssOsProfile.WindowsConfiguration = &windowsConfig + } + + if len(linConfigRaw) > 0 { + osType = compute.OperatingSystemTypesLinux + linConfig := linConfigRaw[0].(map[string]interface{}) + + if d.HasChange("os_profile.0.linux_configuration.0.provision_vm_agent") || + d.HasChange("os_profile.0.linux_configuration.0.disable_password_authentication") || + d.HasChange("os_profile.0.linux_configuration.0.admin_ssh_key") { + updateInstances = true + } + + if d.HasChange("os_profile.0.linux_configuration.0.provision_vm_agent") { + linuxConfig.ProvisionVMAgent = utils.Bool(linConfig["provision_vm_agent"].(bool)) + } + + if 
d.HasChange("os_profile.0.linux_configuration.0.disable_password_authentication") { + linuxConfig.DisablePasswordAuthentication = utils.Bool(linConfig["disable_password_authentication"].(bool)) + } + + if d.HasChange("os_profile.0.linux_configuration.0.admin_ssh_key") { + sshPublicKeys := ExpandSSHKeys(linConfig["admin_ssh_key"].(*pluginsdk.Set).List()) + if linuxConfig.SSH == nil { + linuxConfig.SSH = &compute.SSHConfiguration{} + } + linuxConfig.SSH.PublicKeys = &sshPublicKeys + } + + vmssOsProfile.LinuxConfiguration = &linuxConfig + } + + updateProps.VirtualMachineProfile.OsProfile = &vmssOsProfile + } + + if d.HasChange("data_disk") || d.HasChange("os_disk") || d.HasChange("source_image_id") || d.HasChange("source_image_reference") { + updateInstances = true + + if updateProps.VirtualMachineProfile.StorageProfile == nil { + updateProps.VirtualMachineProfile.StorageProfile = &compute.VirtualMachineScaleSetUpdateStorageProfile{} + } + + if d.HasChange("data_disk") { + ultraSSDEnabled := false // Currently not supported in orchestrated vmss + dataDisks, err := ExpandOrchestratedVirtualMachineScaleSetDataDisk(d.Get("data_disk").([]interface{}), ultraSSDEnabled) + if err != nil { + return fmt.Errorf("expanding `data_disk`: %+v", err) + } + updateProps.VirtualMachineProfile.StorageProfile.DataDisks = dataDisks + } + + if d.HasChange("os_disk") { + osDiskRaw := d.Get("os_disk").([]interface{}) + updateProps.VirtualMachineProfile.StorageProfile.OsDisk = ExpandOrchestratedVirtualMachineScaleSetOSDiskUpdate(osDiskRaw) + } + + if d.HasChange("source_image_id") || d.HasChange("source_image_reference") { + sourceImageReferenceRaw := d.Get("source_image_reference").([]interface{}) + sourceImageId := d.Get("source_image_id").(string) + sourceImageReference, err := expandSourceImageReference(sourceImageReferenceRaw, sourceImageId) + if err != nil { + return err + } + + // Must include all storage profile properties when updating disk image. 
See: https://github.com/hashicorp/terraform-provider-azurerm/issues/8273 + updateProps.VirtualMachineProfile.StorageProfile.DataDisks = existing.VirtualMachineScaleSetProperties.VirtualMachineProfile.StorageProfile.DataDisks + updateProps.VirtualMachineProfile.StorageProfile.ImageReference = sourceImageReference + updateProps.VirtualMachineProfile.StorageProfile.OsDisk = &compute.VirtualMachineScaleSetUpdateOSDisk{ + Caching: existing.VirtualMachineScaleSetProperties.VirtualMachineProfile.StorageProfile.OsDisk.Caching, + WriteAcceleratorEnabled: existing.VirtualMachineScaleSetProperties.VirtualMachineProfile.StorageProfile.OsDisk.WriteAcceleratorEnabled, + DiskSizeGB: existing.VirtualMachineScaleSetProperties.VirtualMachineProfile.StorageProfile.OsDisk.DiskSizeGB, + Image: existing.VirtualMachineScaleSetProperties.VirtualMachineProfile.StorageProfile.OsDisk.Image, + VhdContainers: existing.VirtualMachineScaleSetProperties.VirtualMachineProfile.StorageProfile.OsDisk.VhdContainers, + ManagedDisk: existing.VirtualMachineScaleSetProperties.VirtualMachineProfile.StorageProfile.OsDisk.ManagedDisk, + } + } + } + + if d.HasChange("network_interface") { + networkInterfacesRaw := d.Get("network_interface").([]interface{}) + networkInterfaces, err := ExpandOrchestratedVirtualMachineScaleSetNetworkInterfaceUpdate(networkInterfacesRaw) + if err != nil { + return fmt.Errorf("expanding `network_interface`: %+v", err) + } + + updateProps.VirtualMachineProfile.NetworkProfile = &compute.VirtualMachineScaleSetUpdateNetworkProfile{ + NetworkInterfaceConfigurations: networkInterfaces, + // 2020-11-01 is the only valid value for this value and is only valid for VMSS in Orchestration Mode flex + NetworkAPIVersion: compute.NetworkAPIVersionTwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne, + } + } + + if d.HasChange("boot_diagnostics") { + updateInstances = true + + bootDiagnosticsRaw := d.Get("boot_diagnostics").([]interface{}) + updateProps.VirtualMachineProfile.DiagnosticsProfile = expandBootDiagnostics(bootDiagnosticsRaw) + } + + if d.HasChange("termination_notification") { + notificationRaw := d.Get("termination_notification").([]interface{}) + updateProps.VirtualMachineProfile.ScheduledEventsProfile = ExpandOrchestratedVirtualMachineScaleSetScheduledEventsProfile(notificationRaw) + } + + if d.HasChange("encryption_at_host_enabled") { + updateProps.VirtualMachineProfile.SecurityProfile = &compute.SecurityProfile{ + EncryptionAtHost: utils.Bool(d.Get("encryption_at_host_enabled").(bool)), + } + } + + if d.HasChange("license_type") { + license := d.Get("license_type").(string) + if license == "" { + // Only for create no specification is possible in the API. API does not allow empty string in update. + // So removing attribute license_type from Terraform configuration if it was set to value other than 'None' would lead to an endless loop in apply. + // To allow updating in this case set value explicitly to 'None'. 
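+ // (i.e. an empty `license_type` in the configuration is sent to the API as "None")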
+ license = "None" + } + updateProps.VirtualMachineProfile.LicenseType = &license + } + + if d.HasChange("automatic_instance_repair") { + automaticRepairsPolicyRaw := d.Get("automatic_instance_repair").([]interface{}) + automaticRepairsPolicy := ExpandOrchestratedVirtualMachineScaleSetAutomaticRepairsPolicy(automaticRepairsPolicyRaw) + updateProps.AutomaticRepairsPolicy = automaticRepairsPolicy + } + + if d.HasChange("identity") { + identityRaw := d.Get("identity").([]interface{}) + identity, err := ExpandOrchestratedVirtualMachineScaleSetIdentity(identityRaw) + if err != nil { + return fmt.Errorf("expanding `identity`: %+v", err) + } + + update.Identity = identity + } + + if d.HasChange("plan") { + planRaw := d.Get("plan").([]interface{}) + update.Plan = expandPlan(planRaw) + } + + if d.HasChange("sku") || d.HasChange("instances") { + // in-case ignore_changes is being used, since both fields are required + // look up the current values and override them as needed + sku := existing.Sku + instances := int(*sku.Capacity) + + if d.HasChange("instances") { + instances = d.Get("instances").(int) + } + + if d.HasChange("sku_name") { + updateInstances = true + + sku, err = azure.ExpandOrchestratedVirtualMachineScaleSetSku(d.Get("sku_name").(string), instances) + if err != nil { + return err + } + } + + update.Sku = sku + } + + // if d.HasChanges("extension", "extensions_time_budget") { + // updateInstances = true + + // extensionProfile, err := expandOrchestratedVirtualMachineScaleSetExtensions(d.Get("extension").(*pluginsdk.Set).List()) + // if err != nil { + // return err + // } + // updateProps.VirtualMachineProfile.ExtensionProfile = extensionProfile + // updateProps.VirtualMachineProfile.ExtensionProfile.ExtensionsTimeBudget = utils.String(d.Get("extensions_time_budget").(string)) + // } + } + + // Only two fields that can change in legacy mode + if d.HasChange("proximity_placement_group_id") { + if v, ok := d.GetOk("proximity_placement_group_id"); ok { + updateInstances = true + updateProps.ProximityPlacementGroup = &compute.SubResource{ + ID: utils.String(v.(string)), + } + } + } + + if d.HasChange("tags") { + update.Tags = tags.Expand(d.Get("tags").(map[string]interface{})) + } + + update.VirtualMachineScaleSetUpdateProperties = &updateProps + + if updateInstances { + log.Printf("[DEBUG] Orchestrated Virtual Machine Scale Set %q in Resource Group %q - updateInstances is true", id.Name, id.ResourceGroup) + } + + // AutomaticOSUpgradeIsEnabled currently is not supported in orchestrated VMSS flex + metaData := virtualMachineScaleSetUpdateMetaData{ + AutomaticOSUpgradeIsEnabled: false, + // CanRollInstancesWhenRequired: meta.(*clients.Client).Features.VirtualMachineScaleSet.RollInstancesWhenRequired, + // UpdateInstances: updateInstances, + CanRollInstancesWhenRequired: false, + UpdateInstances: false, + Client: meta.(*clients.Client).Compute, + Existing: existing, + ID: id, + OSType: osType, + } + + if err := metaData.performUpdate(ctx, update); err != nil { + return err + } + + return resourceOrchestratedVirtualMachineScaleSetRead(d, meta) +} + func resourceOrchestratedVirtualMachineScaleSetRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMScaleSetClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) @@ -157,7 +834,8 @@ func resourceOrchestratedVirtualMachineScaleSetRead(d *pluginsdk.ResourceData, m return err } - resp, err := client.Get(ctx, id.ResourceGroup, id.Name) + // Upgrading to the 2021-07-01 exposed a new 
expand parameter in the GET method + resp, err := client.Get(ctx, id.ResourceGroup, id.Name, compute.ExpandTypesForGetVMScaleSetsUserData) if err != nil { if utils.ResponseWasNotFound(resp.Response) { log.Printf("[DEBUG] Orchestrated Virtual Machine Scale Set %q was not found in Resource Group %q - removing from state!", id.Name, id.ResourceGroup) @@ -172,15 +850,131 @@ func resourceOrchestratedVirtualMachineScaleSetRead(d *pluginsdk.ResourceData, m d.Set("resource_group_name", id.ResourceGroup) d.Set("location", location.NormalizeNilable(resp.Location)) - if props := resp.VirtualMachineScaleSetProperties; props != nil { - d.Set("platform_fault_domain_count", props.PlatformFaultDomainCount) - d.Set("single_placement_group", props.SinglePlacementGroup) - proximityPlacementGroupID := "" - if props.ProximityPlacementGroup != nil && props.ProximityPlacementGroup.ID != nil { - proximityPlacementGroupID = *props.ProximityPlacementGroup.ID + var skuName *string + var instances int + if resp.Sku != nil { + skuName, err = azure.FlattenOrchestratedVirtualMachineScaleSetSku(resp.Sku) + if err != nil || skuName == nil { + return fmt.Errorf("setting `sku_name`: %+v", err) + } + + if resp.Sku.Capacity != nil { + instances = int(*resp.Sku.Capacity) + } + + d.Set("sku_name", skuName) + d.Set("instances", instances) + } + + identity, err := FlattenOrchestratedVirtualMachineScaleSetIdentity(resp.Identity) + if err != nil { + return err + } + if err := d.Set("identity", identity); err != nil { + return fmt.Errorf("setting `identity`: %+v", err) + } + + if err := d.Set("plan", flattenPlan(resp.Plan)); err != nil { + return fmt.Errorf("setting `plan`: %+v", err) + } + + if resp.VirtualMachineScaleSetProperties == nil { + return fmt.Errorf("retrieving Orchestrated Virtual Machine Scale Set %q (Resource Group %q): `properties` was nil", id.Name, id.ResourceGroup) + } + props := *resp.VirtualMachineScaleSetProperties + + if err := d.Set("automatic_instance_repair", FlattenOrchestratedVirtualMachineScaleSetAutomaticRepairsPolicy(props.AutomaticRepairsPolicy)); err != nil { + return fmt.Errorf("setting `automatic_instance_repair`: %+v", err) + } + + d.Set("platform_fault_domain_count", props.PlatformFaultDomainCount) + proximityPlacementGroupId := "" + if props.ProximityPlacementGroup != nil && props.ProximityPlacementGroup.ID != nil { + proximityPlacementGroupId = *props.ProximityPlacementGroup.ID + } + d.Set("proximity_placement_group_id", proximityPlacementGroupId) + d.Set("unique_id", props.UniqueID) + d.Set("zone_balance", props.ZoneBalance) + + if profile := props.VirtualMachineProfile; profile != nil { + if err := d.Set("boot_diagnostics", flattenBootDiagnostics(profile.DiagnosticsProfile)); err != nil { + return fmt.Errorf("setting `boot_diagnostics`: %+v", err) + } + + // defaulted since BillingProfile isn't returned if it's unset + maxBidPrice := float64(-1.0) + if profile.BillingProfile != nil && profile.BillingProfile.MaxPrice != nil { + maxBidPrice = *profile.BillingProfile.MaxPrice + } + d.Set("max_bid_price", maxBidPrice) + + d.Set("eviction_policy", string(profile.EvictionPolicy)) + d.Set("license_type", profile.LicenseType) + + // the service just return empty when this is not assigned when provisioned + // See discussion on https://github.com/Azure/azure-rest-api-specs/issues/10971 + priority := compute.VirtualMachinePriorityTypesRegular + if profile.Priority != "" { + priority = profile.Priority + } + d.Set("priority", priority) + + if storageProfile := profile.StorageProfile; storageProfile != 
nil { + if err := d.Set("os_disk", FlattenOrchestratedVirtualMachineScaleSetOSDisk(storageProfile.OsDisk)); err != nil { + return fmt.Errorf("setting `os_disk`: %+v", err) + } + + if err := d.Set("data_disk", FlattenOrchestratedVirtualMachineScaleSetDataDisk(storageProfile.DataDisks)); err != nil { + return fmt.Errorf("setting `data_disk`: %+v", err) + } + + if err := d.Set("source_image_reference", flattenSourceImageReference(storageProfile.ImageReference)); err != nil { + return fmt.Errorf("setting `source_image_reference`: %+v", err) + } + + var storageImageId string + if storageProfile.ImageReference != nil && storageProfile.ImageReference.ID != nil { + storageImageId = *storageProfile.ImageReference.ID + } + d.Set("source_image_id", storageImageId) + } + + if osProfile := profile.OsProfile; osProfile != nil { + if err := d.Set("os_profile", FlattenOrchestratedVirtualMachineScaleSetOSProfile(osProfile, d)); err != nil { + return fmt.Errorf("setting `os_profile`: %+v", err) + } + } + + if nwProfile := profile.NetworkProfile; nwProfile != nil { + flattenedNics := FlattenOrchestratedVirtualMachineScaleSetNetworkInterface(nwProfile.NetworkInterfaceConfigurations) + if err := d.Set("network_interface", flattenedNics); err != nil { + return fmt.Errorf("setting `network_interface`: %+v", err) + } + } + + if scheduleProfile := profile.ScheduledEventsProfile; scheduleProfile != nil { + if err := d.Set("termination_notification", FlattenOrchestratedVirtualMachineScaleSetScheduledEventsProfile(scheduleProfile)); err != nil { + return fmt.Errorf("setting `termination_notification`: %+v", err) + } + } + + // extensionProfile, err := flattenOrchestratedVirtualMachineScaleSetExtensions(profile.ExtensionProfile, d) + // if err != nil { + // return fmt.Errorf("failed flattening `extension`: %+v", err) + // } + // d.Set("extension", extensionProfile) + + // extensionsTimeBudget := "PT1H30M" + // if profile.ExtensionProfile != nil && profile.ExtensionProfile.ExtensionsTimeBudget != nil { + // extensionsTimeBudget = *profile.ExtensionProfile.ExtensionsTimeBudget + // } + // d.Set("extensions_time_budget", extensionsTimeBudget) + + encryptionAtHostEnabled := false + if profile.SecurityProfile != nil && profile.SecurityProfile.EncryptionAtHost != nil { + encryptionAtHostEnabled = *profile.SecurityProfile.EncryptionAtHost } - d.Set("proximity_placement_group_id", proximityPlacementGroupID) - d.Set("unique_id", props.UniqueID) + d.Set("encryption_at_host_enabled", encryptionAtHostEnabled) } if err := d.Set("zones", resp.Zones); err != nil { @@ -200,7 +994,46 @@ func resourceOrchestratedVirtualMachineScaleSetDelete(d *pluginsdk.ResourceData, return err } - // @ArcturusZhang (mimicking from linux_virtual_machine_pluginsdk.go): sending `nil` here omits this value from being sent + // Upgrading to the 2021-07-01 exposed a new expand parameter in the GET method + resp, err := client.Get(ctx, id.ResourceGroup, id.Name, compute.ExpandTypesForGetVMScaleSetsUserData) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return nil + } + + return fmt.Errorf("retrieving Orchestrated Virtual Machine Scale Set %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) + } + + // Sometimes VMSS's aren't fully deleted when the `Delete` call returns - as such we'll try to scale the cluster + // to 0 nodes first, then delete the cluster - which should ensure there's no Network Interfaces kicking around + // and work around this Azure API bug: + // Original Error: Code="InUseSubnetCannotBeDeleted" 
Message="Subnet internal is in use by + // /{nicResourceID}/|providers|Microsoft.Compute|virtualMachineScaleSets|acctestvmss-190923101253410278|virtualMachines|0|networkInterfaces|example/ipConfigurations/internal and cannot be deleted. + // In order to delete the subnet, delete all the resources within the subnet. See aka.ms/deletesubnet. + if resp.Sku != nil { + resp.Sku.Capacity = utils.Int64(int64(0)) + + log.Printf("[DEBUG] Scaling instances to 0 prior to deletion - this helps avoids networking issues within Azure") + update := compute.VirtualMachineScaleSetUpdate{ + Sku: resp.Sku, + } + future, err := client.Update(ctx, id.ResourceGroup, id.Name, update) + if err != nil { + return fmt.Errorf("updating number of instances in Orchestrated Virtual Machine Scale Set %q (Resource Group %q) to scale to 0: %+v", id.Name, id.ResourceGroup, err) + } + + log.Printf("[DEBUG] Waiting for scaling of instances to 0 prior to deletion - this helps avoids networking issues within Azure") + err = future.WaitForCompletionRef(ctx, client.Client) + if err != nil { + return fmt.Errorf("waiting for number of instances in Orchestrated Virtual Machine Scale Set %q (Resource Group %q) to scale to 0: %+v", id.Name, id.ResourceGroup, err) + } + log.Printf("[DEBUG] Scaled instances to 0 prior to deletion - this helps avoids networking issues within Azure") + } else { + log.Printf("[DEBUG] Unable to scale instances to `0` since the `sku` block is nil - trying to delete anyway") + } + + log.Printf("[DEBUG] Deleting Orchestrated Virtual Machine Scale Set %q (Resource Group %q)..", id.Name, id.ResourceGroup) + // @ArcturusZhang (mimicking from windows_virtual_machine_pluginsdk.go): sending `nil` here omits this value from being sent // which matches the previous behaviour - we're only splitting this out so it's clear why // TODO: support force deletion once it's out of Preview, if applicable var forceDeletion *bool = nil @@ -209,9 +1042,11 @@ func resourceOrchestratedVirtualMachineScaleSetDelete(d *pluginsdk.ResourceData, return fmt.Errorf("deleting Orchestrated Virtual Machine Scale Set %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) } + log.Printf("[DEBUG] Waiting for deletion of Orchestrated Virtual Machine Scale Set %q (Resource Group %q)..", id.Name, id.ResourceGroup) if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { return fmt.Errorf("waiting for deletion of Orchestrated Virtual Machine Scale Set %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) } + log.Printf("[DEBUG] Deleted Orchestrated Virtual Machine Scale Set %q (Resource Group %q).", id.Name, id.ResourceGroup) return nil } diff --git a/internal/services/compute/orchestrated_virtual_machine_scale_set_resource_test.go b/internal/services/compute/orchestrated_virtual_machine_scale_set_resource_test.go index 68cdbdd7fb12..e25ba5197d89 100644 --- a/internal/services/compute/orchestrated_virtual_machine_scale_set_resource_test.go +++ b/internal/services/compute/orchestrated_virtual_machine_scale_set_resource_test.go @@ -5,6 +5,8 @@ import ( "fmt" "testing" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" + "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" @@ -16,177 +18,3661 @@ import ( type OrchestratedVirtualMachineScaleSetResource struct { } -func 
TestAccOrchestratedVirtualMachineScaleSet_basicZonal(t *testing.T) { +func TestAccOrchestratedVirtualMachineScaleSet_legacyBasicZonal(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") r := OrchestratedVirtualMachineScaleSetResource{} data.ResourceTest(t, r, []acceptance.TestStep{ { - Config: r.basic(data), + Config: r.legacyBasic(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, - data.ImportStep(), + data.ImportStep("extensions_time_budget", "max_bid_price", "priority"), }) } -func TestAccOrchestratedVirtualMachineScaleSet_updateZonal(t *testing.T) { +func TestAccOrchestratedVirtualMachineScaleSet_legacyUpdateZonal(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") r := OrchestratedVirtualMachineScaleSetResource{} data.ResourceTest(t, r, []acceptance.TestStep{ { - Config: r.basic(data), + Config: r.legacyBasic(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, - data.ImportStep(), + data.ImportStep("extensions_time_budget", "max_bid_price", "priority"), { - Config: r.update(data), + Config: r.legacyUpdate(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, - data.ImportStep(), + data.ImportStep("extensions_time_budget", "max_bid_price", "priority"), { - Config: r.basic(data), + Config: r.legacyBasic(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, - data.ImportStep(), + data.ImportStep("extensions_time_budget", "max_bid_price", "priority"), }) } -func TestAccOrchestratedVirtualMachineScaleSet_basicNonZonal(t *testing.T) { +func TestAccOrchestratedVirtualMachineScaleSet_legacyBasicNonZonal(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") r := OrchestratedVirtualMachineScaleSetResource{} data.ResourceTest(t, r, []acceptance.TestStep{ { - Config: r.basicNonZonal(data), + Config: r.legacyBasicNonZonal(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, - data.ImportStep(), + data.ImportStep("extensions_time_budget", "max_bid_price", "priority"), }) } -func TestAccOrchestratedVirtualMachineScaleSet_requiresImport(t *testing.T) { +func TestAccOrchestratedVirtualMachineScaleSet_legacyRequiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") r := OrchestratedVirtualMachineScaleSetResource{} data.ResourceTest(t, r, []acceptance.TestStep{ { - Config: r.basic(data), + Config: r.legacyBasic(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, - data.RequiresImportErrorStep(r.requiresImport), + data.RequiresImportErrorStep(r.legacyRequiresImport), }) } -func (t OrchestratedVirtualMachineScaleSetResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := parse.VirtualMachineScaleSetID(state.ID) - if err != nil { - return nil, err - } +func TestAccOrchestratedVirtualMachineScaleSet_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} - resp, err := clients.Compute.VMScaleSetClient.Get(ctx, id.ResourceGroup, id.Name) - if err != nil { - return nil, fmt.Errorf("retrieving Compute Marketplace 
Agreement %q", id.String()) - } + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + // testing default scaleset values + check.That(data.ResourceName).Key("eviction_policy").HasValue(""), + ), + }, + data.ImportStep("os_profile.0.linux_configuration.0.admin_password"), + }) +} - return utils.Bool(resp.ID != nil), nil +func TestAccOrchestratedVirtualMachineScaleSet_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + { + Config: r.requiresImport(data), + ExpectError: acceptance.RequiresImportError("azurerm_orchestrated_virtual_machine_scale_set"), + }, + }) } -func (r OrchestratedVirtualMachineScaleSetResource) basic(data acceptance.TestData) string { - return fmt.Sprintf(` -%s +func TestAccOrchestratedVirtualMachineScaleSet_evictionPolicyDelete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} -resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { - name = "acctestVMO-%d" - location = azurerm_resource_group.test.location - resource_group_name = azurerm_resource_group.test.name + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.evictionPolicyDelete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("eviction_policy").HasValue("Delete"), + ), + }, + data.ImportStep("os_profile.0.linux_configuration.0.admin_password"), + }) +} - platform_fault_domain_count = 1 +// Not supported yet +// func TestAccOrchestratedVirtualMachineScaleSet_standardSSD(t *testing.T) { +// data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") +// r := OrchestratedVirtualMachineScaleSetResource{} - zones = ["1"] +// data.ResourceTest(t, r, []acceptance.TestStep{ +// { +// Config: r.standardSSD(data), +// Check: acceptance.ComposeTestCheckFunc( +// check.That(data.ResourceName).ExistsInAzure(r), +// ), +// }, +// data.ImportStep("os_profile.0.linux_configuration.0.admin_password"), +// }) +// } - tags = { - ENV = "Test" - } +func TestAccOrchestratedVirtualMachineScaleSet_withPPG(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.withPPG(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("proximity_placement_group_id").Exists(), + ), + }, + }) } -`, r.template(data), data.RandomInteger) + +func TestAccOrchestratedVirtualMachineScaleSet_basicPublicIP(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basicPublicIP(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("os_profile.0.linux_configuration.0.admin_password"), + }) } -func (r 
OrchestratedVirtualMachineScaleSetResource) update(data acceptance.TestData) string { - return fmt.Sprintf(` -%s +func TestAccOrchestratedVirtualMachineScaleSet_basicPublicIP_simpleUpdate(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} -resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { - name = "acctestVMO-%d" - location = azurerm_resource_group.test.location - resource_group_name = azurerm_resource_group.test.name + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basicEmptyPublicIP(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("os_profile.0.linux_configuration.0.admin_password"), + { + Config: r.basicEmptyPublicIP_updated_tags(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("os_profile.0.linux_configuration.0.admin_password"), + }) +} - platform_fault_domain_count = 1 +func TestAccOrchestratedVirtualMachineScaleSet_updateNetworkProfile(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} - zones = ["1"] + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basicEmptyPublicIP(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("os_profile.0.linux_configuration.0.admin_password"), + { + Config: r.basicEmptyNetworkProfile_true_ipforwarding(data), + Check: acceptance.ComposeTestCheckFunc(), + }, + data.ImportStep("os_profile.0.linux_configuration.0.admin_password"), + }) +} - tags = { - ENV = "Test", - FOO = "Bar" - } +func TestAccOrchestratedVirtualMachineScaleSet_updateNetworkProfile_ipconfiguration_dns_name_label(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basicEmptyPublicIP(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("os_profile.0.linux_configuration.0.admin_password"), + { + Config: r.basicEmptyPublicIP_updatedDNS_label(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("os_profile.0.linux_configuration.0.admin_password"), + }) } -`, r.template(data), data.RandomInteger) + +func TestAccOrchestratedVirtualMachineScaleSet_verify_key_data_changed(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.linux(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("os_profile.0.linux_configuration.0.admin_password", "os_profile.0.custom_data"), + { + Config: r.linuxKeyDataUpdated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("os_profile.0.linux_configuration.0.admin_password", "os_profile.0.custom_data"), + }) } -func (r OrchestratedVirtualMachineScaleSetResource) requiresImport(data acceptance.TestData) string { - return fmt.Sprintf(` 
-%s +func TestAccOrchestratedVirtualMachineScaleSet_basicApplicationSecurity(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} -resource "azurerm_orchestrated_virtual_machine_scale_set" "import" { - name = azurerm_orchestrated_virtual_machine_scale_set.test.name - location = azurerm_orchestrated_virtual_machine_scale_set.test.location - resource_group_name = azurerm_orchestrated_virtual_machine_scale_set.test.resource_group_name + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basicApplicationSecurity(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("os_profile.0.linux_configuration.0.admin_password"), + }) +} - platform_fault_domain_count = azurerm_orchestrated_virtual_machine_scale_set.test.platform_fault_domain_count - single_placement_group = azurerm_orchestrated_virtual_machine_scale_set.test.single_placement_group +func TestAccOrchestratedVirtualMachineScaleSet_basicAcceleratedNetworking(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basicAcceleratedNetworking(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("os_profile.0.linux_configuration.0.admin_password"), + }) } -`, r.basic(data)) + +func TestAccOrchestratedVirtualMachineScaleSet_basicIPForwarding(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basicIPForwarding(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("os_profile.0.linux_configuration.0.admin_password"), + }) } -func (r OrchestratedVirtualMachineScaleSetResource) basicNonZonal(data acceptance.TestData) string { - return fmt.Sprintf(` -%s +func TestAccOrchestratedVirtualMachineScaleSet_basicDNSSettings(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} -resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { - name = "acctestVMO-%d" - location = azurerm_resource_group.test.location - resource_group_name = azurerm_resource_group.test.name + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basicDNSSettings(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("os_profile.0.linux_configuration.0.admin_password"), + }) +} - platform_fault_domain_count = 2 +func TestAccOrchestratedVirtualMachineScaleSet_bootDiagnostic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} - tags = { - ENV = "Test" - } + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.bootDiagnostic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + }) } -`, r.template(data), data.RandomInteger) + +func TestAccOrchestratedVirtualMachineScaleSet_bootDiagnosticNoStorage(t *testing.T) { + data := 
acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.bootDiagnostic_noStorage(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + }) } -func (OrchestratedVirtualMachineScaleSetResource) template(data acceptance.TestData) string { - return fmt.Sprintf(` -provider "azurerm" { - features {} +func TestAccOrchestratedVirtualMachineScaleSet_networkSecurityGroup(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.networkSecurityGroup(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + }) } -resource "azurerm_resource_group" "test" { - name = "acctestRG-VMSS-%d" - location = "%s" +func TestAccOrchestratedVirtualMachineScaleSet_basicWindows(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basicWindows(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + }) } -`, data.RandomInteger, data.Locations.Primary) + +// Not Supported yet +// func TestAccOrchestratedVirtualMachineScaleSet_singlePlacementGroupFalse(t *testing.T) { +// data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") +// r := OrchestratedVirtualMachineScaleSetResource{} + +// data.ResourceTest(t, r, []acceptance.TestStep{ +// { +// Config: r.singlePlacementGroupFalse(data), +// Check: acceptance.ComposeTestCheckFunc( +// check.That(data.ResourceName).ExistsInAzure(r), +// ), +// }, +// }) +// } + +func TestAccOrchestratedVirtualMachineScaleSet_linuxUpdated(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.linux(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + { + Config: r.linuxUpdated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + }) +} + +func TestAccOrchestratedVirtualMachineScaleSet_customDataUpdated(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.linux(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + { + Config: r.linuxCustomDataUpdated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + }) +} + +func TestAccOrchestratedVirtualMachineScaleSet_basicLinux_managedDisk(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basicLinux_managedDisk(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + 
}, + data.ImportStep("os_profile.0.linux_configuration.0.admin_password"), + }) +} + +// Not Supported yet +// func TestAccOrchestratedVirtualMachineScaleSet_basicWindows_managedDisk(t *testing.T) { +// data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") +// r := OrchestratedVirtualMachineScaleSetResource{} + +// data.ResourceTest(t, r, []acceptance.TestStep{ +// { +// Config: r.basicWindows_managedDisk(data, "Standard_D1_v2"), +// Check: acceptance.ComposeTestCheckFunc( +// check.That(data.ResourceName).ExistsInAzure(r), +// ), +// }, +// }) +// } + +// Not Supported yet +// func TestAccOrchestratedVirtualMachineScaleSet_basicWindows_managedDisk_resize(t *testing.T) { +// data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") +// r := OrchestratedVirtualMachineScaleSetResource{} + +// data.ResourceTest(t, r, []acceptance.TestStep{ +// { +// Config: r.basicWindows_managedDisk(data, "Standard_D1_v2"), +// Check: acceptance.ComposeTestCheckFunc( +// check.That(data.ResourceName).ExistsInAzure(r), +// ), +// }, +// { +// Config: r.basicWindows_managedDisk(data, "Standard_D2_v2"), +// Check: acceptance.ComposeTestCheckFunc( +// check.That(data.ResourceName).ExistsInAzure(r), +// ), +// }, +// }) +// } + +func TestAccOrchestratedVirtualMachineScaleSet_basicLinux_disappears(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + data.DisappearsStep(acceptance.DisappearsStepData{ + Config: r.basic, + TestResource: r, + }), + }) +} + +// Need to setup subscription to accept 3rd Party agreement +// func TestAccOrchestratedVirtualMachineScaleSet_planManagedDisk(t *testing.T) { +// data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") +// r := OrchestratedVirtualMachineScaleSetResource{} + +// data.ResourceTest(t, r, []acceptance.TestStep{ +// { +// Config: r.planManagedDisk(data), +// Check: acceptance.ComposeTestCheckFunc( +// check.That(data.ResourceName).ExistsInAzure(r), +// ), +// }, +// }) +// } + +func TestAccOrchestratedVirtualMachineScaleSet_applicationGateway(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.applicationGatewayTemplate(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + data.CheckWithClient(r.hasApplicationGateway), + ), + }, + }) +} + +func TestAccOrchestratedVirtualMachineScaleSet_loadBalancer(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.loadBalancerTemplate(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + data.CheckWithClient(r.hasLoadBalancer), + ), + }, + data.ImportStep("os_profile.0.linux_configuration.0.admin_password"), + }) +} + +func TestAccOrchestratedVirtualMachineScaleSet_loadBalancerManagedDataDisks(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: 
r.loadBalancerTemplateManagedDataDisks(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("data_disk.#").HasValue("1"), + ), + }, + }) +} + +func TestAccOrchestratedVirtualMachineScaleSet_priority(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.priorityTemplate(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("priority").HasValue("Spot"), + check.That(data.ResourceName).Key("eviction_policy").HasValue("Deallocate"), + ), + }, + }) +} + +// func TestAccOrchestratedVirtualMachineScaleSet_extension(t *testing.T) { +// data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") +// r := OrchestratedVirtualMachineScaleSetResource{} + +// data.ResourceTest(t, r, []acceptance.TestStep{ +// { +// Config: r.extensionTemplate(data), +// Check: acceptance.ComposeTestCheckFunc( +// check.That(data.ResourceName).ExistsInAzure(r), +// ), +// }, +// data.ImportStep("os_profile.0.linux_configuration.0.admin_password"), +// }) +// } + +// func TestAccOrchestratedVirtualMachineScaleSet_extensionUpdate(t *testing.T) { +// data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") +// r := OrchestratedVirtualMachineScaleSetResource{} + +// data.ResourceTest(t, r, []acceptance.TestStep{ +// { +// Config: r.extensionTemplate(data), +// Check: acceptance.ComposeTestCheckFunc( +// check.That(data.ResourceName).ExistsInAzure(r), +// ), +// }, +// { +// Config: r.extensionTemplateUpdated(data), +// Check: acceptance.ComposeTestCheckFunc( +// check.That(data.ResourceName).ExistsInAzure(r), +// ), +// }, +// }) +// } + +// func TestAccOrchestratedVirtualMachineScaleSet_multipleExtensions(t *testing.T) { +// data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") +// r := OrchestratedVirtualMachineScaleSetResource{} + +// data.ResourceTest(t, r, []acceptance.TestStep{ +// { +// Config: r.multipleExtensionsTemplate(data), +// Check: acceptance.ComposeTestCheckFunc( +// check.That(data.ResourceName).ExistsInAzure(r), +// ), +// }, +// data.ImportStep("os_profile.0.linux_configuration.0.admin_password"), +// }) +// } + +// func TestAccOrchestratedVirtualMachineScaleSet_multipleExtensions_provision_after_extension(t *testing.T) { +// data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") +// r := OrchestratedVirtualMachineScaleSetResource{} + +// data.ResourceTest(t, r, []acceptance.TestStep{ +// { +// Config: r.multipleExtensionsTemplate_provision_after_extension(data), +// Check: acceptance.ComposeTestCheckFunc( +// check.That(data.ResourceName).ExistsInAzure(r), +// ), +// }, +// data.ImportStep("os_profile.0.linux_configuration.0.admin_password"), +// }) +// } + +func TestAccOrchestratedVirtualMachineScaleSet_NonStandardCasing(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.nonStandardCasing(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + { + Config: r.nonStandardCasing(data), + 
PlanOnly: true, + ExpectNonEmptyPlan: false, + }, + }) +} + +func TestAccOrchestratedVirtualMachineScaleSet_importLinux(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.linux(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep( + "os_profile.0.linux_configuration.0.admin_password", + "os_profile.0.custom_data", + ), + }) +} + +func TestAccOrchestratedVirtualMachineScaleSet_multipleNetworkProfiles(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.multipleNetworkProfiles(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + }) +} + +// Not Supported yet +// func TestAccOrchestratedVirtualMachineScaleSet_AutoUpdates(t *testing.T) { +// data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") +// r := OrchestratedVirtualMachineScaleSetResource{} + +// data.ResourceTest(t, r, []acceptance.TestStep{ +// { +// Config: r.rollingAutoUpdates(data), +// Check: acceptance.ComposeTestCheckFunc( +// check.That(data.ResourceName).ExistsInAzure(r), +// ), +// }, +// }) +// } + +// Not Supported yet +// func TestAccOrchestratedVirtualMachineScaleSet_upgradeModeUpdate(t *testing.T) { +// data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") +// r := OrchestratedVirtualMachineScaleSetResource{} + +// data.ResourceTest(t, r, []acceptance.TestStep{ +// { +// Config: r.upgradeModeUpdate(data, "Manual"), +// Check: acceptance.ComposeTestCheckFunc( +// check.That(data.ResourceName).ExistsInAzure(r), +// check.That(data.ResourceName).Key("upgrade_policy_mode").HasValue("Manual"), +// acceptance.TestCheckNoResourceAttr(data.ResourceName, "rolling_upgrade_policy.#"), +// ), +// }, +// { +// Config: r.upgradeModeUpdate(data, "Automatic"), +// Check: acceptance.ComposeTestCheckFunc( +// check.That(data.ResourceName).ExistsInAzure(r), +// check.That(data.ResourceName).Key("upgrade_policy_mode").HasValue("Automatic"), +// acceptance.TestCheckNoResourceAttr(data.ResourceName, "rolling_upgrade_policy.#"), +// ), +// }, +// { +// Config: r.upgradeModeUpdate(data, "Rolling"), +// Check: acceptance.ComposeTestCheckFunc( +// check.That(data.ResourceName).ExistsInAzure(r), +// check.That(data.ResourceName).Key("upgrade_policy_mode").HasValue("Rolling"), +// check.That(data.ResourceName).Key("rolling_upgrade_policy.#").HasValue("1"), +// check.That(data.ResourceName).Key("rolling_upgrade_policy.0.max_batch_instance_percent").HasValue("21"), +// check.That(data.ResourceName).Key("rolling_upgrade_policy.0.max_unhealthy_instance_percent").HasValue("22"), +// check.That(data.ResourceName).Key("rolling_upgrade_policy.0.max_unhealthy_upgraded_instance_percent").HasValue("23"), +// ), +// }, +// { +// PreConfig: func() { time.Sleep(1 * time.Minute) }, // VM Scale Set updates are not allowed while there is a Rolling Upgrade in progress. 
+// Config: r.upgradeModeUpdate(data, "Automatic"), +// Check: acceptance.ComposeTestCheckFunc( +// check.That(data.ResourceName).ExistsInAzure(r), +// check.That(data.ResourceName).Key("upgrade_policy_mode").HasValue("Automatic"), +// check.That(data.ResourceName).Key("rolling_upgrade_policy.#").HasValue("1"), +// check.That(data.ResourceName).Key("rolling_upgrade_policy.0.max_batch_instance_percent").HasValue("20"), +// check.That(data.ResourceName).Key("rolling_upgrade_policy.0.max_unhealthy_instance_percent").HasValue("20"), +// check.That(data.ResourceName).Key("rolling_upgrade_policy.0.max_unhealthy_upgraded_instance_percent").HasValue("20"), +// ), +// }, +// { +// Config: r.upgradeModeUpdate(data, "Manual"), +// Check: acceptance.ComposeTestCheckFunc( +// check.That(data.ResourceName).ExistsInAzure(r), +// check.That(data.ResourceName).Key("upgrade_policy_mode").HasValue("Manual"), +// check.That(data.ResourceName).Key("rolling_upgrade_policy.#").HasValue("1"), +// check.That(data.ResourceName).Key("rolling_upgrade_policy.0.max_batch_instance_percent").HasValue("20"), +// check.That(data.ResourceName).Key("rolling_upgrade_policy.0.max_unhealthy_instance_percent").HasValue("20"), +// check.That(data.ResourceName).Key("rolling_upgrade_policy.0.max_unhealthy_upgraded_instance_percent").HasValue("20"), +// ), +// }, +// }) +// } + +func TestAccOrchestratedVirtualMachineScaleSet_importBasic_managedDisk_withZones(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") + r := OrchestratedVirtualMachineScaleSetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basicLinux_managedDisk_withZones(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("os_profile.0.linux_configuration.0.admin_password"), + }) +} + +func (t OrchestratedVirtualMachineScaleSetResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := azure.ParseAzureResourceID(state.ID) + if err != nil { + return nil, err + } + resGroup := id.ResourceGroup + name := id.Path["virtualMachineScaleSets"] + + // Upgrading to the 2021-07-01 exposed a new expand parameter in the GET method + resp, err := clients.Compute.VMScaleSetClient.Get(ctx, resGroup, name, compute.ExpandTypesForGetVMScaleSetsUserData) + if err != nil { + return nil, fmt.Errorf("retrieving Virtual Machine Scale Set %q: %+v", id, err) + } + + return utils.Bool(resp.ID != nil), nil +} + +func (OrchestratedVirtualMachineScaleSetResource) Destroy(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.VirtualMachineScaleSetID(state.ID) + if err != nil { + return nil, err + } + + // this is a preview feature we don't want to use right now + var forceDelete *bool = nil + future, err := client.Compute.VMScaleSetClient.Delete(ctx, id.ResourceGroup, id.Name, forceDelete) + if err != nil { + return nil, fmt.Errorf("Bad: deleting %s: %+v", *id, err) + } + + if err = future.WaitForCompletionRef(ctx, client.Compute.VMScaleSetClient.Client); err != nil { + return nil, fmt.Errorf("Bad: waiting for deletion of %s: %+v", *id, err) + } + + return utils.Bool(true), nil +} + +func (OrchestratedVirtualMachineScaleSetResource) hasLoadBalancer(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) error { + id, err := parse.VirtualMachineScaleSetID(state.ID) + if err != nil { + return err + } + + // 
Upgrading to the 2021-07-01 exposed a new expand parameter in the GET method + read, err := client.Compute.VMScaleSetClient.Get(ctx, id.ResourceGroup, id.Name, compute.ExpandTypesForGetVMScaleSetsUserData) + if err != nil { + return err + } + + if props := read.VirtualMachineScaleSetProperties; props != nil { + if vmProfile := props.VirtualMachineProfile; vmProfile != nil { + if nwProfile := vmProfile.NetworkProfile; nwProfile != nil { + if nics := nwProfile.NetworkInterfaceConfigurations; nics != nil { + for _, nic := range *nics { + if nic.IPConfigurations == nil { + continue + } + + for _, config := range *nic.IPConfigurations { + if config.LoadBalancerBackendAddressPools == nil { + continue + } + + if len(*config.LoadBalancerBackendAddressPools) > 0 { + return nil + } + } + } + } + } + } + } + + return fmt.Errorf("load balancer configuration was missing") +} + +func (OrchestratedVirtualMachineScaleSetResource) hasApplicationGateway(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) error { + id, err := parse.VirtualMachineScaleSetID(state.ID) + if err != nil { + return err + } + + // Upgrading to the 2021-07-01 exposed a new expand parameter in the GET method + read, err := client.Compute.VMScaleSetClient.Get(ctx, id.ResourceGroup, id.Name, compute.ExpandTypesForGetVMScaleSetsUserData) + if err != nil { + return err + } + + if props := read.VirtualMachineScaleSetProperties; props != nil { + if vmProfile := props.VirtualMachineProfile; vmProfile != nil { + if nwProfile := vmProfile.NetworkProfile; nwProfile != nil { + if nics := nwProfile.NetworkInterfaceConfigurations; nics != nil { + for _, nic := range *nics { + if nic.IPConfigurations == nil { + continue + } + + for _, config := range *nic.IPConfigurations { + if config.ApplicationGatewayBackendAddressPools == nil { + continue + } + + if len(*config.ApplicationGatewayBackendAddressPools) > 0 { + return nil + } + } + } + } + } + } + } + + return fmt.Errorf("application gateway configuration was missing") +} + +// legacy test cases for backward compatibility validation +func (r OrchestratedVirtualMachineScaleSetResource) legacyBasic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + platform_fault_domain_count = 1 + + zones = ["1"] + + tags = { + ENV = "Test" + } +} +`, r.legacyTemplate(data), data.RandomInteger) +} + +func (r OrchestratedVirtualMachineScaleSetResource) legacyUpdate(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + platform_fault_domain_count = 1 + + zones = ["1"] + + tags = { + ENV = "Test", + FOO = "Bar" + } +} +`, r.legacyTemplate(data), data.RandomInteger) +} + +func (r OrchestratedVirtualMachineScaleSetResource) legacyRequiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_orchestrated_virtual_machine_scale_set" "import" { + name = azurerm_orchestrated_virtual_machine_scale_set.test.name + location = azurerm_orchestrated_virtual_machine_scale_set.test.location + resource_group_name = azurerm_orchestrated_virtual_machine_scale_set.test.resource_group_name + + platform_fault_domain_count = 
azurerm_orchestrated_virtual_machine_scale_set.test.platform_fault_domain_count +} +`, r.legacyBasic(data)) +} + +func (r OrchestratedVirtualMachineScaleSetResource) legacyBasicNonZonal(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + platform_fault_domain_count = 2 + + tags = { + ENV = "Test" + } +} +`, r.legacyTemplate(data), data.RandomInteger) +} + +func (OrchestratedVirtualMachineScaleSetResource) legacyTemplate(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-OVMSS-%d" + location = "%s" +} +`, data.RandomInteger, data.Locations.Primary) +} + +// Net new tests for the 2021-07-01 version of the API +func (OrchestratedVirtualMachineScaleSetResource) basic(data acceptance.TestData) string { + r := OrchestratedVirtualMachineScaleSetResource{} + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-OVMSS-%[1]d" + location = "%[2]s" +} + +%[3]s + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + zones = [] + + sku_name = "Standard_D1_v2" + instances = 2 + + platform_fault_domain_count = 2 + + os_profile { + linux_configuration { + computer_name_prefix = "testvm-%[1]d" + admin_username = "myadmin" + admin_password = "Passwword1234" + + disable_password_authentication = false + } + } + + network_interface { + name = "TestNetworkProfile-%[1]d" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + primary = true + subnet_id = azurerm_subnet.test.id + + public_ip_address { + name = "TestPublicIPConfiguration" + domain_name_label = "test-domain-label" + idle_timeout_in_minutes = 4 + } + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, data.RandomInteger, data.Locations.Primary, r.natgateway_template(data)) +} + +func (r OrchestratedVirtualMachineScaleSetResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_orchestrated_virtual_machine_scale_set" "import" { + name = azurerm_orchestrated_virtual_machine_scale_set.test.name + location = azurerm_orchestrated_virtual_machine_scale_set.test.location + resource_group_name = azurerm_orchestrated_virtual_machine_scale_set.test.resource_group_name + + sku_name = "Standard_D1_v2" + instances = 2 + + platform_fault_domain_count = 2 + + os_profile { + linux_configuration { + computer_name_prefix = "testvm-%d" + admin_username = "myadmin" + admin_password = "Passwword1234" + + disable_password_authentication = false + } + } + + network_interface { + name = "TestNetworkProfile-%d" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + primary = true + subnet_id = azurerm_subnet.test.id + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, r.basic(data), data.RandomInteger, 
data.RandomInteger) +} + +func (OrchestratedVirtualMachineScaleSetResource) evictionPolicyDelete(data acceptance.TestData) string { + r := OrchestratedVirtualMachineScaleSetResource{} + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-OVMSS-%[1]d" + location = "%[2]s" +} + +%[3]s + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + priority = "Spot" + eviction_policy = "Delete" + + sku_name = "Standard_D1_v2" + instances = 2 + + platform_fault_domain_count = 2 + + os_profile { + linux_configuration { + computer_name_prefix = "testvm-%[1]d" + admin_username = "myadmin" + admin_password = "Passwword1234" + + disable_password_authentication = false + } + } + + network_interface { + name = "TestNetworkProfile-%[1]d" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + primary = true + subnet_id = azurerm_subnet.test.id + + public_ip_address { + name = "TestPublicIPConfiguration" + domain_name_label = "test-domain-label" + idle_timeout_in_minutes = 4 + } + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, data.RandomInteger, data.Locations.Primary, r.natgateway_template(data)) +} + +func (OrchestratedVirtualMachineScaleSetResource) withPPG(data acceptance.TestData) string { + r := OrchestratedVirtualMachineScaleSetResource{} + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-OVMSS-%[1]d" + location = "%[2]s" +} + +%[3]s + +resource "azurerm_proximity_placement_group" "test" { + name = "accPPG-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + sku_name = "Standard_D1_v2" + instances = 2 + + platform_fault_domain_count = 2 + + os_profile { + linux_configuration { + computer_name_prefix = "testvm-%[1]d" + admin_username = "myadmin" + admin_password = "Passwword1234" + + disable_password_authentication = false + } + } + + network_interface { + name = "TestNetworkProfile-%[1]d" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + primary = true + subnet_id = azurerm_subnet.test.id + + public_ip_address { + name = "TestPublicIPConfiguration" + domain_name_label = "test-domain-label" + idle_timeout_in_minutes = 4 + } + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } + + proximity_placement_group_id = azurerm_proximity_placement_group.test.id +} +`, data.RandomInteger, data.Locations.Primary, r.natgateway_template(data)) +} + +func (OrchestratedVirtualMachineScaleSetResource) basicPublicIP(data acceptance.TestData) string { + r := OrchestratedVirtualMachineScaleSetResource{} + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-OVMSS-%[1]d" + location = "%[2]s" +} + +%[3]s + +resource 
"azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + sku_name = "Standard_D1_v2" + instances = 2 + + platform_fault_domain_count = 2 + + os_profile { + linux_configuration { + computer_name_prefix = "testvm-%[1]d" + admin_username = "myadmin" + admin_password = "Passwword1234" + + disable_password_authentication = false + } + } + + network_interface { + name = "TestNetworkProfile-%[1]d" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + subnet_id = azurerm_subnet.test.id + primary = true + + public_ip_address { + name = "TestPublicIPConfiguration" + domain_name_label = "test-domain-label" + idle_timeout_in_minutes = 4 + } + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, data.RandomInteger, data.Locations.Primary, r.natgateway_template(data)) +} + +func (OrchestratedVirtualMachineScaleSetResource) basicEmptyPublicIP(data acceptance.TestData) string { + r := OrchestratedVirtualMachineScaleSetResource{} + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-OVMSS-%[1]d" + location = "%[2]s" +} + +%[3]s + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + tags = { + state = "create" + } + + sku_name = "Standard_D1_v2" + instances = 0 + + platform_fault_domain_count = 2 + + os_profile { + linux_configuration { + computer_name_prefix = "testvm-%[1]d" + admin_username = "myadmin" + admin_password = "Passwword1234" + + disable_password_authentication = false + } + } + + network_interface { + name = "TestNetworkProfile-%[1]d" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + subnet_id = azurerm_subnet.test.id + primary = true + + public_ip_address { + name = "TestPublicIPConfiguration" + domain_name_label = "test-domain-label" + idle_timeout_in_minutes = 4 + } + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, data.RandomInteger, data.Locations.Primary, r.natgateway_template(data)) +} + +func (OrchestratedVirtualMachineScaleSetResource) basicEmptyPublicIP_updated_tags(data acceptance.TestData) string { + r := OrchestratedVirtualMachineScaleSetResource{} + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-OVMSS-%[1]d" + location = "%[2]s" +} + +%[3]s + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + tags = { + state = "update" + } + + sku_name = "Standard_D1_v2" + instances = 0 + + platform_fault_domain_count = 2 + + os_profile { + linux_configuration { + computer_name_prefix = "testvm-%[1]d" + admin_username = "myadmin" + admin_password = "Passwword1234" + + disable_password_authentication = false + } + } + + network_interface { + name = "TestNetworkProfile-%[1]d" + primary = true + + ip_configuration { + name = 
"TestIPConfiguration" + subnet_id = azurerm_subnet.test.id + primary = true + + public_ip_address { + name = "TestPublicIPConfiguration" + domain_name_label = "test-domain-label" + idle_timeout_in_minutes = 4 + } + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, data.RandomInteger, data.Locations.Primary, r.natgateway_template(data)) +} + +func (OrchestratedVirtualMachineScaleSetResource) basicEmptyNetworkProfile_true_ipforwarding(data acceptance.TestData) string { + r := OrchestratedVirtualMachineScaleSetResource{} + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-OVMSS-%[1]d" + location = "%[2]s" +} + +%[3]s + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + tags = { + state = "update" + } + + sku_name = "Standard_D1_v2" + instances = 0 + + platform_fault_domain_count = 2 + + os_profile { + linux_configuration { + computer_name_prefix = "testvm-%[1]d" + admin_username = "myadmin" + admin_password = "Passwword1234" + + disable_password_authentication = false + } + } + + network_interface { + name = "TestNetworkProfile-%[1]d" + primary = true + enable_ip_forwarding = true + + ip_configuration { + name = "TestIPConfiguration" + subnet_id = azurerm_subnet.test.id + primary = true + + public_ip_address { + name = "TestPublicIPConfiguration" + domain_name_label = "test-domain-label" + idle_timeout_in_minutes = 4 + } + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, data.RandomInteger, data.Locations.Primary, r.natgateway_template(data)) +} + +func (OrchestratedVirtualMachineScaleSetResource) basicEmptyPublicIP_updatedDNS_label(data acceptance.TestData) string { + r := OrchestratedVirtualMachineScaleSetResource{} + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-OVMSS-%[1]d" + location = "%[2]s" +} + +%[3]s + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + tags = { + state = "create" + } + + sku_name = "Standard_D1_v2" + instances = 0 + + platform_fault_domain_count = 2 + + os_profile { + linux_configuration { + computer_name_prefix = "testvm-%[1]d" + admin_username = "myadmin" + admin_password = "Passwword1234" + + disable_password_authentication = false + } + } + + network_interface { + name = "TestNetworkProfile-%[1]d" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + subnet_id = azurerm_subnet.test.id + primary = true + + public_ip_address { + name = "TestPublicIPConfiguration" + domain_name_label = "updated-domain-label" + idle_timeout_in_minutes = 4 + } + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, data.RandomInteger, data.Locations.Primary, r.natgateway_template(data)) +} + +func 
(OrchestratedVirtualMachineScaleSetResource) basicApplicationSecurity(data acceptance.TestData) string { + r := OrchestratedVirtualMachineScaleSetResource{} + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-OVMSS-%[1]d" + location = "%[2]s" +} + +%[3]s + +resource "azurerm_application_security_group" "test" { + location = azurerm_resource_group.test.location + name = "TestApplicationSecurityGroup" + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + sku_name = "Standard_D1_v2" + instances = 1 + + platform_fault_domain_count = 2 + + os_profile { + linux_configuration { + computer_name_prefix = "testvm-%[1]d" + admin_username = "myadmin" + admin_password = "Passwword1234" + + disable_password_authentication = false + } + } + + network_interface { + name = "TestNetworkProfile-%[1]d" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + primary = true + subnet_id = azurerm_subnet.test.id + + public_ip_address { + name = "TestPublicIPConfiguration" + domain_name_label = "test-domain-label" + idle_timeout_in_minutes = 4 + } + + application_security_group_ids = [azurerm_application_security_group.test.id] + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, data.RandomInteger, data.Locations.Primary, r.natgateway_template(data)) +} + +func (OrchestratedVirtualMachineScaleSetResource) basicAcceleratedNetworking(data acceptance.TestData) string { + r := OrchestratedVirtualMachineScaleSetResource{} + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-OVMSS-%[1]d" + location = "%[2]s" +} + +%[3]s + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + sku_name = "Standard_D4_v2" + instances = 2 + + platform_fault_domain_count = 2 + + os_profile { + linux_configuration { + computer_name_prefix = "testvm-%[1]d" + admin_username = "myadmin" + admin_password = "Passwword1234" + + disable_password_authentication = false + } + } + + network_interface { + name = "TestNetworkProfile-%[1]d" + primary = true + enable_accelerated_networking = true + + ip_configuration { + name = "TestIPConfiguration" + primary = true + subnet_id = azurerm_subnet.test.id + + public_ip_address { + name = "TestPublicIPConfiguration" + domain_name_label = "test-domain-label" + idle_timeout_in_minutes = 4 + } + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, data.RandomInteger, data.Locations.Primary, r.natgateway_template(data)) +} + +func (OrchestratedVirtualMachineScaleSetResource) basicIPForwarding(data acceptance.TestData) string { + r := OrchestratedVirtualMachineScaleSetResource{} + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-OVMSS-%[1]d" + location = "%[2]s" +} + +%[3]s + 
+resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + sku_name = "Standard_D4_v2" + instances = 2 + + platform_fault_domain_count = 2 + + os_profile { + linux_configuration { + computer_name_prefix = "testvm-%[1]d" + admin_username = "myadmin" + admin_password = "Passwword1234" + + disable_password_authentication = false + } + } + + network_interface { + name = "TestNetworkProfile-%[1]d" + primary = true + enable_ip_forwarding = true + + ip_configuration { + name = "TestIPConfiguration" + primary = true + subnet_id = azurerm_subnet.test.id + + public_ip_address { + name = "TestPublicIPConfiguration" + domain_name_label = "test-domain-label" + idle_timeout_in_minutes = 4 + } + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, data.RandomInteger, data.Locations.Primary, r.natgateway_template(data)) +} + +func (OrchestratedVirtualMachineScaleSetResource) basicDNSSettings(data acceptance.TestData) string { + r := OrchestratedVirtualMachineScaleSetResource{} + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-OVMSS-%[1]d" + location = "%[2]s" +} + +%[3]s + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + sku_name = "Standard_D4_v2" + instances = 2 + + platform_fault_domain_count = 2 + + os_profile { + linux_configuration { + computer_name_prefix = "testvm-%[1]d" + admin_username = "myadmin" + admin_password = "Passwword1234" + + disable_password_authentication = false + } + } + + network_interface { + name = "TestNetworkProfile-%[1]d" + primary = true + + dns_servers = ["8.8.8.8", "8.8.4.4"] + + ip_configuration { + name = "TestIPConfiguration" + primary = true + subnet_id = azurerm_subnet.test.id + + public_ip_address { + name = "TestPublicIPConfiguration" + domain_name_label = "test-domain-label" + idle_timeout_in_minutes = 4 + } + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, data.RandomInteger, data.Locations.Primary, r.natgateway_template(data)) +} + +func (OrchestratedVirtualMachineScaleSetResource) networkSecurityGroup(data acceptance.TestData) string { + r := OrchestratedVirtualMachineScaleSetResource{} + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-OVMSS-%[1]d" + location = "%[2]s" +} + +%[3]s + +resource "azurerm_network_security_group" "test" { + name = "acceptanceTestSecurityGroup-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + sku_name = "Standard_D1_v2" + instances = 2 + + platform_fault_domain_count = 2 + + os_profile { + linux_configuration { + computer_name_prefix = "testvm-%[1]d" + admin_username = 
"myadmin" + admin_password = "Passwword1234" + + disable_password_authentication = false + } + } + + network_interface { + name = "TestNetworkProfile-%[1]d" + primary = true + network_security_group_id = azurerm_network_security_group.test.id + + ip_configuration { + name = "TestIPConfiguration" + subnet_id = azurerm_subnet.test.id + primary = true + + public_ip_address { + name = "TestPublicIPConfiguration" + domain_name_label = "test-domain-label" + idle_timeout_in_minutes = 4 + } + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, data.RandomInteger, data.Locations.Primary, r.natgateway_template(data)) +} + +func (OrchestratedVirtualMachineScaleSetResource) basicWindows(data acceptance.TestData) string { + r := OrchestratedVirtualMachineScaleSetResource{} + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-OVMSS-%[1]d" + location = "%[2]s" +} + +%[3]s + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + sku_name = "Standard_D1_v2" + instances = 2 + + platform_fault_domain_count = 2 + + os_profile { + windows_configuration { + computer_name_prefix = "testvm" + admin_username = "myadmin" + admin_password = "Passwword1234" + + enable_automatic_updates = false + provision_vm_agent = true + timezone = "W. Europe Standard Time" + + winrm_listener { + protocol = "Http" + } + } + } + + network_interface { + name = "TestNetworkProfile-%[1]d" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + primary = true + subnet_id = azurerm_subnet.test.id + + public_ip_address { + name = "TestPublicIPConfiguration" + domain_name_label = "test-domain-label" + idle_timeout_in_minutes = 4 + } + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "MicrosoftWindowsServer" + offer = "WindowsServer" + sku = "2016-Datacenter-Server-Core" + version = "latest" + } +} +`, data.RandomInteger, data.Locations.Primary, r.natgateway_template(data)) +} + +func (OrchestratedVirtualMachineScaleSetResource) linux(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-OVMSS-%[1]d" + location = "%[2]s" +} + +resource "azurerm_virtual_network" "test" { + name = "acctestvn-%[1]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + address_space = ["10.0.0.0/8"] +} + +resource "azurerm_subnet" "test" { + name = "acctestsn-%[1]d" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name + address_prefixes = ["10.0.1.0/24"] +} + +resource "azurerm_public_ip" "test" { + name = "acctestpip-%[1]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + allocation_method = "Static" + sku = "Standard" +} + +resource "azurerm_lb" "test" { + name = "acctestlb-%[1]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + + sku = "Standard" + + frontend_ip_configuration { + name = "ip-address" + public_ip_address_id = 
azurerm_public_ip.test.id + } +} + +resource "azurerm_lb_backend_address_pool" "test" { + name = "acctestbap-%[1]d" + loadbalancer_id = azurerm_lb.test.id +} + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%[1]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + + sku_name = "Standard_F2" + instances = 1 + + platform_fault_domain_count = 2 + + os_profile { + custom_data = "Y3VzdG9tIGRhdGEh" + + linux_configuration { + computer_name_prefix = "prefix" + admin_username = "ubuntu" + + admin_ssh_key { + username = "ubuntu" + public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDCsTcryUl51Q2VSEHqDRNmceUFo55ZtcIwxl2QITbN1RREti5ml/VTytC0yeBOvnZA4x4CFpdw/lCDPk0yrH9Ei5vVkXmOrExdTlT3qI7YaAzj1tUVlBd4S6LX1F7y6VLActvdHuDDuXZXzCDd/97420jrDfWZqJMlUK/EmCE5ParCeHIRIvmBxcEnGfFIsw8xQZl0HphxWOtJil8qsUWSdMyCiJYYQpMoMliO99X40AUc4/AlsyPyT5ddbKk08YrZ+rKDVHF7o29rh4vi5MmHkVgVQHKiKybWlHq+b71gIAUQk9wrJxD+dqt4igrmDSpIjfjwnd+l5UIn5fJSO5DYV4YT/4hwK7OKmuo7OFHD0WyY5YnkYEMtFgzemnRBdE8ulcT60DQpVgRMXFWHvhyCWy0L6sgj1QWDZlLpvsIvNfHsyhKFMG1frLnMt/nP0+YCcfg+v1JYeCKjeoJxB8DWcRBsjzItY0CGmzP8UYZiYKl/2u+2TgFS5r7NWH11bxoUzjKdaa1NLw+ieA8GlBFfCbfWe6YVB9ggUte4VtYFMZGxOjS2bAiYtfgTKFJv+XqORAwExG6+G2eDxIDyo80/OA9IG7Xv/jwQr7D6KDjDuULFcN/iTxuttoKrHeYz1hf5ZQlBdllwJHYx6fK2g8kha6r2JIQKocvsAXiiONqSfw== hello@world.com" + } + } + } + + network_interface { + name = "TestNetworkProfile" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + primary = true + subnet_id = azurerm_subnet.test.id + + load_balancer_backend_address_pool_ids = [azurerm_lb_backend_address_pool.test.id] + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, data.RandomInteger, data.Locations.Primary) +} + +func (OrchestratedVirtualMachineScaleSetResource) linuxUpdated(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-OVMSS-%[1]d" + location = "%[2]s" +} + +resource "azurerm_virtual_network" "test" { + name = "acctestvn-%[1]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + address_space = ["10.0.0.0/8"] +} + +resource "azurerm_subnet" "test" { + name = "acctestsn-%[1]d" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name + address_prefixes = ["10.0.1.0/24"] +} + +resource "azurerm_public_ip" "test" { + name = "acctestpip-%[1]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + allocation_method = "Static" + sku = "Standard" +} + +resource "azurerm_lb" "test" { + name = "acctestlb-%[1]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + + sku = "Standard" + + frontend_ip_configuration { + name = "ip-address" + public_ip_address_id = azurerm_public_ip.test.id + } +} + +resource "azurerm_lb_backend_address_pool" "test" { + name = "acctestbap-%[1]d" + loadbalancer_id = azurerm_lb.test.id +} + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%[1]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + + sku_name = "Standard_F2" + instances = 1 + + 
platform_fault_domain_count = 2 + + os_profile { + custom_data = "Y3VzdG9tIGRhdGEh" + + linux_configuration { + computer_name_prefix = "prefix" + admin_username = "ubuntu" + + admin_ssh_key { + username = "ubuntu" + public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDCsTcryUl51Q2VSEHqDRNmceUFo55ZtcIwxl2QITbN1RREti5ml/VTytC0yeBOvnZA4x4CFpdw/lCDPk0yrH9Ei5vVkXmOrExdTlT3qI7YaAzj1tUVlBd4S6LX1F7y6VLActvdHuDDuXZXzCDd/97420jrDfWZqJMlUK/EmCE5ParCeHIRIvmBxcEnGfFIsw8xQZl0HphxWOtJil8qsUWSdMyCiJYYQpMoMliO99X40AUc4/AlsyPyT5ddbKk08YrZ+rKDVHF7o29rh4vi5MmHkVgVQHKiKybWlHq+b71gIAUQk9wrJxD+dqt4igrmDSpIjfjwnd+l5UIn5fJSO5DYV4YT/4hwK7OKmuo7OFHD0WyY5YnkYEMtFgzemnRBdE8ulcT60DQpVgRMXFWHvhyCWy0L6sgj1QWDZlLpvsIvNfHsyhKFMG1frLnMt/nP0+YCcfg+v1JYeCKjeoJxB8DWcRBsjzItY0CGmzP8UYZiYKl/2u+2TgFS5r7NWH11bxoUzjKdaa1NLw+ieA8GlBFfCbfWe6YVB9ggUte4VtYFMZGxOjS2bAiYtfgTKFJv+XqORAwExG6+G2eDxIDyo80/OA9IG7Xv/jwQr7D6KDjDuULFcN/iTxuttoKrHeYz1hf5ZQlBdllwJHYx6fK2g8kha6r2JIQKocvsAXiiONqSfw== hello@world.com" + } + } + } + + network_interface { + name = "TestNetworkProfile" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + primary = true + subnet_id = azurerm_subnet.test.id + + load_balancer_backend_address_pool_ids = [azurerm_lb_backend_address_pool.test.id] + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } + + tags = { + ThisIs = "a test" + } +} +`, data.RandomInteger, data.Locations.Primary) +} + +func (OrchestratedVirtualMachineScaleSetResource) linuxCustomDataUpdated(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-OVMSS-%[1]d" + location = "%[2]s" +} + +resource "azurerm_virtual_network" "test" { + name = "acctestvn-%[1]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + address_space = ["10.0.0.0/8"] +} + +resource "azurerm_subnet" "test" { + name = "acctestsn-%[1]d" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name + address_prefixes = ["10.0.1.0/24"] +} + +resource "azurerm_public_ip" "test" { + name = "acctestpip-%[1]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + allocation_method = "Static" + sku = "Standard" +} + +resource "azurerm_lb" "test" { + name = "acctestlb-%[1]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + + sku = "Standard" + + frontend_ip_configuration { + name = "ip-address" + public_ip_address_id = azurerm_public_ip.test.id + } +} + +resource "azurerm_lb_backend_address_pool" "test" { + name = "acctestbap-%[1]d" + loadbalancer_id = azurerm_lb.test.id +} + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%[1]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + + sku_name = "Standard_F2" + instances = 1 + + platform_fault_domain_count = 2 + + os_profile { + custom_data = "dXBkYXRlZCBjdXN0b20gZGF0YSE=" + + linux_configuration { + computer_name_prefix = "prefix" + admin_username = "ubuntu" + + admin_ssh_key { + username = "ubuntu" + public_key = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDCsTcryUl51Q2VSEHqDRNmceUFo55ZtcIwxl2QITbN1RREti5ml/VTytC0yeBOvnZA4x4CFpdw/lCDPk0yrH9Ei5vVkXmOrExdTlT3qI7YaAzj1tUVlBd4S6LX1F7y6VLActvdHuDDuXZXzCDd/97420jrDfWZqJMlUK/EmCE5ParCeHIRIvmBxcEnGfFIsw8xQZl0HphxWOtJil8qsUWSdMyCiJYYQpMoMliO99X40AUc4/AlsyPyT5ddbKk08YrZ+rKDVHF7o29rh4vi5MmHkVgVQHKiKybWlHq+b71gIAUQk9wrJxD+dqt4igrmDSpIjfjwnd+l5UIn5fJSO5DYV4YT/4hwK7OKmuo7OFHD0WyY5YnkYEMtFgzemnRBdE8ulcT60DQpVgRMXFWHvhyCWy0L6sgj1QWDZlLpvsIvNfHsyhKFMG1frLnMt/nP0+YCcfg+v1JYeCKjeoJxB8DWcRBsjzItY0CGmzP8UYZiYKl/2u+2TgFS5r7NWH11bxoUzjKdaa1NLw+ieA8GlBFfCbfWe6YVB9ggUte4VtYFMZGxOjS2bAiYtfgTKFJv+XqORAwExG6+G2eDxIDyo80/OA9IG7Xv/jwQr7D6KDjDuULFcN/iTxuttoKrHeYz1hf5ZQlBdllwJHYx6fK2g8kha6r2JIQKocvsAXiiONqSfw== hello@world.com" + } + } + } + + network_interface { + name = "TestNetworkProfile" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + primary = true + subnet_id = azurerm_subnet.test.id + + load_balancer_backend_address_pool_ids = [azurerm_lb_backend_address_pool.test.id] + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, data.RandomInteger, data.Locations.Primary) +} + +func (OrchestratedVirtualMachineScaleSetResource) bootDiagnostic(data acceptance.TestData) string { + r := OrchestratedVirtualMachineScaleSetResource{} + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%[1]d" + location = "%[2]s" +} + +%[3]s + +resource "azurerm_storage_account" "test" { + name = "accsa%[1]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + account_tier = "Standard" + account_replication_type = "LRS" + + tags = { + environment = "staging" + } +} + +resource "azurerm_storage_container" "test" { + name = "vhds" + storage_account_name = azurerm_storage_account.test.name + container_access_type = "private" +} + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + sku_name = "Standard_D1_v2" + instances = 2 + + platform_fault_domain_count = 2 + + os_profile { + linux_configuration { + computer_name_prefix = "testvm-%[1]d" + admin_username = "myadmin" + admin_password = "Passwword1234" + + disable_password_authentication = false + } + } + + boot_diagnostics { + storage_account_uri = azurerm_storage_account.test.primary_blob_endpoint + } + + network_interface { + name = "TestNetworkProfile" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + primary = true + subnet_id = azurerm_subnet.test.id + + public_ip_address { + name = "TestPublicIPConfiguration" + domain_name_label = "test-domain-label" + idle_timeout_in_minutes = 4 + } + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, data.RandomInteger, data.Locations.Primary, r.natgateway_template(data)) +} + +func (OrchestratedVirtualMachineScaleSetResource) bootDiagnostic_noStorage(data acceptance.TestData) string { + r := OrchestratedVirtualMachineScaleSetResource{} + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = 
"acctestRG-%[1]d" + location = "%[2]s" +} + +%[3]s + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + sku_name = "Standard_D1_v2" + instances = 2 + + platform_fault_domain_count = 2 + + os_profile { + linux_configuration { + computer_name_prefix = "testvm-%[1]d" + admin_username = "myadmin" + admin_password = "Passwword1234" + + disable_password_authentication = false + } + } + + boot_diagnostics {} + + network_interface { + name = "TestNetworkProfile" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + primary = true + subnet_id = azurerm_subnet.test.id + + public_ip_address { + name = "TestPublicIPConfiguration" + domain_name_label = "test-domain-label" + idle_timeout_in_minutes = 4 + } + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, data.RandomInteger, data.Locations.Primary, r.natgateway_template(data)) +} + +func (OrchestratedVirtualMachineScaleSetResource) linuxKeyDataUpdated(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-OVMSS-%[1]d" + location = "%[2]s" +} + +resource "azurerm_virtual_network" "test" { + name = "acctestvn-%[1]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + address_space = ["10.0.0.0/8"] +} + +resource "azurerm_subnet" "test" { + name = "acctestsn-%[1]d" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name + address_prefixes = ["10.0.1.0/24"] +} + +resource "azurerm_public_ip" "test" { + name = "acctestpip-%[1]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + allocation_method = "Static" + sku = "Standard" +} + +resource "azurerm_lb" "test" { + name = "acctestlb-%[1]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + + sku = "Standard" + + frontend_ip_configuration { + name = "ip-address" + public_ip_address_id = azurerm_public_ip.test.id + } +} + +resource "azurerm_lb_backend_address_pool" "test" { + name = "acctestbap-%[1]d" + loadbalancer_id = azurerm_lb.test.id +} + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%[1]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + + sku_name = "Standard_F2" + instances = 1 + + platform_fault_domain_count = 2 + + os_profile { + custom_data = "Y3VzdG9tIGRhdGEh" + + linux_configuration { + computer_name_prefix = "prefix" + admin_username = "ubuntu" + + admin_ssh_key { + username = "ubuntu" + public_key = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDvXYZAjVUt2aojUV3XIA+PY6gXrgbvktXwf2NoIHGlQFhogpMEyOfqgogCtTBM7MNCS3ELul6SV+mlpH08Ki45ADIQuDXdommCvsMFW096JrsHOJpGfjCsJ1gbbv7brB3Ag+BSGb4qO3pRsEVTtZCeJDwfH5D7vmqP5xXcELKR4UAtKQKUhLvt6mhW90sFLTJeOTiYGbavIKqfCUFSeSMQkUPr8o3uzOfeWyCw7tc7szLuvfwJ5poGHuve73KKAlUnDTPUrhyj7iITZSDl+/i+bpDzPyCyJWDMsC0ON7q2fDr2mEz0L9ACrsI5Nx3lt5fe+IaHSrjivqnL8SqUWSN45o9Qp99sGWFiuTfos8f1jp+AXzC4ArVtKyRg/CnzKRiK0CGSxBJ5s9zAoa7yBBmjCszq89vFa0eMgpEIZFwa6kKJKt9AfRBXgO9YGPV4uaN7topy92/p2pE+vF8IafarbvnTDOQt62mS07tXYqYg1DhecrmBVWKlq9oafBweoeTjoq52SoGsuDc/YAOzIgWVIuvV8yKoh9KbXPWowjLtxDhRIS/d1nMMNdNI8X0TQivgi5+umMgAXhsVAKSNDUauLt4jimYkWAuE+R6KoCqVFdaB9bQDySBjAziruDSe3reToydjzzluvHMjWK8QiDynxs41pi4zZz6gAlca3QPkEQ== hello@world.com" + } + } + } + + network_interface { + name = "TestNetworkProfile" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + primary = true + subnet_id = azurerm_subnet.test.id + + load_balancer_backend_address_pool_ids = [azurerm_lb_backend_address_pool.test.id] + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, data.RandomInteger, data.Locations.Primary) +} + +func (OrchestratedVirtualMachineScaleSetResource) basicLinux_managedDisk(data acceptance.TestData) string { + r := OrchestratedVirtualMachineScaleSetResource{} + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-OVMSS-%[1]d" + location = "%[2]s" +} + +%[3]s + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + sku_name = "Standard_D1_v2" + instances = 2 + + platform_fault_domain_count = 2 + + os_profile { + linux_configuration { + computer_name_prefix = "testvm-%[1]d" + admin_username = "myadmin" + admin_password = "Passwword1234" + + disable_password_authentication = false + } + } + + network_interface { + name = "TestNetworkProfile-%[1]d" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + primary = true + subnet_id = azurerm_subnet.test.id + + public_ip_address { + name = "TestPublicIPConfiguration" + domain_name_label = "test-domain-label" + idle_timeout_in_minutes = 4 + } + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, data.RandomInteger, data.Locations.Primary, r.natgateway_template(data)) +} + +func (OrchestratedVirtualMachineScaleSetResource) basicLinux_managedDisk_withZones(data acceptance.TestData) string { + r := OrchestratedVirtualMachineScaleSetResource{} + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-OVMSS-%[1]d" + location = "%[2]s" +} + +%[3]s + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + zones = ["1"] + platform_fault_domain_count = 1 + + sku_name = "Standard_D1_v2" + instances = 2 + + os_profile { + linux_configuration { + computer_name_prefix = "testvm-%[1]d" + admin_username = "myadmin" + admin_password = "Passwword1234" + + 
disable_password_authentication = false + } + } + + network_interface { + name = "TestNetworkProfile-%[1]d" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + primary = true + subnet_id = azurerm_subnet.test.id + + public_ip_address { + name = "TestPublicIPConfiguration" + domain_name_label = "test-domain-label" + idle_timeout_in_minutes = 4 + } + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, data.RandomInteger, data.Locations.Primary, r.natgateway_template(data)) +} + +func (OrchestratedVirtualMachineScaleSetResource) applicationGatewayTemplate(data acceptance.TestData) string { + r := OrchestratedVirtualMachineScaleSetResource{} + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-OVMSS-%[1]d" + location = "%[2]s" +} + +%[3]s + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + sku_name = "Standard_D1_v2" + instances = 1 + + platform_fault_domain_count = 2 + + os_profile { + linux_configuration { + computer_name_prefix = "testvm-%[1]d" + admin_username = "myadmin" + admin_password = "Passwword1234" + + disable_password_authentication = false + } + } + + network_interface { + name = "TestNetworkProfile" + primary = true + + ip_configuration { + name = "TestPublicIPConfiguration" + primary = true + subnet_id = azurerm_subnet.test.id + + application_gateway_backend_address_pool_ids = [azurerm_application_gateway.test.backend_address_pool[0].id] + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} + +# application gateway +resource "azurerm_subnet" "gwtest" { + name = "gw-subnet-%[1]d" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name + address_prefixes = ["10.0.3.0/24"] +} + +resource "azurerm_public_ip" "gwtest" { + name = "acctest-pubip-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + allocation_method = "Dynamic" +} + +resource "azurerm_application_gateway" "test" { + name = "acctestgw-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + sku { + name = "Standard_Medium" + tier = "Standard" + capacity = 1 + } + + gateway_ip_configuration { + name = "gw-ip-config1" + subnet_id = azurerm_subnet.gwtest.id + } + + frontend_ip_configuration { + name = "ip-config-public" + public_ip_address_id = azurerm_public_ip.gwtest.id + } + + frontend_ip_configuration { + name = "ip-config-private" + subnet_id = azurerm_subnet.gwtest.id + + private_ip_address_allocation = "Dynamic" + } + + frontend_port { + name = "port-8080" + port = 8080 + } + + backend_address_pool { + name = "pool-1" + } + + backend_http_settings { + name = "backend-http-1" + port = 8010 + protocol = "Http" + cookie_based_affinity = "Enabled" + request_timeout = 30 + + probe_name = "probe-1" + } + + http_listener { + name = "listener-1" + frontend_ip_configuration_name = "ip-config-public" + frontend_port_name = "port-8080" + protocol = "Http" + } + + probe { 
+ name = "probe-1" + protocol = "Http" + path = "/test" + host = "azure.com" + timeout = 120 + interval = 300 + unhealthy_threshold = 8 + } + + request_routing_rule { + name = "rule-basic-1" + rule_type = "Basic" + http_listener_name = "listener-1" + backend_address_pool_name = "pool-1" + backend_http_settings_name = "backend-http-1" + } + + tags = { + environment = "tf01" + } +} +`, data.RandomInteger, data.Locations.Primary, r.natgateway_template(data)) +} + +func (OrchestratedVirtualMachineScaleSetResource) loadBalancerTemplate(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-OVMSS-%[1]d" + location = "%[2]s" +} + +resource "azurerm_virtual_network" "test" { + name = "acctvn-%[1]d" + address_space = ["10.0.0.0/16"] + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_subnet" "test" { + name = "acctsub-%[1]d" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name + address_prefixes = ["10.0.2.0/24"] +} + +resource "azurerm_lb" "test" { + name = "acctestlb-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + sku = "Standard" + + frontend_ip_configuration { + name = "default" + subnet_id = azurerm_subnet.test.id + private_ip_address_allocation = "Dynamic" + } +} + +resource "azurerm_lb_backend_address_pool" "test" { + name = "test" + loadbalancer_id = azurerm_lb.test.id +} + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + sku_name = "Standard_D1_v2" + instances = 1 + + platform_fault_domain_count = 2 + + os_profile { + linux_configuration { + computer_name_prefix = "testvm-%[1]d" + admin_username = "myadmin" + admin_password = "Passwword1234" + + disable_password_authentication = false + } + } + + network_interface { + name = "TestNetworkProfile" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + primary = true + subnet_id = azurerm_subnet.test.id + + load_balancer_backend_address_pool_ids = [azurerm_lb_backend_address_pool.test.id] + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, data.RandomInteger, data.Locations.Primary) +} + +func (OrchestratedVirtualMachineScaleSetResource) priorityTemplate(data acceptance.TestData) string { + r := OrchestratedVirtualMachineScaleSetResource{} + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-OVMSS-%[1]d" + location = "%[2]s" +} + +%[3]s + +resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { + name = "acctestOVMSS-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + priority = "Spot" + eviction_policy = "Deallocate" + + sku_name = "Standard_D1_v2" + instances = 1 + + platform_fault_domain_count = 2 + + os_profile { + linux_configuration { + computer_name_prefix = "testvm-%[1]d" + admin_username = "myadmin" + admin_password = "Passwword1234" + + disable_password_authentication = false + } + } + + network_interface { + name = 
"TestNetworkProfile" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + primary = true + subnet_id = azurerm_subnet.test.id + + public_ip_address { + name = "TestPublicIPConfiguration" + domain_name_label = "test-domain-label" + idle_timeout_in_minutes = 4 + } + } + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, data.RandomInteger, data.Locations.Primary, r.natgateway_template(data)) +} + +// func (OrchestratedVirtualMachineScaleSetResource) extensionTemplate(data acceptance.TestData) string { +// r := OrchestratedVirtualMachineScaleSetResource{} +// return fmt.Sprintf(` +// provider "azurerm" { +// features {} +// } + +// resource "azurerm_resource_group" "test" { +// name = "acctestRG-OVMSS-%[1]d" +// location = "%[2]s" +// } + +// %[3]s + +// resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { +// name = "acctestOVMSS-%[1]d" +// location = azurerm_resource_group.test.location +// resource_group_name = azurerm_resource_group.test.name + +// sku_name = "Standard_D1_v2" +// instances = 1 + +// platform_fault_domain_count = 2 + +// os_profile { +// linux_configuration { +// computer_name_prefix = "testvm-%[1]d" +// admin_username = "myadmin" + +// admin_ssh_key { +// username = "myadmin" +// public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDCsTcryUl51Q2VSEHqDRNmceUFo55ZtcIwxl2QITbN1RREti5ml/VTytC0yeBOvnZA4x4CFpdw/lCDPk0yrH9Ei5vVkXmOrExdTlT3qI7YaAzj1tUVlBd4S6LX1F7y6VLActvdHuDDuXZXzCDd/97420jrDfWZqJMlUK/EmCE5ParCeHIRIvmBxcEnGfFIsw8xQZl0HphxWOtJil8qsUWSdMyCiJYYQpMoMliO99X40AUc4/AlsyPyT5ddbKk08YrZ+rKDVHF7o29rh4vi5MmHkVgVQHKiKybWlHq+b71gIAUQk9wrJxD+dqt4igrmDSpIjfjwnd+l5UIn5fJSO5DYV4YT/4hwK7OKmuo7OFHD0WyY5YnkYEMtFgzemnRBdE8ulcT60DQpVgRMXFWHvhyCWy0L6sgj1QWDZlLpvsIvNfHsyhKFMG1frLnMt/nP0+YCcfg+v1JYeCKjeoJxB8DWcRBsjzItY0CGmzP8UYZiYKl/2u+2TgFS5r7NWH11bxoUzjKdaa1NLw+ieA8GlBFfCbfWe6YVB9ggUte4VtYFMZGxOjS2bAiYtfgTKFJv+XqORAwExG6+G2eDxIDyo80/OA9IG7Xv/jwQr7D6KDjDuULFcN/iTxuttoKrHeYz1hf5ZQlBdllwJHYx6fK2g8kha6r2JIQKocvsAXiiONqSfw== hello@world.com" +// } +// } +// } + +// network_interface { +// name = "TestNetworkProfile" +// primary = true + +// ip_configuration { +// name = "TestIPConfiguration" +// primary = true +// subnet_id = azurerm_subnet.test.id + +// public_ip_address { +// name = "TestPublicIPConfiguration" +// domain_name_label = "test-domain-label" +// idle_timeout_in_minutes = 4 +// } +// } +// } + +// os_disk { +// storage_account_type = "Standard_LRS" +// caching = "ReadWrite" +// } + +// source_image_reference { +// publisher = "Canonical" +// offer = "UbuntuServer" +// sku = "16.04-LTS" +// version = "latest" +// } + +// extension { +// name = "CustomScript" +// publisher = "Microsoft.Azure.Extensions" +// type = "CustomScript" +// type_handler_version = "2.0" +// auto_upgrade_minor_version = true + +// settings = jsonencode({ +// "commandToExecute" = "echo $HOSTNAME" +// }) + +// protected_settings = jsonencode({ +// "managedIdentity" = {} +// }) + +// } +// } +// `, data.RandomInteger, data.Locations.Primary, r.natgateway_template(data)) +// } + +// func (OrchestratedVirtualMachineScaleSetResource) extensionTemplateUpdated(data acceptance.TestData) string { +// r := OrchestratedVirtualMachineScaleSetResource{} +// return fmt.Sprintf(` +// provider "azurerm" { +// features {} +// } + +// resource "azurerm_resource_group" "test" { +// name = "acctestRG-OVMSS-%[1]d" +// location = 
"%[2]s" +// } + +// %[3]s + +// resource "azurerm_orchestrated_virtual_machine_scale_set" "test" { +// name = "acctestOVMSS-%[1]d" +// location = azurerm_resource_group.test.location +// resource_group_name = azurerm_resource_group.test.name + +// sku_name = "Standard_D1_v2" +// instances = 1 + +// platform_fault_domain_count = 2 + +// os_profile { +// linux_configuration { +// computer_name_prefix = "testvm-%[1]d" +// admin_username = "myadmin" + +// admin_ssh_key { +// username = "myadmin" +// public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDCsTcryUl51Q2VSEHqDRNmceUFo55ZtcIwxl2QITbN1RREti5ml/VTytC0yeBOvnZA4x4CFpdw/lCDPk0yrH9Ei5vVkXmOrExdTlT3qI7YaAzj1tUVlBd4S6LX1F7y6VLActvdHuDDuXZXzCDd/97420jrDfWZqJMlUK/EmCE5ParCeHIRIvmBxcEnGfFIsw8xQZl0HphxWOtJil8qsUWSdMyCiJYYQpMoMliO99X40AUc4/AlsyPyT5ddbKk08YrZ+rKDVHF7o29rh4vi5MmHkVgVQHKiKybWlHq+b71gIAUQk9wrJxD+dqt4igrmDSpIjfjwnd+l5UIn5fJSO5DYV4YT/4hwK7OKmuo7OFHD0WyY5YnkYEMtFgzemnRBdE8ulcT60DQpVgRMXFWHvhyCWy0L6sgj1QWDZlLpvsIvNfHsyhKFMG1frLnMt/nP0+YCcfg+v1JYeCKjeoJxB8DWcRBsjzItY0CGmzP8UYZiYKl/2u+2TgFS5r7NWH11bxoUzjKdaa1NLw+ieA8GlBFfCbfWe6YVB9ggUte4VtYFMZGxOjS2bAiYtfgTKFJv+XqORAwExG6+G2eDxIDyo80/OA9IG7Xv/jwQr7D6KDjDuULFcN/iTxuttoKrHeYz1hf5ZQlBdllwJHYx6fK2g8kha6r2JIQKocvsAXiiONqSfw== hello@world.com" +// } +// } +// } + +// network_interface { +// name = "TestNetworkProfile" +// primary = true + +// ip_configuration { +// name = "TestIPConfiguration" +// primary = true +// subnet_id = azurerm_subnet.test.id + +// public_ip_address { +// name = "TestPublicIPConfiguration" +// domain_name_label = "test-domain-label" +// idle_timeout_in_minutes = 4 +// } +// } +// } + +// os_disk { +// storage_account_type = "Standard_LRS" +// caching = "ReadWrite" +// } + +// source_image_reference { +// publisher = "Canonical" +// offer = "UbuntuServer" +// sku = "16.04-LTS" +// version = "latest" +// } + +// extension { +// name = "CustomScript" +// publisher = "Microsoft.Azure.Extensions" +// type = "CustomScript" +// type_handler_version = "2.0" +// auto_upgrade_minor_version = true + +// settings = < 0 { - return fmt.Errorf("An `automatic_os_upgrade_policy` block cannot be specified when `upgrade_mode` is not set to `Automatic`") + return fmt.Errorf("an `automatic_os_upgrade_policy` block cannot be specified when `upgrade_mode` is not set to `Automatic`") } shouldHaveRollingUpgradePolicy := upgradeMode == compute.UpgradeModeAutomatic || upgradeMode == compute.UpgradeModeRolling if !shouldHaveRollingUpgradePolicy && len(rollingUpgradePolicyRaw) > 0 { - return fmt.Errorf("A `rolling_upgrade_policy` block cannot be specified when `upgrade_mode` is set to %q", string(upgradeMode)) + return fmt.Errorf("a `rolling_upgrade_policy` block cannot be specified when `upgrade_mode` is set to %q", string(upgradeMode)) } shouldHaveRollingUpgradePolicy = upgradeMode == compute.UpgradeModeRolling if shouldHaveRollingUpgradePolicy && len(rollingUpgradePolicyRaw) == 0 { - return fmt.Errorf("A `rolling_upgrade_policy` block must be specified when `upgrade_mode` is set to %q", string(upgradeMode)) + return fmt.Errorf("a `rolling_upgrade_policy` block must be specified when `upgrade_mode` is set to %q", string(upgradeMode)) } winRmListenersRaw := d.Get("winrm_listener").(*pluginsdk.Set).List() @@ -482,11 +483,11 @@ func resourceWindowsVirtualMachineScaleSetCreate(d *pluginsdk.ResourceData, meta if evictionPolicyRaw, ok := d.GetOk("eviction_policy"); ok { if virtualMachineProfile.Priority != compute.VirtualMachinePriorityTypesSpot { - return fmt.Errorf("An `eviction_policy` can only be 
specified when `priority` is set to `Spot`") + return fmt.Errorf("an `eviction_policy` can only be specified when `priority` is set to `Spot`") } virtualMachineProfile.EvictionPolicy = compute.VirtualMachineEvictionPolicyTypes(evictionPolicyRaw.(string)) } else if priority == compute.VirtualMachinePriorityTypesSpot { - return fmt.Errorf("An `eviction_policy` must be specified when `priority` is set to `Spot`") + return fmt.Errorf("an `eviction_policy` must be specified when `priority` is set to `Spot`") } if len(additionalUnattendContentRaw) > 0 { @@ -529,6 +530,10 @@ func resourceWindowsVirtualMachineScaleSetCreate(d *pluginsdk.ResourceData, meta SinglePlacementGroup: utils.Bool(d.Get("single_placement_group").(bool)), VirtualMachineProfile: &virtualMachineProfile, UpgradePolicy: &upgradePolicy, + // OrchestrationMode needs to be hardcoded to Uniform, for the + // standard VMSS resource, since virtualMachineProfile is now supported + // in both VMSS and Orchestrated VMSS... + OrchestrationMode: compute.OrchestrationModeUniform, ScaleInPolicy: &compute.ScaleInPolicy{ Rules: &[]compute.VirtualMachineScaleSetScaleInRules{compute.VirtualMachineScaleSetScaleInRules(scaleInPolicy)}, }, @@ -567,7 +572,8 @@ func resourceWindowsVirtualMachineScaleSetCreate(d *pluginsdk.ResourceData, meta log.Printf("[DEBUG] Virtual Machine Scale Set %q (Resource Group %q) was created", name, resourceGroup) log.Printf("[DEBUG] Retrieving Virtual Machine Scale Set %q (Resource Group %q)..", name, resourceGroup) - resp, err := client.Get(ctx, resourceGroup, name) + // Upgrading to the 2021-07-01 exposed a new expand parameter in the GET method + resp, err := client.Get(ctx, resourceGroup, name, compute.ExpandTypesForGetVMScaleSetsUserData) if err != nil { return fmt.Errorf("retrieving Windows Virtual Machine Scale Set %q (Resource Group %q): %+v", name, resourceGroup, err) } @@ -593,7 +599,8 @@ func resourceWindowsVirtualMachineScaleSetUpdate(d *pluginsdk.ResourceData, meta updateInstances := false // retrieve - existing, err := client.Get(ctx, id.ResourceGroup, id.Name) + // Upgrading to the 2021-07-01 exposed a new expand parameter in the GET method + existing, err := client.Get(ctx, id.ResourceGroup, id.Name, compute.ExpandTypesForGetVMScaleSetsUserData) if err != nil { return fmt.Errorf("retrieving Windows Virtual Machine Scale Set %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) } @@ -906,7 +913,8 @@ func resourceWindowsVirtualMachineScaleSetRead(d *pluginsdk.ResourceData, meta i return err } - resp, err := client.Get(ctx, id.ResourceGroup, id.Name) + // Upgrading to the 2021-07-01 exposed a new expand parameter in the GET method + resp, err := client.Get(ctx, id.ResourceGroup, id.Name, compute.ExpandTypesForGetVMScaleSetsUserData) if err != nil { if utils.ResponseWasNotFound(resp.Response) { log.Printf("[DEBUG] Windows Virtual Machine Scale Set %q was not found in Resource Group %q - removing from state!", id.Name, id.ResourceGroup) @@ -1128,7 +1136,8 @@ func resourceWindowsVirtualMachineScaleSetDelete(d *pluginsdk.ResourceData, meta return err } - resp, err := client.Get(ctx, id.ResourceGroup, id.Name) + // Upgrading to the 2021-07-01 exposed a new expand parameter in the GET method + resp, err := client.Get(ctx, id.ResourceGroup, id.Name, compute.ExpandTypesForGetVMScaleSetsUserData) if err != nil { if utils.ResponseWasNotFound(resp.Response) { return nil diff --git a/internal/services/compute/windows_virtual_machine_scale_set_resource_test.go 
b/internal/services/compute/windows_virtual_machine_scale_set_resource_test.go index ae88f2b27142..e9750466c558 100644 --- a/internal/services/compute/windows_virtual_machine_scale_set_resource_test.go +++ b/internal/services/compute/windows_virtual_machine_scale_set_resource_test.go @@ -20,7 +20,8 @@ func (r WindowsVirtualMachineScaleSetResource) Exists(ctx context.Context, clien return nil, err } - resp, err := clients.Compute.VMScaleSetClient.Get(ctx, id.ResourceGroup, id.Name) + // Upgrading to the 2021-07-01 exposed a new expand parameter in the GET method + resp, err := clients.Compute.VMScaleSetClient.Get(ctx, id.ResourceGroup, id.Name, "") if err != nil { return nil, fmt.Errorf("retrieving Compute Windows Virtual Machine Scale Set %q", id) } diff --git a/internal/services/databricks/databricks_customer_managed_key_resource.go b/internal/services/databricks/databricks_customer_managed_key_resource.go index 168b824a2592..56d680607bee 100644 --- a/internal/services/databricks/databricks_customer_managed_key_resource.go +++ b/internal/services/databricks/databricks_customer_managed_key_resource.go @@ -98,8 +98,8 @@ func DatabricksWorkspaceCustomerManagedKeyCreateUpdate(d *pluginsdk.ResourceData // Not sure if I should also lock the key vault here too // or at the very least the key? - locks.ByName(id.Name, "azurerm_databricks_workspace") - defer locks.UnlockByName(id.Name, "azurerm_databricks_workspace") + locks.ByName(id.WorkspaceName, "azurerm_databricks_workspace") + defer locks.UnlockByName(id.WorkspaceName, "azurerm_databricks_workspace") var encryptionEnabled, infrastructureEnabled bool workspace, err := workspaceClient.Get(ctx, *id) @@ -130,7 +130,7 @@ func DatabricksWorkspaceCustomerManagedKeyCreateUpdate(d *pluginsdk.ResourceData return fmt.Errorf("retrieving the Resource ID for the Key Vault at URL %q: %+v", key.KeyVaultBaseUrl, err) } - resourceID := parse.NewCustomerManagedKeyID(subscriptionId, id.ResourceGroup, id.Name) + resourceID := parse.NewCustomerManagedKeyID(subscriptionId, id.ResourceGroupName, id.WorkspaceName) if d.IsNewResource() { if workspace.Model != nil && workspace.Model.Properties.Parameters != nil && workspace.Model.Properties.Parameters.Encryption != nil { @@ -242,8 +242,8 @@ func DatabricksWorkspaceCustomerManagedKeyDelete(d *pluginsdk.ResourceData, meta workspaceID := workspaces.NewWorkspaceID(id.SubscriptionId, id.ResourceGroup, id.CustomerMangagedKeyName) // Not sure if I should also lock the key vault here too - locks.ByName(workspaceID.Name, "azurerm_databricks_workspace") - defer locks.UnlockByName(workspaceID.Name, "azurerm_databricks_workspace") + locks.ByName(workspaceID.WorkspaceName, "azurerm_databricks_workspace") + defer locks.UnlockByName(workspaceID.WorkspaceName, "azurerm_databricks_workspace") workspace, err := client.Get(ctx, workspaceID) if err != nil { diff --git a/internal/services/databricks/databricks_customer_managed_key_resource_test.go b/internal/services/databricks/databricks_customer_managed_key_resource_test.go index 2e2a58bcce3d..829885a1a9eb 100644 --- a/internal/services/databricks/databricks_customer_managed_key_resource_test.go +++ b/internal/services/databricks/databricks_customer_managed_key_resource_test.go @@ -111,7 +111,7 @@ func (DatabricksWorkspaceCustomerManagedKeyResource) Exists(ctx context.Context, resp, err := clients.DataBricks.WorkspacesClient.Get(ctx, *id) if err != nil { - return nil, fmt.Errorf("retrieving Databricks Workspace Customer Mangaged Key %q (resource group: %q): %+v", id.Name, 
id.ResourceGroup, err) + return nil, fmt.Errorf("retrieving %s: %+v", id, err) } // This is the only way we can tell if the CMK has actually been provisioned or not... diff --git a/internal/services/databricks/databricks_workspace_data_source.go b/internal/services/databricks/databricks_workspace_data_source.go index 6ddc1a423723..9dafbff53652 100644 --- a/internal/services/databricks/databricks_workspace_data_source.go +++ b/internal/services/databricks/databricks_workspace_data_source.go @@ -68,8 +68,8 @@ func dataSourceDatabricksWorkspaceRead(d *pluginsdk.ResourceData, meta interface d.SetId(id.ID()) - d.Set("name", id.Name) - d.Set("resource_group_name", id.ResourceGroup) + d.Set("name", id.WorkspaceName) + d.Set("resource_group_name", id.ResourceGroupName) d.Set("sku", resp.Model.Sku.Name) if model := resp.Model; model != nil { d.Set("workspace_id", model.Properties.WorkspaceId) diff --git a/internal/services/databricks/databricks_workspace_resource.go b/internal/services/databricks/databricks_workspace_resource.go index 2778f82ace32..8e6c62244f5a 100644 --- a/internal/services/databricks/databricks_workspace_resource.go +++ b/internal/services/databricks/databricks_workspace_resource.go @@ -368,7 +368,7 @@ func resourceDatabricksWorkspaceCreateUpdate(d *pluginsdk.ResourceData, meta int if managedResourceGroupName == "" { // no managed resource group name was provided, we use the default pattern log.Printf("[DEBUG][azurerm_databricks_workspace] no managed resource group id was provided, we use the default pattern.") - managedResourceGroupName = fmt.Sprintf("databricks-rg-%s", id.ResourceGroup) + managedResourceGroupName = fmt.Sprintf("databricks-rg-%s", id.ResourceGroupName) } managedResourceGroupID := resourcesParse.NewResourceGroupID(subscriptionId, managedResourceGroupName).ID() @@ -515,8 +515,8 @@ func resourceDatabricksWorkspaceRead(d *pluginsdk.ResourceData, meta interface{} return fmt.Errorf("retrieving %s: %+v", *id, err) } - d.Set("name", id.Name) - d.Set("resource_group_name", id.ResourceGroup) + d.Set("name", id.WorkspaceName) + d.Set("resource_group_name", id.ResourceGroupName) if model := resp.Model; model != nil { d.Set("location", azure.NormalizeLocation(model.Location)) diff --git a/internal/services/databricks/databricks_workspace_resource_test.go b/internal/services/databricks/databricks_workspace_resource_test.go index 51246b7cf732..c3e87ac50ea8 100644 --- a/internal/services/databricks/databricks_workspace_resource_test.go +++ b/internal/services/databricks/databricks_workspace_resource_test.go @@ -268,7 +268,7 @@ func (DatabricksWorkspaceResource) Exists(ctx context.Context, clients *clients. 
resp, err := clients.DataBricks.WorkspacesClient.Get(ctx, *id) if err != nil { - return nil, fmt.Errorf("retrieving Analysis Services Server %q (resource group: %q): %+v", id.Name, id.ResourceGroup, err) + return nil, fmt.Errorf("retrieving Analysis Services Server %q (resource group: %q): %+v", id.WorkspaceName, id.ResourceGroupName, err) } return utils.Bool(resp.Model != nil), nil diff --git a/internal/services/databricks/sdk/2021-04-01-preview/delete/id_privateendpointconnection.go b/internal/services/databricks/sdk/2021-04-01-preview/delete/id_privateendpointconnection.go index f052dd34cc82..1aace477bfe4 100644 --- a/internal/services/databricks/sdk/2021-04-01-preview/delete/id_privateendpointconnection.go +++ b/internal/services/databricks/sdk/2021-04-01-preview/delete/id_privateendpointconnection.go @@ -7,120 +7,131 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = PrivateEndpointConnectionId{} + +// PrivateEndpointConnectionId is a struct representing the Resource ID for a Private Endpoint Connection type PrivateEndpointConnectionId struct { - SubscriptionId string - ResourceGroup string - WorkspaceName string - Name string + SubscriptionId string + ResourceGroupName string + WorkspaceName string + PrivateEndpointConnectionName string } -func NewPrivateEndpointConnectionID(subscriptionId, resourceGroup, workspaceName, name string) PrivateEndpointConnectionId { +// NewPrivateEndpointConnectionID returns a new PrivateEndpointConnectionId struct +func NewPrivateEndpointConnectionID(subscriptionId string, resourceGroupName string, workspaceName string, privateEndpointConnectionName string) PrivateEndpointConnectionId { return PrivateEndpointConnectionId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, - WorkspaceName: workspaceName, - Name: name, + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + WorkspaceName: workspaceName, + PrivateEndpointConnectionName: privateEndpointConnectionName, } } -func (id PrivateEndpointConnectionId) String() string { - segments := []string{ - fmt.Sprintf("Name %q", id.Name), - fmt.Sprintf("Workspace Name %q", id.WorkspaceName), - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Private Endpoint Connection", segmentsStr) -} - -func (id PrivateEndpointConnectionId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Databricks/workspaces/%s/privateEndpointConnections/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.WorkspaceName, id.Name) -} - -// ParsePrivateEndpointConnectionID parses a PrivateEndpointConnection ID into an PrivateEndpointConnectionId struct +// ParsePrivateEndpointConnectionID parses 'input' into a PrivateEndpointConnectionId func ParsePrivateEndpointConnectionID(input string) (*PrivateEndpointConnectionId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(PrivateEndpointConnectionId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := PrivateEndpointConnectionId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, - } + var ok bool + id := PrivateEndpointConnectionId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok 
= parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.WorkspaceName, err = id.PopSegment("workspaces"); err != nil { - return nil, err - } - if resourceId.Name, err = id.PopSegment("privateEndpointConnections"); err != nil { - return nil, err + if id.WorkspaceName, ok = parsed.Parsed["workspaceName"]; !ok { + return nil, fmt.Errorf("the segment 'workspaceName' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.PrivateEndpointConnectionName, ok = parsed.Parsed["privateEndpointConnectionName"]; !ok { + return nil, fmt.Errorf("the segment 'privateEndpointConnectionName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParsePrivateEndpointConnectionIDInsensitively parses an PrivateEndpointConnection ID into an PrivateEndpointConnectionId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParsePrivateEndpointConnectionID method should be used instead for validation etc. +// ParsePrivateEndpointConnectionIDInsensitively parses 'input' case-insensitively into a PrivateEndpointConnectionId +// note: this method should only be used for API response data and not user input func ParsePrivateEndpointConnectionIDInsensitively(input string) (*PrivateEndpointConnectionId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(PrivateEndpointConnectionId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := PrivateEndpointConnectionId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, - } + var ok bool + id := PrivateEndpointConnectionId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - // find the correct casing for the 'workspaces' segment - workspacesKey := "workspaces" - for key := range id.Path { - if strings.EqualFold(key, workspacesKey) { - workspacesKey = key - break - } + if id.WorkspaceName, ok = parsed.Parsed["workspaceName"]; !ok { + return nil, fmt.Errorf("the segment 'workspaceName' was not found in the resource id %q", input) } - if resourceId.WorkspaceName, err = id.PopSegment(workspacesKey); err != nil { - return nil, err + + if id.PrivateEndpointConnectionName, ok = parsed.Parsed["privateEndpointConnectionName"]; !ok { + return nil, fmt.Errorf("the segment 'privateEndpointConnectionName' was not found in the resource id %q", input) } - // find the correct casing for the 'privateEndpointConnections' segment - 
privateEndpointConnectionsKey := "privateEndpointConnections" - for key := range id.Path { - if strings.EqualFold(key, privateEndpointConnectionsKey) { - privateEndpointConnectionsKey = key - break - } + return &id, nil +} + +// ValidatePrivateEndpointConnectionID checks that 'input' can be parsed as a Private Endpoint Connection ID +func ValidatePrivateEndpointConnectionID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.Name, err = id.PopSegment(privateEndpointConnectionsKey); err != nil { - return nil, err + + if _, err := ParsePrivateEndpointConnectionID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Private Endpoint Connection ID +func (id PrivateEndpointConnectionId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Databricks/workspaces/%s/privateEndpointConnections/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.WorkspaceName, id.PrivateEndpointConnectionName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Private Endpoint Connection ID +func (id PrivateEndpointConnectionId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("providers", "providers", "providers"), + resourceids.ResourceProviderSegment("microsoftDatabricks", "Microsoft.Databricks", "Microsoft.Databricks"), + resourceids.StaticSegment("workspaces", "workspaces", "workspaces"), + resourceids.UserSpecifiedSegment("workspaceName", "workspaceValue"), + resourceids.StaticSegment("privateEndpointConnections", "privateEndpointConnections", "privateEndpointConnections"), + resourceids.UserSpecifiedSegment("privateEndpointConnectionName", "privateEndpointConnectionValue"), } +} - return &resourceId, nil +// String returns a human-readable description of this Private Endpoint Connection ID +func (id PrivateEndpointConnectionId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Workspace Name: %q", id.WorkspaceName), + fmt.Sprintf("Private Endpoint Connection Name: %q", id.PrivateEndpointConnectionName), + } + return fmt.Sprintf("Private Endpoint Connection (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/databricks/sdk/2021-04-01-preview/delete/id_privateendpointconnection_test.go b/internal/services/databricks/sdk/2021-04-01-preview/delete/id_privateendpointconnection_test.go index 19f1dcd0bbce..a2461df644be 100644 --- a/internal/services/databricks/sdk/2021-04-01-preview/delete/id_privateendpointconnection_test.go +++ b/internal/services/databricks/sdk/2021-04-01-preview/delete/id_privateendpointconnection_test.go @@ -6,13 +6,33 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = PrivateEndpointConnectionId{} +var _ resourceids.ResourceId = PrivateEndpointConnectionId{} 
-func TestPrivateEndpointConnectionIDFormatter(t *testing.T) { - actual := NewPrivateEndpointConnectionID("{subscriptionId}", "{resourceGroupName}", "{workspaceName}", "{privateEndpointConnectionName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}" +func TestNewPrivateEndpointConnectionID(t *testing.T) { + id := NewPrivateEndpointConnectionID("12345678-1234-9876-4563-123456789012", "example-resource-group", "workspaceValue", "privateEndpointConnectionValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.WorkspaceName != "workspaceValue" { + t.Fatalf("Expected %q but got %q for Segment 'WorkspaceName'", id.WorkspaceName, "workspaceValue") + } + + if id.PrivateEndpointConnectionName != "privateEndpointConnectionValue" { + t.Fatalf("Expected %q but got %q for Segment 'PrivateEndpointConnectionName'", id.PrivateEndpointConnectionName, "privateEndpointConnectionValue") + } +} + +func TestFormatPrivateEndpointConnectionID(t *testing.T) { + actual := NewPrivateEndpointConnectionID("12345678-1234-9876-4563-123456789012", "example-resource-group", "workspaceValue", "privateEndpointConnectionValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateEndpointConnections/privateEndpointConnectionValue" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,79 +42,72 @@ func TestParsePrivateEndpointConnectionID(t *testing.T) { Error bool Expected *PrivateEndpointConnectionId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing WorkspaceName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", Error: true, }, - { - // missing value for WorkspaceName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks", Error: true, }, - { - // missing Name - Input: 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/privateEndpointConnections/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateEndpointConnections", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateEndpointConnections/privateEndpointConnectionValue", Expected: &PrivateEndpointConnectionId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - WorkspaceName: "{workspaceName}", - Name: "{privateEndpointConnectionName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + WorkspaceName: "workspaceValue", + PrivateEndpointConnectionName: "privateEndpointConnectionValue", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.DATABRICKS/WORKSPACES/{WORKSPACENAME}/PRIVATEENDPOINTCONNECTIONS/{PRIVATEENDPOINTCONNECTIONNAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateEndpointConnections/privateEndpointConnectionValue/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -104,7 +117,7 @@ func TestParsePrivateEndpointConnectionID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -113,15 +126,19 @@ func TestParsePrivateEndpointConnectionID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + if actual.WorkspaceName != v.Expected.WorkspaceName { t.Fatalf("Expected %q but got %q for WorkspaceName", v.Expected.WorkspaceName, actual.WorkspaceName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.PrivateEndpointConnectionName != 
v.Expected.PrivateEndpointConnectionName { + t.Fatalf("Expected %q but got %q for PrivateEndpointConnectionName", v.Expected.PrivateEndpointConnectionName, actual.PrivateEndpointConnectionName) } + } } @@ -131,106 +148,132 @@ func TestParsePrivateEndpointConnectionIDInsensitively(t *testing.T) { Error bool Expected *PrivateEndpointConnectionId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing WorkspaceName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for WorkspaceName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/privateEndpointConnections/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}", - Expected: &PrivateEndpointConnectionId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - WorkspaceName: "{workspaceName}", - Name: "{privateEndpointConnectionName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/privateendpointconnections/{privateEndpointConnectionName}", - Expected: &PrivateEndpointConnectionId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - WorkspaceName: "{workspaceName}", - Name: "{privateEndpointConnectionName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // 
Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/WORKSPACES/{workspaceName}/PRIVATEENDPOINTCONNECTIONS/{privateEndpointConnectionName}", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateEndpointConnections", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE/pRiVaTeEnDpOiNtCoNnEcTiOnS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateEndpointConnections/privateEndpointConnectionValue", Expected: &PrivateEndpointConnectionId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - WorkspaceName: "{workspaceName}", - Name: "{privateEndpointConnectionName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + WorkspaceName: "workspaceValue", + PrivateEndpointConnectionName: "privateEndpointConnectionValue", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/WoRkSpAcEs/{workspaceName}/PrIvAtEeNdPoInTcOnNeCtIoNs/{privateEndpointConnectionName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateEndpointConnections/privateEndpointConnectionValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE/pRiVaTeEnDpOiNtCoNnEcTiOnS/pRiVaTeEnDpOiNtCoNnEcTiOnVaLuE", Expected: &PrivateEndpointConnectionId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - WorkspaceName: 
"{workspaceName}", - Name: "{privateEndpointConnectionName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + WorkspaceName: "wOrKsPaCeVaLuE", + PrivateEndpointConnectionName: "pRiVaTeEnDpOiNtCoNnEcTiOnVaLuE", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE/pRiVaTeEnDpOiNtCoNnEcTiOnS/pRiVaTeEnDpOiNtCoNnEcTiOnVaLuE/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -240,7 +283,7 @@ func TestParsePrivateEndpointConnectionIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -249,14 +292,18 @@ func TestParsePrivateEndpointConnectionIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + if actual.WorkspaceName != v.Expected.WorkspaceName { t.Fatalf("Expected %q but got %q for WorkspaceName", v.Expected.WorkspaceName, actual.WorkspaceName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.PrivateEndpointConnectionName != v.Expected.PrivateEndpointConnectionName { + t.Fatalf("Expected %q but got %q for PrivateEndpointConnectionName", v.Expected.PrivateEndpointConnectionName, actual.PrivateEndpointConnectionName) } + } } diff --git a/internal/services/databricks/sdk/2021-04-01-preview/outboundnetworkdependenciesendpoints/client.go b/internal/services/databricks/sdk/2021-04-01-preview/outboundnetworkdependenciesendpoints/client.go new file mode 100644 index 000000000000..c1f9e041af10 --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/outboundnetworkdependenciesendpoints/client.go @@ -0,0 +1,15 @@ +package outboundnetworkdependenciesendpoints + +import "github.com/Azure/go-autorest/autorest" + +type OutboundNetworkDependenciesEndpointsClient struct { + Client autorest.Client + baseUri string +} + +func NewOutboundNetworkDependenciesEndpointsClientWithBaseURI(endpoint string) OutboundNetworkDependenciesEndpointsClient { + return OutboundNetworkDependenciesEndpointsClient{ + Client: autorest.NewClientWithUserAgent(userAgent()), + baseUri: endpoint, + } +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/outboundnetworkdependenciesendpoints/id_workspace.go b/internal/services/databricks/sdk/2021-04-01-preview/outboundnetworkdependenciesendpoints/id_workspace.go new file mode 100644 index 000000000000..a42f3142f998 --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/outboundnetworkdependenciesendpoints/id_workspace.go @@ -0,0 +1,124 @@ +package outboundnetworkdependenciesendpoints + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = 
WorkspaceId{} + +// WorkspaceId is a struct representing the Resource ID for a Workspace +type WorkspaceId struct { + SubscriptionId string + ResourceGroupName string + WorkspaceName string +} + +// NewWorkspaceID returns a new WorkspaceId struct +func NewWorkspaceID(subscriptionId string, resourceGroupName string, workspaceName string) WorkspaceId { + return WorkspaceId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + WorkspaceName: workspaceName, + } +} + +// ParseWorkspaceID parses 'input' into a WorkspaceId +func ParseWorkspaceID(input string) (*WorkspaceId, error) { + parser := resourceids.NewParserFromResourceIdType(WorkspaceId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := WorkspaceId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.WorkspaceName, ok = parsed.Parsed["workspaceName"]; !ok { + return nil, fmt.Errorf("the segment 'workspaceName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParseWorkspaceIDInsensitively parses 'input' case-insensitively into a WorkspaceId +// note: this method should only be used for API response data and not user input +func ParseWorkspaceIDInsensitively(input string) (*WorkspaceId, error) { + parser := resourceids.NewParserFromResourceIdType(WorkspaceId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := WorkspaceId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.WorkspaceName, ok = parsed.Parsed["workspaceName"]; !ok { + return nil, fmt.Errorf("the segment 'workspaceName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidateWorkspaceID checks that 'input' can be parsed as a Workspace ID +func ValidateWorkspaceID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseWorkspaceID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Workspace ID +func (id WorkspaceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Databricks/workspaces/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.WorkspaceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Workspace ID +func (id WorkspaceId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + 
resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("providers", "providers", "providers"), + resourceids.ResourceProviderSegment("microsoftDatabricks", "Microsoft.Databricks", "Microsoft.Databricks"), + resourceids.StaticSegment("workspaces", "workspaces", "workspaces"), + resourceids.UserSpecifiedSegment("workspaceName", "workspaceValue"), + } +} + +// String returns a human-readable description of this Workspace ID +func (id WorkspaceId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Workspace Name: %q", id.WorkspaceName), + } + return fmt.Sprintf("Workspace (%s)", strings.Join(components, "\n")) +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/outboundnetworkdependenciesendpoints/id_workspace_test.go b/internal/services/databricks/sdk/2021-04-01-preview/outboundnetworkdependenciesendpoints/id_workspace_test.go new file mode 100644 index 000000000000..ad9994213dfa --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/outboundnetworkdependenciesendpoints/id_workspace_test.go @@ -0,0 +1,264 @@ +package outboundnetworkdependenciesendpoints + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = WorkspaceId{} + +func TestNewWorkspaceID(t *testing.T) { + id := NewWorkspaceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "workspaceValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.WorkspaceName != "workspaceValue" { + t.Fatalf("Expected %q but got %q for Segment 'WorkspaceName'", id.WorkspaceName, "workspaceValue") + } +} + +func TestFormatWorkspaceID(t *testing.T) { + actual := NewWorkspaceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "workspaceValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) + } +} + +func TestParseWorkspaceID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *WorkspaceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue", + Expected: &WorkspaceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + WorkspaceName: "workspaceValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseWorkspaceID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.WorkspaceName != v.Expected.WorkspaceName { + t.Fatalf("Expected %q but got %q for WorkspaceName", v.Expected.WorkspaceName, actual.WorkspaceName) + } + + } +} + +func TestParseWorkspaceIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *WorkspaceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS", + Error: 
true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue", + Expected: &WorkspaceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + WorkspaceName: "workspaceValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE", + Expected: &WorkspaceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + WorkspaceName: "wOrKsPaCeVaLuE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseWorkspaceIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.WorkspaceName != v.Expected.WorkspaceName { + t.Fatalf("Expected %q but got %q for WorkspaceName", v.Expected.WorkspaceName, actual.WorkspaceName) + } + + } +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/outboundnetworkdependenciesendpoints/method_list_autorest.go b/internal/services/databricks/sdk/2021-04-01-preview/outboundnetworkdependenciesendpoints/method_list_autorest.go new file mode 100644 index 000000000000..5a0f9bff7de1 --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/outboundnetworkdependenciesendpoints/method_list_autorest.go @@ -0,0 +1,65 @@ +package outboundnetworkdependenciesendpoints + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type ListResponse struct { + HttpResponse *http.Response + Model *[]OutboundEnvironmentEndpoint +} + +// List ... 
+func (c OutboundNetworkDependenciesEndpointsClient) List(ctx context.Context, id WorkspaceId) (result ListResponse, err error) { + req, err := c.preparerForList(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "outboundnetworkdependenciesendpoints.OutboundNetworkDependenciesEndpointsClient", "List", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "outboundnetworkdependenciesendpoints.OutboundNetworkDependenciesEndpointsClient", "List", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForList(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "outboundnetworkdependenciesendpoints.OutboundNetworkDependenciesEndpointsClient", "List", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForList prepares the List request. +func (c OutboundNetworkDependenciesEndpointsClient) preparerForList(ctx context.Context, id WorkspaceId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/outboundNetworkDependenciesEndpoints", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForList handles the response to the List request. The method always +// closes the http.Response Body. +func (c OutboundNetworkDependenciesEndpointsClient) responderForList(resp *http.Response) (result ListResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/outboundnetworkdependenciesendpoints/model_endpointdependency.go b/internal/services/databricks/sdk/2021-04-01-preview/outboundnetworkdependenciesendpoints/model_endpointdependency.go new file mode 100644 index 000000000000..b2b3ad40cc07 --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/outboundnetworkdependenciesendpoints/model_endpointdependency.go @@ -0,0 +1,6 @@ +package outboundnetworkdependenciesendpoints + +type EndpointDependency struct { + DomainName *string `json:"domainName,omitempty"` + EndpointDetails *[]EndpointDetail `json:"endpointDetails,omitempty"` +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/outboundnetworkdependenciesendpoints/model_endpointdetail.go b/internal/services/databricks/sdk/2021-04-01-preview/outboundnetworkdependenciesendpoints/model_endpointdetail.go new file mode 100644 index 000000000000..f55234a7b3e6 --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/outboundnetworkdependenciesendpoints/model_endpointdetail.go @@ -0,0 +1,8 @@ +package outboundnetworkdependenciesendpoints + +type EndpointDetail struct { + IpAddress *string `json:"ipAddress,omitempty"` + IsAccessible *bool `json:"isAccessible,omitempty"` + Latency *float64 `json:"latency,omitempty"` + Port *int64 `json:"port,omitempty"` +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/outboundnetworkdependenciesendpoints/model_outboundenvironmentendpoint.go 
b/internal/services/databricks/sdk/2021-04-01-preview/outboundnetworkdependenciesendpoints/model_outboundenvironmentendpoint.go new file mode 100644 index 000000000000..886707727431 --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/outboundnetworkdependenciesendpoints/model_outboundenvironmentendpoint.go @@ -0,0 +1,6 @@ +package outboundnetworkdependenciesendpoints + +type OutboundEnvironmentEndpoint struct { + Category *string `json:"category,omitempty"` + Endpoints *[]EndpointDependency `json:"endpoints,omitempty"` +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/outboundnetworkdependenciesendpoints/version.go b/internal/services/databricks/sdk/2021-04-01-preview/outboundnetworkdependenciesendpoints/version.go new file mode 100644 index 000000000000..748f4e7a5b3e --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/outboundnetworkdependenciesendpoints/version.go @@ -0,0 +1,9 @@ +package outboundnetworkdependenciesendpoints + +import "fmt" + +const defaultApiVersion = "2021-04-01-preview" + +func userAgent() string { + return fmt.Sprintf("pandora/outboundnetworkdependenciesendpoints/%s", defaultApiVersion) +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/client.go b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/client.go new file mode 100644 index 000000000000..adc4283affb2 --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/client.go @@ -0,0 +1,15 @@ +package privateendpointconnections + +import "github.com/Azure/go-autorest/autorest" + +type PrivateEndpointConnectionsClient struct { + Client autorest.Client + baseUri string +} + +func NewPrivateEndpointConnectionsClientWithBaseURI(endpoint string) PrivateEndpointConnectionsClient { + return PrivateEndpointConnectionsClient{ + Client: autorest.NewClientWithUserAgent(userAgent()), + baseUri: endpoint, + } +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/constants.go b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/constants.go new file mode 100644 index 000000000000..4649bfc8148c --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/constants.go @@ -0,0 +1,78 @@ +package privateendpointconnections + +import "strings" + +type PrivateEndpointConnectionProvisioningState string + +const ( + PrivateEndpointConnectionProvisioningStateCreating PrivateEndpointConnectionProvisioningState = "Creating" + PrivateEndpointConnectionProvisioningStateDeleting PrivateEndpointConnectionProvisioningState = "Deleting" + PrivateEndpointConnectionProvisioningStateFailed PrivateEndpointConnectionProvisioningState = "Failed" + PrivateEndpointConnectionProvisioningStateSucceeded PrivateEndpointConnectionProvisioningState = "Succeeded" + PrivateEndpointConnectionProvisioningStateUpdating PrivateEndpointConnectionProvisioningState = "Updating" +) + +func PossibleValuesForPrivateEndpointConnectionProvisioningState() []string { + return []string{ + "Creating", + "Deleting", + "Failed", + "Succeeded", + "Updating", + } +} + +func parsePrivateEndpointConnectionProvisioningState(input string) (*PrivateEndpointConnectionProvisioningState, error) { + vals := map[string]PrivateEndpointConnectionProvisioningState{ + "creating": "Creating", + "deleting": "Deleting", + "failed": "Failed", + "succeeded": "Succeeded", + "updating": "Updating", + } + if v, ok := 
vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // it could be a new value - best effort convert this + v := input + + out := PrivateEndpointConnectionProvisioningState(v) + return &out, nil +} + +type PrivateLinkServiceConnectionStatus string + +const ( + PrivateLinkServiceConnectionStatusApproved PrivateLinkServiceConnectionStatus = "Approved" + PrivateLinkServiceConnectionStatusDisconnected PrivateLinkServiceConnectionStatus = "Disconnected" + PrivateLinkServiceConnectionStatusPending PrivateLinkServiceConnectionStatus = "Pending" + PrivateLinkServiceConnectionStatusRejected PrivateLinkServiceConnectionStatus = "Rejected" +) + +func PossibleValuesForPrivateLinkServiceConnectionStatus() []string { + return []string{ + "Approved", + "Disconnected", + "Pending", + "Rejected", + } +} + +func parsePrivateLinkServiceConnectionStatus(input string) (*PrivateLinkServiceConnectionStatus, error) { + vals := map[string]PrivateLinkServiceConnectionStatus{ + "approved": "Approved", + "disconnected": "Disconnected", + "pending": "Pending", + "rejected": "Rejected", + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // it could be a new value - best effort convert this + v := input + + out := PrivateLinkServiceConnectionStatus(v) + return &out, nil +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/id_privateendpointconnection.go b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/id_privateendpointconnection.go new file mode 100644 index 000000000000..acde749231cd --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/id_privateendpointconnection.go @@ -0,0 +1,137 @@ +package privateendpointconnections + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = PrivateEndpointConnectionId{} + +// PrivateEndpointConnectionId is a struct representing the Resource ID for a Private Endpoint Connection +type PrivateEndpointConnectionId struct { + SubscriptionId string + ResourceGroupName string + WorkspaceName string + PrivateEndpointConnectionName string +} + +// NewPrivateEndpointConnectionID returns a new PrivateEndpointConnectionId struct +func NewPrivateEndpointConnectionID(subscriptionId string, resourceGroupName string, workspaceName string, privateEndpointConnectionName string) PrivateEndpointConnectionId { + return PrivateEndpointConnectionId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + WorkspaceName: workspaceName, + PrivateEndpointConnectionName: privateEndpointConnectionName, + } +} + +// ParsePrivateEndpointConnectionID parses 'input' into a PrivateEndpointConnectionId +func ParsePrivateEndpointConnectionID(input string) (*PrivateEndpointConnectionId, error) { + parser := resourceids.NewParserFromResourceIdType(PrivateEndpointConnectionId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := PrivateEndpointConnectionId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.WorkspaceName, ok = parsed.Parsed["workspaceName"]; !ok 
{ + return nil, fmt.Errorf("the segment 'workspaceName' was not found in the resource id %q", input) + } + + if id.PrivateEndpointConnectionName, ok = parsed.Parsed["privateEndpointConnectionName"]; !ok { + return nil, fmt.Errorf("the segment 'privateEndpointConnectionName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParsePrivateEndpointConnectionIDInsensitively parses 'input' case-insensitively into a PrivateEndpointConnectionId +// note: this method should only be used for API response data and not user input +func ParsePrivateEndpointConnectionIDInsensitively(input string) (*PrivateEndpointConnectionId, error) { + parser := resourceids.NewParserFromResourceIdType(PrivateEndpointConnectionId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := PrivateEndpointConnectionId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.WorkspaceName, ok = parsed.Parsed["workspaceName"]; !ok { + return nil, fmt.Errorf("the segment 'workspaceName' was not found in the resource id %q", input) + } + + if id.PrivateEndpointConnectionName, ok = parsed.Parsed["privateEndpointConnectionName"]; !ok { + return nil, fmt.Errorf("the segment 'privateEndpointConnectionName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidatePrivateEndpointConnectionID checks that 'input' can be parsed as a Private Endpoint Connection ID +func ValidatePrivateEndpointConnectionID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParsePrivateEndpointConnectionID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Private Endpoint Connection ID +func (id PrivateEndpointConnectionId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Databricks/workspaces/%s/privateEndpointConnections/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.WorkspaceName, id.PrivateEndpointConnectionName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Private Endpoint Connection ID +func (id PrivateEndpointConnectionId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("providers", "providers", "providers"), + resourceids.ResourceProviderSegment("microsoftDatabricks", "Microsoft.Databricks", "Microsoft.Databricks"), + resourceids.StaticSegment("workspaces", "workspaces", "workspaces"), + resourceids.UserSpecifiedSegment("workspaceName", "workspaceValue"), + resourceids.StaticSegment("privateEndpointConnections", "privateEndpointConnections", "privateEndpointConnections"), + 
resourceids.UserSpecifiedSegment("privateEndpointConnectionName", "privateEndpointConnectionValue"), + } +} + +// String returns a human-readable description of this Private Endpoint Connection ID +func (id PrivateEndpointConnectionId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Workspace Name: %q", id.WorkspaceName), + fmt.Sprintf("Private Endpoint Connection Name: %q", id.PrivateEndpointConnectionName), + } + return fmt.Sprintf("Private Endpoint Connection (%s)", strings.Join(components, "\n")) +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/id_privateendpointconnection_test.go b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/id_privateendpointconnection_test.go new file mode 100644 index 000000000000..ba780168c140 --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/id_privateendpointconnection_test.go @@ -0,0 +1,309 @@ +package privateendpointconnections + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = PrivateEndpointConnectionId{} + +func TestNewPrivateEndpointConnectionID(t *testing.T) { + id := NewPrivateEndpointConnectionID("12345678-1234-9876-4563-123456789012", "example-resource-group", "workspaceValue", "privateEndpointConnectionValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.WorkspaceName != "workspaceValue" { + t.Fatalf("Expected %q but got %q for Segment 'WorkspaceName'", id.WorkspaceName, "workspaceValue") + } + + if id.PrivateEndpointConnectionName != "privateEndpointConnectionValue" { + t.Fatalf("Expected %q but got %q for Segment 'PrivateEndpointConnectionName'", id.PrivateEndpointConnectionName, "privateEndpointConnectionValue") + } +} + +func TestFormatPrivateEndpointConnectionID(t *testing.T) { + actual := NewPrivateEndpointConnectionID("12345678-1234-9876-4563-123456789012", "example-resource-group", "workspaceValue", "privateEndpointConnectionValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateEndpointConnections/privateEndpointConnectionValue" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) + } +} + +func TestParsePrivateEndpointConnectionID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *PrivateEndpointConnectionId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateEndpointConnections", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateEndpointConnections/privateEndpointConnectionValue", + Expected: &PrivateEndpointConnectionId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + WorkspaceName: "workspaceValue", + PrivateEndpointConnectionName: "privateEndpointConnectionValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateEndpointConnections/privateEndpointConnectionValue/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParsePrivateEndpointConnectionID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.WorkspaceName != v.Expected.WorkspaceName { + t.Fatalf("Expected %q but got %q for WorkspaceName", v.Expected.WorkspaceName, actual.WorkspaceName) + } + + if actual.PrivateEndpointConnectionName != v.Expected.PrivateEndpointConnectionName { + t.Fatalf("Expected %q but got %q for PrivateEndpointConnectionName", v.Expected.PrivateEndpointConnectionName, actual.PrivateEndpointConnectionName) + } + + } +} + +func TestParsePrivateEndpointConnectionIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *PrivateEndpointConnectionId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateEndpointConnections", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE/pRiVaTeEnDpOiNtCoNnEcTiOnS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateEndpointConnections/privateEndpointConnectionValue", + Expected: &PrivateEndpointConnectionId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + WorkspaceName: "workspaceValue", + PrivateEndpointConnectionName: "privateEndpointConnectionValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateEndpointConnections/privateEndpointConnectionValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE/pRiVaTeEnDpOiNtCoNnEcTiOnS/pRiVaTeEnDpOiNtCoNnEcTiOnVaLuE", + Expected: &PrivateEndpointConnectionId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + WorkspaceName: "wOrKsPaCeVaLuE", + PrivateEndpointConnectionName: "pRiVaTeEnDpOiNtCoNnEcTiOnVaLuE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE/pRiVaTeEnDpOiNtCoNnEcTiOnS/pRiVaTeEnDpOiNtCoNnEcTiOnVaLuE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParsePrivateEndpointConnectionIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.WorkspaceName != v.Expected.WorkspaceName { + t.Fatalf("Expected %q but got %q for WorkspaceName", v.Expected.WorkspaceName, actual.WorkspaceName) + } + + if actual.PrivateEndpointConnectionName != v.Expected.PrivateEndpointConnectionName { + t.Fatalf("Expected %q but got %q for PrivateEndpointConnectionName", v.Expected.PrivateEndpointConnectionName, actual.PrivateEndpointConnectionName) + } + + } +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/id_workspace.go b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/id_workspace.go new file mode 100644 index 000000000000..3b33ae5dd874 --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/id_workspace.go @@ -0,0 +1,124 @@ +package privateendpointconnections + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = WorkspaceId{} + +// WorkspaceId is a struct representing the Resource ID for a Workspace +type WorkspaceId struct { + SubscriptionId string + ResourceGroupName string + WorkspaceName string +} + +// NewWorkspaceID returns a new WorkspaceId struct +func NewWorkspaceID(subscriptionId string, resourceGroupName string, workspaceName string) WorkspaceId { + return WorkspaceId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + WorkspaceName: workspaceName, + } +} + +// ParseWorkspaceID parses 'input' into a WorkspaceId +func ParseWorkspaceID(input string) (*WorkspaceId, error) { + parser := resourceids.NewParserFromResourceIdType(WorkspaceId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := WorkspaceId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + 
return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.WorkspaceName, ok = parsed.Parsed["workspaceName"]; !ok { + return nil, fmt.Errorf("the segment 'workspaceName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParseWorkspaceIDInsensitively parses 'input' case-insensitively into a WorkspaceId +// note: this method should only be used for API response data and not user input +func ParseWorkspaceIDInsensitively(input string) (*WorkspaceId, error) { + parser := resourceids.NewParserFromResourceIdType(WorkspaceId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := WorkspaceId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.WorkspaceName, ok = parsed.Parsed["workspaceName"]; !ok { + return nil, fmt.Errorf("the segment 'workspaceName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidateWorkspaceID checks that 'input' can be parsed as a Workspace ID +func ValidateWorkspaceID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseWorkspaceID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Workspace ID +func (id WorkspaceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Databricks/workspaces/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.WorkspaceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Workspace ID +func (id WorkspaceId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("providers", "providers", "providers"), + resourceids.ResourceProviderSegment("microsoftDatabricks", "Microsoft.Databricks", "Microsoft.Databricks"), + resourceids.StaticSegment("workspaces", "workspaces", "workspaces"), + resourceids.UserSpecifiedSegment("workspaceName", "workspaceValue"), + } +} + +// String returns a human-readable description of this Workspace ID +func (id WorkspaceId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Workspace Name: %q", id.WorkspaceName), + } + return fmt.Sprintf("Workspace (%s)", strings.Join(components, "\n")) +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/id_workspace_test.go b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/id_workspace_test.go new file mode 100644 index 000000000000..719eff954e75 --- /dev/null +++ 
b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/id_workspace_test.go @@ -0,0 +1,264 @@ +package privateendpointconnections + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = WorkspaceId{} + +func TestNewWorkspaceID(t *testing.T) { + id := NewWorkspaceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "workspaceValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.WorkspaceName != "workspaceValue" { + t.Fatalf("Expected %q but got %q for Segment 'WorkspaceName'", id.WorkspaceName, "workspaceValue") + } +} + +func TestFormatWorkspaceID(t *testing.T) { + actual := NewWorkspaceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "workspaceValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) + } +} + +func TestParseWorkspaceID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *WorkspaceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue", + Expected: &WorkspaceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + WorkspaceName: "workspaceValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseWorkspaceID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q 
for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.WorkspaceName != v.Expected.WorkspaceName { + t.Fatalf("Expected %q but got %q for WorkspaceName", v.Expected.WorkspaceName, actual.WorkspaceName) + } + + } +} + +func TestParseWorkspaceIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *WorkspaceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue", + Expected: &WorkspaceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + WorkspaceName: "workspaceValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE", + Expected: &WorkspaceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + WorkspaceName: "wOrKsPaCeVaLuE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseWorkspaceIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.WorkspaceName != v.Expected.WorkspaceName { + t.Fatalf("Expected %q but got %q for WorkspaceName", v.Expected.WorkspaceName, actual.WorkspaceName) + } + + } +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/method_get_autorest.go b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/method_get_autorest.go new file mode 100644 index 000000000000..10ec2007dc6d --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/method_get_autorest.go @@ -0,0 +1,64 @@ +package privateendpointconnections + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type GetResponse struct { + HttpResponse *http.Response + Model *PrivateEndpointConnection +} + +// Get ... +func (c PrivateEndpointConnectionsClient) Get(ctx context.Context, id PrivateEndpointConnectionId) (result GetResponse, err error) { + req, err := c.preparerForGet(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "privateendpointconnections.PrivateEndpointConnectionsClient", "Get", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "privateendpointconnections.PrivateEndpointConnectionsClient", "Get", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForGet(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "privateendpointconnections.PrivateEndpointConnectionsClient", "Get", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForGet prepares the Get request. 
+func (c PrivateEndpointConnectionsClient) preparerForGet(ctx context.Context, id PrivateEndpointConnectionId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForGet handles the response to the Get request. The method always +// closes the http.Response Body. +func (c PrivateEndpointConnectionsClient) responderForGet(resp *http.Response) (result GetResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/method_list_autorest.go b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/method_list_autorest.go new file mode 100644 index 000000000000..2a41d61f7253 --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/method_list_autorest.go @@ -0,0 +1,183 @@ +package privateendpointconnections + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type ListResponse struct { + HttpResponse *http.Response + Model *[]PrivateEndpointConnection + + nextLink *string + nextPageFunc func(ctx context.Context, nextLink string) (ListResponse, error) +} + +type ListCompleteResult struct { + Items []PrivateEndpointConnection +} + +func (r ListResponse) HasMore() bool { + return r.nextLink != nil +} + +func (r ListResponse) LoadMore(ctx context.Context) (resp ListResponse, err error) { + if !r.HasMore() { + err = fmt.Errorf("no more pages returned") + return + } + return r.nextPageFunc(ctx, *r.nextLink) +} + +// List ... 
+func (c PrivateEndpointConnectionsClient) List(ctx context.Context, id WorkspaceId) (resp ListResponse, err error) { + req, err := c.preparerForList(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "privateendpointconnections.PrivateEndpointConnectionsClient", "List", nil, "Failure preparing request") + return + } + + resp.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "privateendpointconnections.PrivateEndpointConnectionsClient", "List", resp.HttpResponse, "Failure sending request") + return + } + + resp, err = c.responderForList(resp.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "privateendpointconnections.PrivateEndpointConnectionsClient", "List", resp.HttpResponse, "Failure responding to request") + return + } + return +} + +// ListComplete retrieves all of the results into a single object +func (c PrivateEndpointConnectionsClient) ListComplete(ctx context.Context, id WorkspaceId) (ListCompleteResult, error) { + return c.ListCompleteMatchingPredicate(ctx, id, PrivateEndpointConnectionPredicate{}) +} + +// ListCompleteMatchingPredicate retrieves all of the results and then applied the predicate +func (c PrivateEndpointConnectionsClient) ListCompleteMatchingPredicate(ctx context.Context, id WorkspaceId, predicate PrivateEndpointConnectionPredicate) (resp ListCompleteResult, err error) { + items := make([]PrivateEndpointConnection, 0) + + page, err := c.List(ctx, id) + if err != nil { + err = fmt.Errorf("loading the initial page: %+v", err) + return + } + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + for page.HasMore() { + page, err = page.LoadMore(ctx) + if err != nil { + err = fmt.Errorf("loading the next page: %+v", err) + return + } + + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + } + + out := ListCompleteResult{ + Items: items, + } + return out, nil +} + +// preparerForList prepares the List request. +func (c PrivateEndpointConnectionsClient) preparerForList(ctx context.Context, id WorkspaceId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/privateEndpointConnections", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// preparerForListWithNextLink prepares the List request with the given nextLink token. 
+func (c PrivateEndpointConnectionsClient) preparerForListWithNextLink(ctx context.Context, nextLink string) (*http.Request, error) { + uri, err := url.Parse(nextLink) + if err != nil { + return nil, fmt.Errorf("parsing nextLink %q: %+v", nextLink, err) + } + queryParameters := map[string]interface{}{} + for k, v := range uri.Query() { + if len(v) == 0 { + continue + } + val := v[0] + val = autorest.Encode("query", val) + queryParameters[k] = val + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(uri.Path), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForList handles the response to the List request. The method always +// closes the http.Response Body. +func (c PrivateEndpointConnectionsClient) responderForList(resp *http.Response) (result ListResponse, err error) { + type page struct { + Values []PrivateEndpointConnection `json:"value"` + NextLink *string `json:"nextLink"` + } + var respObj page + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&respObj), + autorest.ByClosing()) + result.HttpResponse = resp + result.Model = &respObj.Values + result.nextLink = respObj.NextLink + if respObj.NextLink != nil { + result.nextPageFunc = func(ctx context.Context, nextLink string) (result ListResponse, err error) { + req, err := c.preparerForListWithNextLink(ctx, nextLink) + if err != nil { + err = autorest.NewErrorWithError(err, "privateendpointconnections.PrivateEndpointConnectionsClient", "List", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "privateendpointconnections.PrivateEndpointConnectionsClient", "List", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForList(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "privateendpointconnections.PrivateEndpointConnectionsClient", "List", result.HttpResponse, "Failure responding to request") + return + } + + return + } + } + return +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/model_privateendpoint.go b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/model_privateendpoint.go new file mode 100644 index 000000000000..33ffc888d58a --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/model_privateendpoint.go @@ -0,0 +1,5 @@ +package privateendpointconnections + +type PrivateEndpoint struct { + Id *string `json:"id,omitempty"` +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/model_privateendpointconnection.go b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/model_privateendpointconnection.go new file mode 100644 index 000000000000..a7282003caf6 --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/model_privateendpointconnection.go @@ -0,0 +1,8 @@ +package privateendpointconnections + +type PrivateEndpointConnection struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties PrivateEndpointConnectionProperties `json:"properties"` + Type *string `json:"type,omitempty"` +} diff --git 
a/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/model_privateendpointconnectionproperties.go b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/model_privateendpointconnectionproperties.go new file mode 100644 index 000000000000..252bd91cbe86 --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/model_privateendpointconnectionproperties.go @@ -0,0 +1,7 @@ +package privateendpointconnections + +type PrivateEndpointConnectionProperties struct { + PrivateEndpoint *PrivateEndpoint `json:"privateEndpoint,omitempty"` + PrivateLinkServiceConnectionState PrivateLinkServiceConnectionState `json:"privateLinkServiceConnectionState"` + ProvisioningState *PrivateEndpointConnectionProvisioningState `json:"provisioningState,omitempty"` +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/model_privatelinkserviceconnectionstate.go b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/model_privatelinkserviceconnectionstate.go new file mode 100644 index 000000000000..c2f16e525600 --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/model_privatelinkserviceconnectionstate.go @@ -0,0 +1,7 @@ +package privateendpointconnections + +type PrivateLinkServiceConnectionState struct { + ActionRequired *string `json:"actionRequired,omitempty"` + Description *string `json:"description,omitempty"` + Status PrivateLinkServiceConnectionStatus `json:"status"` +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/predicates.go b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/predicates.go new file mode 100644 index 000000000000..088034749cf2 --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/predicates.go @@ -0,0 +1,24 @@ +package privateendpointconnections + +type PrivateEndpointConnectionPredicate struct { + Id *string + Name *string + Type *string +} + +func (p PrivateEndpointConnectionPredicate) Matches(input PrivateEndpointConnection) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/version.go b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/version.go new file mode 100644 index 000000000000..0e0108f8f914 --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/privateendpointconnections/version.go @@ -0,0 +1,9 @@ +package privateendpointconnections + +import "fmt" + +const defaultApiVersion = "2021-04-01-preview" + +func userAgent() string { + return fmt.Sprintf("pandora/privateendpointconnections/%s", defaultApiVersion) +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/client.go b/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/client.go new file mode 100644 index 000000000000..3ccc015d4fe1 --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/client.go @@ -0,0 +1,15 @@ +package privatelinkresources + +import "github.com/Azure/go-autorest/autorest" + +type PrivateLinkResourcesClient struct { + Client
autorest.Client + baseUri string +} + +func NewPrivateLinkResourcesClientWithBaseURI(endpoint string) PrivateLinkResourcesClient { + return PrivateLinkResourcesClient{ + Client: autorest.NewClientWithUserAgent(userAgent()), + baseUri: endpoint, + } +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/id_privatelinkresource.go b/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/id_privatelinkresource.go new file mode 100644 index 000000000000..5cc3c820aad1 --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/id_privatelinkresource.go @@ -0,0 +1,137 @@ +package privatelinkresources + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = PrivateLinkResourceId{} + +// PrivateLinkResourceId is a struct representing the Resource ID for a Private Link Resource +type PrivateLinkResourceId struct { + SubscriptionId string + ResourceGroupName string + WorkspaceName string + GroupId string +} + +// NewPrivateLinkResourceID returns a new PrivateLinkResourceId struct +func NewPrivateLinkResourceID(subscriptionId string, resourceGroupName string, workspaceName string, groupId string) PrivateLinkResourceId { + return PrivateLinkResourceId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + WorkspaceName: workspaceName, + GroupId: groupId, + } +} + +// ParsePrivateLinkResourceID parses 'input' into a PrivateLinkResourceId +func ParsePrivateLinkResourceID(input string) (*PrivateLinkResourceId, error) { + parser := resourceids.NewParserFromResourceIdType(PrivateLinkResourceId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := PrivateLinkResourceId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.WorkspaceName, ok = parsed.Parsed["workspaceName"]; !ok { + return nil, fmt.Errorf("the segment 'workspaceName' was not found in the resource id %q", input) + } + + if id.GroupId, ok = parsed.Parsed["groupId"]; !ok { + return nil, fmt.Errorf("the segment 'groupId' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParsePrivateLinkResourceIDInsensitively parses 'input' case-insensitively into a PrivateLinkResourceId +// note: this method should only be used for API response data and not user input +func ParsePrivateLinkResourceIDInsensitively(input string) (*PrivateLinkResourceId, error) { + parser := resourceids.NewParserFromResourceIdType(PrivateLinkResourceId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := PrivateLinkResourceId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.WorkspaceName, ok = parsed.Parsed["workspaceName"]; !ok { + return nil, 
fmt.Errorf("the segment 'workspaceName' was not found in the resource id %q", input) + } + + if id.GroupId, ok = parsed.Parsed["groupId"]; !ok { + return nil, fmt.Errorf("the segment 'groupId' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidatePrivateLinkResourceID checks that 'input' can be parsed as a Private Link Resource ID +func ValidatePrivateLinkResourceID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParsePrivateLinkResourceID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Private Link Resource ID +func (id PrivateLinkResourceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Databricks/workspaces/%s/privateLinkResources/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.WorkspaceName, id.GroupId) +} + +// Segments returns a slice of Resource ID Segments which comprise this Private Link Resource ID +func (id PrivateLinkResourceId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("providers", "providers", "providers"), + resourceids.ResourceProviderSegment("microsoftDatabricks", "Microsoft.Databricks", "Microsoft.Databricks"), + resourceids.StaticSegment("workspaces", "workspaces", "workspaces"), + resourceids.UserSpecifiedSegment("workspaceName", "workspaceValue"), + resourceids.StaticSegment("privateLinkResources", "privateLinkResources", "privateLinkResources"), + resourceids.UserSpecifiedSegment("groupId", "groupIdValue"), + } +} + +// String returns a human-readable description of this Private Link Resource ID +func (id PrivateLinkResourceId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Workspace Name: %q", id.WorkspaceName), + fmt.Sprintf("Group: %q", id.GroupId), + } + return fmt.Sprintf("Private Link Resource (%s)", strings.Join(components, "\n")) +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/id_privatelinkresource_test.go b/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/id_privatelinkresource_test.go new file mode 100644 index 000000000000..93e4201856fb --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/id_privatelinkresource_test.go @@ -0,0 +1,309 @@ +package privatelinkresources + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = PrivateLinkResourceId{} + +func TestNewPrivateLinkResourceID(t *testing.T) { + id := NewPrivateLinkResourceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "workspaceValue", "groupIdValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != 
"example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.WorkspaceName != "workspaceValue" { + t.Fatalf("Expected %q but got %q for Segment 'WorkspaceName'", id.WorkspaceName, "workspaceValue") + } + + if id.GroupId != "groupIdValue" { + t.Fatalf("Expected %q but got %q for Segment 'GroupId'", id.GroupId, "groupIdValue") + } +} + +func TestFormatPrivateLinkResourceID(t *testing.T) { + actual := NewPrivateLinkResourceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "workspaceValue", "groupIdValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateLinkResources/groupIdValue" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) + } +} + +func TestParsePrivateLinkResourceID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *PrivateLinkResourceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateLinkResources", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateLinkResources/groupIdValue", + Expected: &PrivateLinkResourceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + WorkspaceName: "workspaceValue", + GroupId: "groupIdValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateLinkResources/groupIdValue/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParsePrivateLinkResourceID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if 
actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.WorkspaceName != v.Expected.WorkspaceName { + t.Fatalf("Expected %q but got %q for WorkspaceName", v.Expected.WorkspaceName, actual.WorkspaceName) + } + + if actual.GroupId != v.Expected.GroupId { + t.Fatalf("Expected %q but got %q for GroupId", v.Expected.GroupId, actual.GroupId) + } + + } +} + +func TestParsePrivateLinkResourceIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *PrivateLinkResourceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE", + Error: true, + }, + { + // Incomplete URI 
+ Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateLinkResources", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE/pRiVaTeLiNkReSoUrCeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateLinkResources/groupIdValue", + Expected: &PrivateLinkResourceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + WorkspaceName: "workspaceValue", + GroupId: "groupIdValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateLinkResources/groupIdValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE/pRiVaTeLiNkReSoUrCeS/gRoUpIdVaLuE", + Expected: &PrivateLinkResourceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + WorkspaceName: "wOrKsPaCeVaLuE", + GroupId: "gRoUpIdVaLuE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE/pRiVaTeLiNkReSoUrCeS/gRoUpIdVaLuE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParsePrivateLinkResourceIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.WorkspaceName != v.Expected.WorkspaceName { + t.Fatalf("Expected %q but got %q for WorkspaceName", v.Expected.WorkspaceName, actual.WorkspaceName) + } + + if actual.GroupId != v.Expected.GroupId { + t.Fatalf("Expected %q but got %q for GroupId", v.Expected.GroupId, actual.GroupId) + } + + } +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/id_workspace.go b/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/id_workspace.go new file mode 100644 index 000000000000..fb3c55d19634 --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/id_workspace.go @@ -0,0 +1,124 @@ +package privatelinkresources + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = WorkspaceId{} + +// WorkspaceId is a struct representing the Resource ID for a Workspace +type 
WorkspaceId struct { + SubscriptionId string + ResourceGroupName string + WorkspaceName string +} + +// NewWorkspaceID returns a new WorkspaceId struct +func NewWorkspaceID(subscriptionId string, resourceGroupName string, workspaceName string) WorkspaceId { + return WorkspaceId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + WorkspaceName: workspaceName, + } +} + +// ParseWorkspaceID parses 'input' into a WorkspaceId +func ParseWorkspaceID(input string) (*WorkspaceId, error) { + parser := resourceids.NewParserFromResourceIdType(WorkspaceId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := WorkspaceId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.WorkspaceName, ok = parsed.Parsed["workspaceName"]; !ok { + return nil, fmt.Errorf("the segment 'workspaceName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParseWorkspaceIDInsensitively parses 'input' case-insensitively into a WorkspaceId +// note: this method should only be used for API response data and not user input +func ParseWorkspaceIDInsensitively(input string) (*WorkspaceId, error) { + parser := resourceids.NewParserFromResourceIdType(WorkspaceId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := WorkspaceId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.WorkspaceName, ok = parsed.Parsed["workspaceName"]; !ok { + return nil, fmt.Errorf("the segment 'workspaceName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidateWorkspaceID checks that 'input' can be parsed as a Workspace ID +func ValidateWorkspaceID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseWorkspaceID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Workspace ID +func (id WorkspaceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Databricks/workspaces/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.WorkspaceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Workspace ID +func (id WorkspaceId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("providers", "providers", 
"providers"), + resourceids.ResourceProviderSegment("microsoftDatabricks", "Microsoft.Databricks", "Microsoft.Databricks"), + resourceids.StaticSegment("workspaces", "workspaces", "workspaces"), + resourceids.UserSpecifiedSegment("workspaceName", "workspaceValue"), + } +} + +// String returns a human-readable description of this Workspace ID +func (id WorkspaceId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Workspace Name: %q", id.WorkspaceName), + } + return fmt.Sprintf("Workspace (%s)", strings.Join(components, "\n")) +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/id_workspace_test.go b/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/id_workspace_test.go new file mode 100644 index 000000000000..f2cff2e59b67 --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/id_workspace_test.go @@ -0,0 +1,264 @@ +package privatelinkresources + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = WorkspaceId{} + +func TestNewWorkspaceID(t *testing.T) { + id := NewWorkspaceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "workspaceValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.WorkspaceName != "workspaceValue" { + t.Fatalf("Expected %q but got %q for Segment 'WorkspaceName'", id.WorkspaceName, "workspaceValue") + } +} + +func TestFormatWorkspaceID(t *testing.T) { + actual := NewWorkspaceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "workspaceValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) + } +} + +func TestParseWorkspaceID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *WorkspaceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces", + Error: true, + }, + { + // Valid URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue", + Expected: &WorkspaceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + WorkspaceName: "workspaceValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseWorkspaceID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.WorkspaceName != v.Expected.WorkspaceName { + t.Fatalf("Expected %q but got %q for WorkspaceName", v.Expected.WorkspaceName, actual.WorkspaceName) + } + + } +} + +func TestParseWorkspaceIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *WorkspaceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces", + Error: 
true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue", + Expected: &WorkspaceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + WorkspaceName: "workspaceValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE", + Expected: &WorkspaceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + WorkspaceName: "wOrKsPaCeVaLuE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseWorkspaceIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.WorkspaceName != v.Expected.WorkspaceName { + t.Fatalf("Expected %q but got %q for WorkspaceName", v.Expected.WorkspaceName, actual.WorkspaceName) + } + + } +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/method_get_autorest.go b/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/method_get_autorest.go new file mode 100644 index 000000000000..85fc3cffa7a3 --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/method_get_autorest.go @@ -0,0 +1,64 @@ +package privatelinkresources + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type GetResponse struct { + HttpResponse *http.Response + Model *GroupIdInformation +} + +// Get ... 
+func (c PrivateLinkResourcesClient) Get(ctx context.Context, id PrivateLinkResourceId) (result GetResponse, err error) { + req, err := c.preparerForGet(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "privatelinkresources.PrivateLinkResourcesClient", "Get", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "privatelinkresources.PrivateLinkResourcesClient", "Get", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForGet(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "privatelinkresources.PrivateLinkResourcesClient", "Get", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForGet prepares the Get request. +func (c PrivateLinkResourcesClient) preparerForGet(ctx context.Context, id PrivateLinkResourceId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForGet handles the response to the Get request. The method always +// closes the http.Response Body. +func (c PrivateLinkResourcesClient) responderForGet(resp *http.Response) (result GetResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/method_list_autorest.go b/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/method_list_autorest.go new file mode 100644 index 000000000000..5bb350cb743f --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/method_list_autorest.go @@ -0,0 +1,183 @@ +package privatelinkresources + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type ListResponse struct { + HttpResponse *http.Response + Model *[]GroupIdInformation + + nextLink *string + nextPageFunc func(ctx context.Context, nextLink string) (ListResponse, error) +} + +type ListCompleteResult struct { + Items []GroupIdInformation +} + +func (r ListResponse) HasMore() bool { + return r.nextLink != nil +} + +func (r ListResponse) LoadMore(ctx context.Context) (resp ListResponse, err error) { + if !r.HasMore() { + err = fmt.Errorf("no more pages returned") + return + } + return r.nextPageFunc(ctx, *r.nextLink) +} + +// List ... 
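A matching sketch for the paged List operation implemented below, again with the client and workspace ID assumed to be supplied by the caller: it collects the private link group IDs exposed for a workspace.

```go
package example

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-provider-azurerm/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources"
)

// availableGroupIds pages through every Private Link Resource for a workspace
// via ListComplete and returns the group IDs reported by the service.
func availableGroupIds(ctx context.Context, client privatelinkresources.PrivateLinkResourcesClient, workspaceId privatelinkresources.WorkspaceId) ([]string, error) {
	result, err := client.ListComplete(ctx, workspaceId)
	if err != nil {
		return nil, fmt.Errorf("listing private link resources for %s: %+v", workspaceId.ID(), err)
	}

	groupIds := make([]string, 0)
	for _, item := range result.Items {
		if item.Properties.GroupId != nil {
			groupIds = append(groupIds, *item.Properties.GroupId)
		}
	}
	return groupIds, nil
}
```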
+func (c PrivateLinkResourcesClient) List(ctx context.Context, id WorkspaceId) (resp ListResponse, err error) { + req, err := c.preparerForList(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "privatelinkresources.PrivateLinkResourcesClient", "List", nil, "Failure preparing request") + return + } + + resp.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "privatelinkresources.PrivateLinkResourcesClient", "List", resp.HttpResponse, "Failure sending request") + return + } + + resp, err = c.responderForList(resp.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "privatelinkresources.PrivateLinkResourcesClient", "List", resp.HttpResponse, "Failure responding to request") + return + } + return +} + +// ListComplete retrieves all of the results into a single object +func (c PrivateLinkResourcesClient) ListComplete(ctx context.Context, id WorkspaceId) (ListCompleteResult, error) { + return c.ListCompleteMatchingPredicate(ctx, id, GroupIdInformationPredicate{}) +} + +// ListCompleteMatchingPredicate retrieves all of the results and then applied the predicate +func (c PrivateLinkResourcesClient) ListCompleteMatchingPredicate(ctx context.Context, id WorkspaceId, predicate GroupIdInformationPredicate) (resp ListCompleteResult, err error) { + items := make([]GroupIdInformation, 0) + + page, err := c.List(ctx, id) + if err != nil { + err = fmt.Errorf("loading the initial page: %+v", err) + return + } + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + for page.HasMore() { + page, err = page.LoadMore(ctx) + if err != nil { + err = fmt.Errorf("loading the next page: %+v", err) + return + } + + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + } + + out := ListCompleteResult{ + Items: items, + } + return out, nil +} + +// preparerForList prepares the List request. +func (c PrivateLinkResourcesClient) preparerForList(ctx context.Context, id WorkspaceId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/privateLinkResources", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// preparerForListWithNextLink prepares the List request with the given nextLink token. +func (c PrivateLinkResourcesClient) preparerForListWithNextLink(ctx context.Context, nextLink string) (*http.Request, error) { + uri, err := url.Parse(nextLink) + if err != nil { + return nil, fmt.Errorf("parsing nextLink %q: %+v", nextLink, err) + } + queryParameters := map[string]interface{}{} + for k, v := range uri.Query() { + if len(v) == 0 { + continue + } + val := v[0] + val = autorest.Encode("query", val) + queryParameters[k] = val + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(uri.Path), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForList handles the response to the List request. 
The method always +// closes the http.Response Body. +func (c PrivateLinkResourcesClient) responderForList(resp *http.Response) (result ListResponse, err error) { + type page struct { + Values []GroupIdInformation `json:"value"` + NextLink *string `json:"nextLink"` + } + var respObj page + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&respObj), + autorest.ByClosing()) + result.HttpResponse = resp + result.Model = &respObj.Values + result.nextLink = respObj.NextLink + if respObj.NextLink != nil { + result.nextPageFunc = func(ctx context.Context, nextLink string) (result ListResponse, err error) { + req, err := c.preparerForListWithNextLink(ctx, nextLink) + if err != nil { + err = autorest.NewErrorWithError(err, "privatelinkresources.PrivateLinkResourcesClient", "List", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "privatelinkresources.PrivateLinkResourcesClient", "List", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForList(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "privatelinkresources.PrivateLinkResourcesClient", "List", result.HttpResponse, "Failure responding to request") + return + } + + return + } + } + return +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/model_groupidinformation.go b/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/model_groupidinformation.go new file mode 100644 index 000000000000..89daef7c7be7 --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/model_groupidinformation.go @@ -0,0 +1,8 @@ +package privatelinkresources + +type GroupIdInformation struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties GroupIdInformationProperties `json:"properties"` + Type *string `json:"type,omitempty"` +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/model_groupidinformationproperties.go b/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/model_groupidinformationproperties.go new file mode 100644 index 000000000000..e0c04b2fc608 --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/model_groupidinformationproperties.go @@ -0,0 +1,7 @@ +package privatelinkresources + +type GroupIdInformationProperties struct { + GroupId *string `json:"groupId,omitempty"` + RequiredMembers *[]string `json:"requiredMembers,omitempty"` + RequiredZoneNames *[]string `json:"requiredZoneNames,omitempty"` +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/predicates.go b/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/predicates.go new file mode 100644 index 000000000000..e6b7f52e6fc0 --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/predicates.go @@ -0,0 +1,24 @@ +package privatelinkresources + +type GroupIdInformationPredicate struct { + Id *string + Name *string + Type *string +} + +func (p GroupIdInformationPredicate) Matches(input GroupIdInformation) bool { + + if p.Id != nil && (input.Id == nil && *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil && *p.Name != *input.Name) { + return false + } + + if p.Type != nil && 
(input.Type == nil && *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/version.go b/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/version.go new file mode 100644 index 000000000000..8b95dfdb6be8 --- /dev/null +++ b/internal/services/databricks/sdk/2021-04-01-preview/privatelinkresources/version.go @@ -0,0 +1,9 @@ +package privatelinkresources + +import "fmt" + +const defaultApiVersion = "2021-04-01-preview" + +func userAgent() string { + return fmt.Sprintf("pandora/privatelinkresources/%s", defaultApiVersion) +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/put/constants.go b/internal/services/databricks/sdk/2021-04-01-preview/put/constants.go index b26a64181486..6c47ad4ca74f 100644 --- a/internal/services/databricks/sdk/2021-04-01-preview/put/constants.go +++ b/internal/services/databricks/sdk/2021-04-01-preview/put/constants.go @@ -1,5 +1,7 @@ package put +import "strings" + type PrivateEndpointConnectionProvisioningState string const ( @@ -10,6 +12,35 @@ const ( PrivateEndpointConnectionProvisioningStateUpdating PrivateEndpointConnectionProvisioningState = "Updating" ) +func PossibleValuesForPrivateEndpointConnectionProvisioningState() []string { + return []string{ + "Creating", + "Deleting", + "Failed", + "Succeeded", + "Updating", + } +} + +func parsePrivateEndpointConnectionProvisioningState(input string) (*PrivateEndpointConnectionProvisioningState, error) { + vals := map[string]PrivateEndpointConnectionProvisioningState{ + "creating": "Creating", + "deleting": "Deleting", + "failed": "Failed", + "succeeded": "Succeeded", + "updating": "Updating", + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // it could be a new value - best effort convert this + v := input + + out := PrivateEndpointConnectionProvisioningState(v) + return &out, nil +} + type PrivateLinkServiceConnectionStatus string const ( @@ -18,3 +49,30 @@ const ( PrivateLinkServiceConnectionStatusPending PrivateLinkServiceConnectionStatus = "Pending" PrivateLinkServiceConnectionStatusRejected PrivateLinkServiceConnectionStatus = "Rejected" ) + +func PossibleValuesForPrivateLinkServiceConnectionStatus() []string { + return []string{ + "Approved", + "Disconnected", + "Pending", + "Rejected", + } +} + +func parsePrivateLinkServiceConnectionStatus(input string) (*PrivateLinkServiceConnectionStatus, error) { + vals := map[string]PrivateLinkServiceConnectionStatus{ + "approved": "Approved", + "disconnected": "Disconnected", + "pending": "Pending", + "rejected": "Rejected", + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // it could be a new value - best effort convert this + v := input + + out := PrivateLinkServiceConnectionStatus(v) + return &out, nil +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/put/id_privateendpointconnection.go b/internal/services/databricks/sdk/2021-04-01-preview/put/id_privateendpointconnection.go index cbdf5f66f51a..dd865ea548e9 100644 --- a/internal/services/databricks/sdk/2021-04-01-preview/put/id_privateendpointconnection.go +++ b/internal/services/databricks/sdk/2021-04-01-preview/put/id_privateendpointconnection.go @@ -7,120 +7,131 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = PrivateEndpointConnectionId{} + +// PrivateEndpointConnectionId is a struct representing the Resource ID for a Private Endpoint 
Connection type PrivateEndpointConnectionId struct { - SubscriptionId string - ResourceGroup string - WorkspaceName string - Name string + SubscriptionId string + ResourceGroupName string + WorkspaceName string + PrivateEndpointConnectionName string } -func NewPrivateEndpointConnectionID(subscriptionId, resourceGroup, workspaceName, name string) PrivateEndpointConnectionId { +// NewPrivateEndpointConnectionID returns a new PrivateEndpointConnectionId struct +func NewPrivateEndpointConnectionID(subscriptionId string, resourceGroupName string, workspaceName string, privateEndpointConnectionName string) PrivateEndpointConnectionId { return PrivateEndpointConnectionId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, - WorkspaceName: workspaceName, - Name: name, + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + WorkspaceName: workspaceName, + PrivateEndpointConnectionName: privateEndpointConnectionName, } } -func (id PrivateEndpointConnectionId) String() string { - segments := []string{ - fmt.Sprintf("Name %q", id.Name), - fmt.Sprintf("Workspace Name %q", id.WorkspaceName), - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Private Endpoint Connection", segmentsStr) -} - -func (id PrivateEndpointConnectionId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Databricks/workspaces/%s/privateEndpointConnections/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.WorkspaceName, id.Name) -} - -// ParsePrivateEndpointConnectionID parses a PrivateEndpointConnection ID into an PrivateEndpointConnectionId struct +// ParsePrivateEndpointConnectionID parses 'input' into a PrivateEndpointConnectionId func ParsePrivateEndpointConnectionID(input string) (*PrivateEndpointConnectionId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(PrivateEndpointConnectionId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := PrivateEndpointConnectionId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, - } + var ok bool + id := PrivateEndpointConnectionId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.WorkspaceName, err = id.PopSegment("workspaces"); err != nil { - return nil, err - } - if resourceId.Name, err = id.PopSegment("privateEndpointConnections"); err != nil { - return nil, err + if id.WorkspaceName, ok = parsed.Parsed["workspaceName"]; !ok { + return nil, fmt.Errorf("the segment 'workspaceName' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.PrivateEndpointConnectionName, ok = parsed.Parsed["privateEndpointConnectionName"]; !ok { + return nil, fmt.Errorf("the segment 'privateEndpointConnectionName' was not found in the 
resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParsePrivateEndpointConnectionIDInsensitively parses an PrivateEndpointConnection ID into an PrivateEndpointConnectionId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParsePrivateEndpointConnectionID method should be used instead for validation etc. +// ParsePrivateEndpointConnectionIDInsensitively parses 'input' case-insensitively into a PrivateEndpointConnectionId +// note: this method should only be used for API response data and not user input func ParsePrivateEndpointConnectionIDInsensitively(input string) (*PrivateEndpointConnectionId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(PrivateEndpointConnectionId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := PrivateEndpointConnectionId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, - } + var ok bool + id := PrivateEndpointConnectionId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - // find the correct casing for the 'workspaces' segment - workspacesKey := "workspaces" - for key := range id.Path { - if strings.EqualFold(key, workspacesKey) { - workspacesKey = key - break - } + if id.WorkspaceName, ok = parsed.Parsed["workspaceName"]; !ok { + return nil, fmt.Errorf("the segment 'workspaceName' was not found in the resource id %q", input) } - if resourceId.WorkspaceName, err = id.PopSegment(workspacesKey); err != nil { - return nil, err + + if id.PrivateEndpointConnectionName, ok = parsed.Parsed["privateEndpointConnectionName"]; !ok { + return nil, fmt.Errorf("the segment 'privateEndpointConnectionName' was not found in the resource id %q", input) } - // find the correct casing for the 'privateEndpointConnections' segment - privateEndpointConnectionsKey := "privateEndpointConnections" - for key := range id.Path { - if strings.EqualFold(key, privateEndpointConnectionsKey) { - privateEndpointConnectionsKey = key - break - } + return &id, nil +} + +// ValidatePrivateEndpointConnectionID checks that 'input' can be parsed as a Private Endpoint Connection ID +func ValidatePrivateEndpointConnectionID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.Name, err = id.PopSegment(privateEndpointConnectionsKey); err != nil { - return nil, err + + if _, err := ParsePrivateEndpointConnectionID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Private Endpoint Connection ID +func (id PrivateEndpointConnectionId) ID() string { + fmtString := 
"/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Databricks/workspaces/%s/privateEndpointConnections/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.WorkspaceName, id.PrivateEndpointConnectionName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Private Endpoint Connection ID +func (id PrivateEndpointConnectionId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("providers", "providers", "providers"), + resourceids.ResourceProviderSegment("microsoftDatabricks", "Microsoft.Databricks", "Microsoft.Databricks"), + resourceids.StaticSegment("workspaces", "workspaces", "workspaces"), + resourceids.UserSpecifiedSegment("workspaceName", "workspaceValue"), + resourceids.StaticSegment("privateEndpointConnections", "privateEndpointConnections", "privateEndpointConnections"), + resourceids.UserSpecifiedSegment("privateEndpointConnectionName", "privateEndpointConnectionValue"), } +} - return &resourceId, nil +// String returns a human-readable description of this Private Endpoint Connection ID +func (id PrivateEndpointConnectionId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Workspace Name: %q", id.WorkspaceName), + fmt.Sprintf("Private Endpoint Connection Name: %q", id.PrivateEndpointConnectionName), + } + return fmt.Sprintf("Private Endpoint Connection (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/databricks/sdk/2021-04-01-preview/put/id_privateendpointconnection_test.go b/internal/services/databricks/sdk/2021-04-01-preview/put/id_privateendpointconnection_test.go index 2a1ac675df57..fef4e94abf72 100644 --- a/internal/services/databricks/sdk/2021-04-01-preview/put/id_privateendpointconnection_test.go +++ b/internal/services/databricks/sdk/2021-04-01-preview/put/id_privateendpointconnection_test.go @@ -6,13 +6,33 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = PrivateEndpointConnectionId{} +var _ resourceids.ResourceId = PrivateEndpointConnectionId{} -func TestPrivateEndpointConnectionIDFormatter(t *testing.T) { - actual := NewPrivateEndpointConnectionID("{subscriptionId}", "{resourceGroupName}", "{workspaceName}", "{privateEndpointConnectionName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}" +func TestNewPrivateEndpointConnectionID(t *testing.T) { + id := NewPrivateEndpointConnectionID("12345678-1234-9876-4563-123456789012", "example-resource-group", "workspaceValue", "privateEndpointConnectionValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if 
id.WorkspaceName != "workspaceValue" { + t.Fatalf("Expected %q but got %q for Segment 'WorkspaceName'", id.WorkspaceName, "workspaceValue") + } + + if id.PrivateEndpointConnectionName != "privateEndpointConnectionValue" { + t.Fatalf("Expected %q but got %q for Segment 'PrivateEndpointConnectionName'", id.PrivateEndpointConnectionName, "privateEndpointConnectionValue") + } +} + +func TestFormatPrivateEndpointConnectionID(t *testing.T) { + actual := NewPrivateEndpointConnectionID("12345678-1234-9876-4563-123456789012", "example-resource-group", "workspaceValue", "privateEndpointConnectionValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateEndpointConnections/privateEndpointConnectionValue" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,79 +42,72 @@ func TestParsePrivateEndpointConnectionID(t *testing.T) { Error bool Expected *PrivateEndpointConnectionId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing WorkspaceName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", Error: true, }, - { - // missing value for WorkspaceName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/privateEndpointConnections/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateEndpointConnections", Error: true, }, - { - // valid - Input: 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateEndpointConnections/privateEndpointConnectionValue", Expected: &PrivateEndpointConnectionId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - WorkspaceName: "{workspaceName}", - Name: "{privateEndpointConnectionName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + WorkspaceName: "workspaceValue", + PrivateEndpointConnectionName: "privateEndpointConnectionValue", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.DATABRICKS/WORKSPACES/{WORKSPACENAME}/PRIVATEENDPOINTCONNECTIONS/{PRIVATEENDPOINTCONNECTIONNAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateEndpointConnections/privateEndpointConnectionValue/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -104,7 +117,7 @@ func TestParsePrivateEndpointConnectionID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -113,15 +126,19 @@ func TestParsePrivateEndpointConnectionID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + if actual.WorkspaceName != v.Expected.WorkspaceName { t.Fatalf("Expected %q but got %q for WorkspaceName", v.Expected.WorkspaceName, actual.WorkspaceName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.PrivateEndpointConnectionName != v.Expected.PrivateEndpointConnectionName { + t.Fatalf("Expected %q but got %q for PrivateEndpointConnectionName", v.Expected.PrivateEndpointConnectionName, actual.PrivateEndpointConnectionName) } + } } @@ -131,106 +148,132 @@ func TestParsePrivateEndpointConnectionIDInsensitively(t *testing.T) { Error bool Expected *PrivateEndpointConnectionId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI 
(mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing WorkspaceName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for WorkspaceName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/privateEndpointConnections/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}", - Expected: &PrivateEndpointConnectionId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - WorkspaceName: "{workspaceName}", - Name: "{privateEndpointConnectionName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/privateendpointconnections/{privateEndpointConnectionName}", - Expected: &PrivateEndpointConnectionId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - WorkspaceName: "{workspaceName}", - Name: "{privateEndpointConnectionName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/WORKSPACES/{workspaceName}/PRIVATEENDPOINTCONNECTIONS/{privateEndpointConnectionName}", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateEndpointConnections", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE/pRiVaTeEnDpOiNtCoNnEcTiOnS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateEndpointConnections/privateEndpointConnectionValue", Expected: &PrivateEndpointConnectionId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - WorkspaceName: "{workspaceName}", - Name: "{privateEndpointConnectionName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + WorkspaceName: "workspaceValue", + PrivateEndpointConnectionName: "privateEndpointConnectionValue", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/WoRkSpAcEs/{workspaceName}/PrIvAtEeNdPoInTcOnNeCtIoNs/{privateEndpointConnectionName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/privateEndpointConnections/privateEndpointConnectionValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE/pRiVaTeEnDpOiNtCoNnEcTiOnS/pRiVaTeEnDpOiNtCoNnEcTiOnVaLuE", Expected: &PrivateEndpointConnectionId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - WorkspaceName: "{workspaceName}", - Name: "{privateEndpointConnectionName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + WorkspaceName: "wOrKsPaCeVaLuE", + PrivateEndpointConnectionName: "pRiVaTeEnDpOiNtCoNnEcTiOnVaLuE", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE/pRiVaTeEnDpOiNtCoNnEcTiOnS/pRiVaTeEnDpOiNtCoNnEcTiOnVaLuE/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -240,7 +283,7 @@ func TestParsePrivateEndpointConnectionIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: 
%+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -249,14 +292,18 @@ func TestParsePrivateEndpointConnectionIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + if actual.WorkspaceName != v.Expected.WorkspaceName { t.Fatalf("Expected %q but got %q for WorkspaceName", v.Expected.WorkspaceName, actual.WorkspaceName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.PrivateEndpointConnectionName != v.Expected.PrivateEndpointConnectionName { + t.Fatalf("Expected %q but got %q for PrivateEndpointConnectionName", v.Expected.PrivateEndpointConnectionName, actual.PrivateEndpointConnectionName) } + } } diff --git a/internal/services/databricks/sdk/2021-04-01-preview/vnetpeering/constants.go b/internal/services/databricks/sdk/2021-04-01-preview/vnetpeering/constants.go index a1fc3f6665db..6aeb853df6c8 100644 --- a/internal/services/databricks/sdk/2021-04-01-preview/vnetpeering/constants.go +++ b/internal/services/databricks/sdk/2021-04-01-preview/vnetpeering/constants.go @@ -1,5 +1,7 @@ package vnetpeering +import "strings" + type PeeringProvisioningState string const ( @@ -9,6 +11,33 @@ const ( PeeringProvisioningStateUpdating PeeringProvisioningState = "Updating" ) +func PossibleValuesForPeeringProvisioningState() []string { + return []string{ + "Deleting", + "Failed", + "Succeeded", + "Updating", + } +} + +func parsePeeringProvisioningState(input string) (*PeeringProvisioningState, error) { + vals := map[string]PeeringProvisioningState{ + "deleting": "Deleting", + "failed": "Failed", + "succeeded": "Succeeded", + "updating": "Updating", + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // it could be a new value - best effort convert this + v := input + + out := PeeringProvisioningState(v) + return &out, nil +} + type PeeringState string const ( @@ -16,3 +45,28 @@ const ( PeeringStateDisconnected PeeringState = "Disconnected" PeeringStateInitiated PeeringState = "Initiated" ) + +func PossibleValuesForPeeringState() []string { + return []string{ + "Connected", + "Disconnected", + "Initiated", + } +} + +func parsePeeringState(input string) (*PeeringState, error) { + vals := map[string]PeeringState{ + "connected": "Connected", + "disconnected": "Disconnected", + "initiated": "Initiated", + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // it could be a new value - best effort convert this + v := input + + out := PeeringState(v) + return &out, nil +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/vnetpeering/id_virtualnetworkpeering.go b/internal/services/databricks/sdk/2021-04-01-preview/vnetpeering/id_virtualnetworkpeering.go index fc25b84aae70..0eac9e831595 100644 --- a/internal/services/databricks/sdk/2021-04-01-preview/vnetpeering/id_virtualnetworkpeering.go +++ b/internal/services/databricks/sdk/2021-04-01-preview/vnetpeering/id_virtualnetworkpeering.go @@ -7,120 +7,131 @@ import ( 
"github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = VirtualNetworkPeeringId{} + +// VirtualNetworkPeeringId is a struct representing the Resource ID for a Virtual Network Peering type VirtualNetworkPeeringId struct { - SubscriptionId string - ResourceGroup string - WorkspaceName string - Name string + SubscriptionId string + ResourceGroupName string + WorkspaceName string + PeeringName string } -func NewVirtualNetworkPeeringID(subscriptionId, resourceGroup, workspaceName, name string) VirtualNetworkPeeringId { +// NewVirtualNetworkPeeringID returns a new VirtualNetworkPeeringId struct +func NewVirtualNetworkPeeringID(subscriptionId string, resourceGroupName string, workspaceName string, peeringName string) VirtualNetworkPeeringId { return VirtualNetworkPeeringId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, - WorkspaceName: workspaceName, - Name: name, + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + WorkspaceName: workspaceName, + PeeringName: peeringName, } } -func (id VirtualNetworkPeeringId) String() string { - segments := []string{ - fmt.Sprintf("Name %q", id.Name), - fmt.Sprintf("Workspace Name %q", id.WorkspaceName), - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Virtual Network Peering", segmentsStr) -} - -func (id VirtualNetworkPeeringId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Databricks/workspaces/%s/virtualNetworkPeerings/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.WorkspaceName, id.Name) -} - -// ParseVirtualNetworkPeeringID parses a VirtualNetworkPeering ID into an VirtualNetworkPeeringId struct +// ParseVirtualNetworkPeeringID parses 'input' into a VirtualNetworkPeeringId func ParseVirtualNetworkPeeringID(input string) (*VirtualNetworkPeeringId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(VirtualNetworkPeeringId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := VirtualNetworkPeeringId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, - } + var ok bool + id := VirtualNetworkPeeringId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.WorkspaceName, err = id.PopSegment("workspaces"); err != nil { - return nil, err - } - if resourceId.Name, err = id.PopSegment("virtualNetworkPeerings"); err != nil { - return nil, err + if id.WorkspaceName, ok = parsed.Parsed["workspaceName"]; !ok { + return nil, fmt.Errorf("the segment 'workspaceName' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.PeeringName, ok = parsed.Parsed["peeringName"]; !ok { + return nil, fmt.Errorf("the segment 'peeringName' was not found in the 
resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParseVirtualNetworkPeeringIDInsensitively parses an VirtualNetworkPeering ID into an VirtualNetworkPeeringId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseVirtualNetworkPeeringID method should be used instead for validation etc. +// ParseVirtualNetworkPeeringIDInsensitively parses 'input' case-insensitively into a VirtualNetworkPeeringId +// note: this method should only be used for API response data and not user input func ParseVirtualNetworkPeeringIDInsensitively(input string) (*VirtualNetworkPeeringId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(VirtualNetworkPeeringId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := VirtualNetworkPeeringId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, - } + var ok bool + id := VirtualNetworkPeeringId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - // find the correct casing for the 'workspaces' segment - workspacesKey := "workspaces" - for key := range id.Path { - if strings.EqualFold(key, workspacesKey) { - workspacesKey = key - break - } + if id.WorkspaceName, ok = parsed.Parsed["workspaceName"]; !ok { + return nil, fmt.Errorf("the segment 'workspaceName' was not found in the resource id %q", input) } - if resourceId.WorkspaceName, err = id.PopSegment(workspacesKey); err != nil { - return nil, err + + if id.PeeringName, ok = parsed.Parsed["peeringName"]; !ok { + return nil, fmt.Errorf("the segment 'peeringName' was not found in the resource id %q", input) } - // find the correct casing for the 'virtualNetworkPeerings' segment - virtualNetworkPeeringsKey := "virtualNetworkPeerings" - for key := range id.Path { - if strings.EqualFold(key, virtualNetworkPeeringsKey) { - virtualNetworkPeeringsKey = key - break - } + return &id, nil +} + +// ValidateVirtualNetworkPeeringID checks that 'input' can be parsed as a Virtual Network Peering ID +func ValidateVirtualNetworkPeeringID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.Name, err = id.PopSegment(virtualNetworkPeeringsKey); err != nil { - return nil, err + + if _, err := ParseVirtualNetworkPeeringID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Virtual Network Peering ID +func (id VirtualNetworkPeeringId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Databricks/workspaces/%s/virtualNetworkPeerings/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.WorkspaceName, id.PeeringName) +} + +// Segments 
returns a slice of Resource ID Segments which comprise this Virtual Network Peering ID +func (id VirtualNetworkPeeringId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("providers", "providers", "providers"), + resourceids.ResourceProviderSegment("microsoftDatabricks", "Microsoft.Databricks", "Microsoft.Databricks"), + resourceids.StaticSegment("workspaces", "workspaces", "workspaces"), + resourceids.UserSpecifiedSegment("workspaceName", "workspaceValue"), + resourceids.StaticSegment("virtualNetworkPeerings", "virtualNetworkPeerings", "virtualNetworkPeerings"), + resourceids.UserSpecifiedSegment("peeringName", "peeringValue"), } +} - return &resourceId, nil +// String returns a human-readable description of this Virtual Network Peering ID +func (id VirtualNetworkPeeringId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Workspace Name: %q", id.WorkspaceName), + fmt.Sprintf("Peering Name: %q", id.PeeringName), + } + return fmt.Sprintf("Virtual Network Peering (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/databricks/sdk/2021-04-01-preview/vnetpeering/id_virtualnetworkpeering_test.go b/internal/services/databricks/sdk/2021-04-01-preview/vnetpeering/id_virtualnetworkpeering_test.go index f71ac57209ce..983cee3571e1 100644 --- a/internal/services/databricks/sdk/2021-04-01-preview/vnetpeering/id_virtualnetworkpeering_test.go +++ b/internal/services/databricks/sdk/2021-04-01-preview/vnetpeering/id_virtualnetworkpeering_test.go @@ -6,13 +6,33 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = VirtualNetworkPeeringId{} +var _ resourceids.ResourceId = VirtualNetworkPeeringId{} -func TestVirtualNetworkPeeringIDFormatter(t *testing.T) { - actual := NewVirtualNetworkPeeringID("{subscriptionId}", "{resourceGroupName}", "{workspaceName}", "{peeringName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/virtualNetworkPeerings/{peeringName}" +func TestNewVirtualNetworkPeeringID(t *testing.T) { + id := NewVirtualNetworkPeeringID("12345678-1234-9876-4563-123456789012", "example-resource-group", "workspaceValue", "peeringValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.WorkspaceName != "workspaceValue" { + t.Fatalf("Expected %q but got %q for Segment 'WorkspaceName'", id.WorkspaceName, "workspaceValue") + } + + if id.PeeringName != "peeringValue" { + t.Fatalf("Expected %q but got %q for Segment 'PeeringName'", id.PeeringName, "peeringValue") + } +} + +func TestFormatVirtualNetworkPeeringID(t *testing.T) { + actual := NewVirtualNetworkPeeringID("12345678-1234-9876-4563-123456789012", 
"example-resource-group", "workspaceValue", "peeringValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/virtualNetworkPeerings/peeringValue" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,79 +42,72 @@ func TestParseVirtualNetworkPeeringID(t *testing.T) { Error bool Expected *VirtualNetworkPeeringId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing WorkspaceName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", Error: true, }, - { - // missing value for WorkspaceName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/virtualNetworkPeerings/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/virtualNetworkPeerings", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/virtualNetworkPeerings/{peeringName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/virtualNetworkPeerings/peeringValue", Expected: &VirtualNetworkPeeringId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - WorkspaceName: "{workspaceName}", - Name: "{peeringName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: 
"example-resource-group", + WorkspaceName: "workspaceValue", + PeeringName: "peeringValue", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.DATABRICKS/WORKSPACES/{WORKSPACENAME}/VIRTUALNETWORKPEERINGS/{PEERINGNAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/virtualNetworkPeerings/peeringValue/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -104,7 +117,7 @@ func TestParseVirtualNetworkPeeringID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -113,15 +126,19 @@ func TestParseVirtualNetworkPeeringID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + if actual.WorkspaceName != v.Expected.WorkspaceName { t.Fatalf("Expected %q but got %q for WorkspaceName", v.Expected.WorkspaceName, actual.WorkspaceName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.PeeringName != v.Expected.PeeringName { + t.Fatalf("Expected %q but got %q for PeeringName", v.Expected.PeeringName, actual.PeeringName) } + } } @@ -131,106 +148,132 @@ func TestParseVirtualNetworkPeeringIDInsensitively(t *testing.T) { Error bool Expected *VirtualNetworkPeeringId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing WorkspaceName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for WorkspaceName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/", + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/virtualNetworkPeerings/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/virtualNetworkPeerings/{peeringName}", - Expected: &VirtualNetworkPeeringId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - WorkspaceName: "{workspaceName}", - Name: "{peeringName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/virtualnetworkpeerings/{peeringName}", - Expected: &VirtualNetworkPeeringId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - WorkspaceName: "{workspaceName}", - Name: "{peeringName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/WORKSPACES/{workspaceName}/VIRTUALNETWORKPEERINGS/{peeringName}", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/virtualNetworkPeerings", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE/vIrTuAlNeTwOrKpEeRiNgS", + Error: true, + }, + { + // Valid URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/virtualNetworkPeerings/peeringValue", Expected: &VirtualNetworkPeeringId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - WorkspaceName: "{workspaceName}", - Name: "{peeringName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + WorkspaceName: "workspaceValue", + PeeringName: "peeringValue", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/WoRkSpAcEs/{workspaceName}/ViRtUaLnEtWoRkPeErInGs/{peeringName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/virtualNetworkPeerings/peeringValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE/vIrTuAlNeTwOrKpEeRiNgS/pEeRiNgVaLuE", Expected: &VirtualNetworkPeeringId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - WorkspaceName: "{workspaceName}", - Name: "{peeringName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + WorkspaceName: "wOrKsPaCeVaLuE", + PeeringName: "pEeRiNgVaLuE", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE/vIrTuAlNeTwOrKpEeRiNgS/pEeRiNgVaLuE/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -240,7 +283,7 @@ func TestParseVirtualNetworkPeeringIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -249,14 +292,18 @@ func TestParseVirtualNetworkPeeringIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + if actual.WorkspaceName != v.Expected.WorkspaceName { t.Fatalf("Expected %q but got %q for WorkspaceName", v.Expected.WorkspaceName, actual.WorkspaceName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.PeeringName != v.Expected.PeeringName { + t.Fatalf("Expected %q but got %q for PeeringName", v.Expected.PeeringName, actual.PeeringName) } + } } diff --git a/internal/services/databricks/sdk/2021-04-01-preview/vnetpeering/id_workspace.go b/internal/services/databricks/sdk/2021-04-01-preview/vnetpeering/id_workspace.go index 80a70f6d7baa..a5e830b3e61e 100644 --- 
a/internal/services/databricks/sdk/2021-04-01-preview/vnetpeering/id_workspace.go +++ b/internal/services/databricks/sdk/2021-04-01-preview/vnetpeering/id_workspace.go @@ -7,102 +7,118 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = WorkspaceId{} + +// WorkspaceId is a struct representing the Resource ID for a Workspace type WorkspaceId struct { - SubscriptionId string - ResourceGroup string - Name string + SubscriptionId string + ResourceGroupName string + WorkspaceName string } -func NewWorkspaceID(subscriptionId, resourceGroup, name string) WorkspaceId { +// NewWorkspaceID returns a new WorkspaceId struct +func NewWorkspaceID(subscriptionId string, resourceGroupName string, workspaceName string) WorkspaceId { return WorkspaceId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, - Name: name, + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + WorkspaceName: workspaceName, } } -func (id WorkspaceId) String() string { - segments := []string{ - fmt.Sprintf("Name %q", id.Name), - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Workspace", segmentsStr) -} - -func (id WorkspaceId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Databricks/workspaces/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.Name) -} - -// ParseWorkspaceID parses a Workspace ID into an WorkspaceId struct +// ParseWorkspaceID parses 'input' into a WorkspaceId func ParseWorkspaceID(input string) (*WorkspaceId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(WorkspaceId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err - } - - resourceId := WorkspaceId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") - } + var ok bool + id := WorkspaceId{} - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.Name, err = id.PopSegment("workspaces"); err != nil { - return nil, err + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.WorkspaceName, ok = parsed.Parsed["workspaceName"]; !ok { + return nil, fmt.Errorf("the segment 'workspaceName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParseWorkspaceIDInsensitively parses an Workspace ID into an WorkspaceId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseWorkspaceID method should be used instead for validation etc. 
+// ParseWorkspaceIDInsensitively parses 'input' case-insensitively into a WorkspaceId +// note: this method should only be used for API response data and not user input func ParseWorkspaceIDInsensitively(input string) (*WorkspaceId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(WorkspaceId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := WorkspaceId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + var ok bool + id := WorkspaceId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.WorkspaceName, ok = parsed.Parsed["workspaceName"]; !ok { + return nil, fmt.Errorf("the segment 'workspaceName' was not found in the resource id %q", input) } - // find the correct casing for the 'workspaces' segment - workspacesKey := "workspaces" - for key := range id.Path { - if strings.EqualFold(key, workspacesKey) { - workspacesKey = key - break - } + return &id, nil +} + +// ValidateWorkspaceID checks that 'input' can be parsed as a Workspace ID +func ValidateWorkspaceID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.Name, err = id.PopSegment(workspacesKey); err != nil { - return nil, err + + if _, err := ParseWorkspaceID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Workspace ID +func (id WorkspaceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Databricks/workspaces/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.WorkspaceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Workspace ID +func (id WorkspaceId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("providers", "providers", "providers"), + resourceids.ResourceProviderSegment("microsoftDatabricks", "Microsoft.Databricks", "Microsoft.Databricks"), + resourceids.StaticSegment("workspaces", "workspaces", "workspaces"), + resourceids.UserSpecifiedSegment("workspaceName", "workspaceValue"), } +} - return &resourceId, nil +// String returns a human-readable description of this Workspace ID +func (id WorkspaceId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + 
fmt.Sprintf("Workspace Name: %q", id.WorkspaceName), + } + return fmt.Sprintf("Workspace (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/databricks/sdk/2021-04-01-preview/vnetpeering/id_workspace_test.go b/internal/services/databricks/sdk/2021-04-01-preview/vnetpeering/id_workspace_test.go index 653657763efb..e736ac5d6a8b 100644 --- a/internal/services/databricks/sdk/2021-04-01-preview/vnetpeering/id_workspace_test.go +++ b/internal/services/databricks/sdk/2021-04-01-preview/vnetpeering/id_workspace_test.go @@ -6,13 +6,29 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = WorkspaceId{} +var _ resourceids.ResourceId = WorkspaceId{} -func TestWorkspaceIDFormatter(t *testing.T) { - actual := NewWorkspaceID("{subscriptionId}", "{resourceGroupName}", "{workspaceName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}" +func TestNewWorkspaceID(t *testing.T) { + id := NewWorkspaceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "workspaceValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.WorkspaceName != "workspaceValue" { + t.Fatalf("Expected %q but got %q for Segment 'WorkspaceName'", id.WorkspaceName, "workspaceValue") + } +} + +func TestFormatWorkspaceID(t *testing.T) { + actual := NewWorkspaceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "workspaceValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,66 +38,61 @@ func TestParseWorkspaceID(t *testing.T) { Error bool Expected *WorkspaceId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/", + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue", Expected: &WorkspaceId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{workspaceName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + WorkspaceName: "workspaceValue", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.DATABRICKS/WORKSPACES/{WORKSPACENAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -91,7 +102,7 @@ func TestParseWorkspaceID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -100,12 +111,15 @@ func TestParseWorkspaceID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.WorkspaceName != v.Expected.WorkspaceName { + t.Fatalf("Expected %q but got %q for WorkspaceName", v.Expected.WorkspaceName, actual.WorkspaceName) } + } } @@ -115,90 +129,110 @@ func TestParseWorkspaceIDInsensitively(t *testing.T) { Error bool Expected *WorkspaceId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/", + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}", - Expected: &WorkspaceId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{workspaceName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}", - Expected: &WorkspaceId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{workspaceName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/WORKSPACES/{workspaceName}", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue", Expected: &WorkspaceId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{workspaceName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + WorkspaceName: "workspaceValue", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/WoRkSpAcEs/{workspaceName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + 
Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE", Expected: &WorkspaceId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{workspaceName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + WorkspaceName: "wOrKsPaCeVaLuE", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -208,7 +242,7 @@ func TestParseWorkspaceIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -217,11 +251,14 @@ func TestParseWorkspaceIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.WorkspaceName != v.Expected.WorkspaceName { + t.Fatalf("Expected %q but got %q for WorkspaceName", v.Expected.WorkspaceName, actual.WorkspaceName) } + } } diff --git a/internal/services/databricks/sdk/2021-04-01-preview/workspaces/constants.go b/internal/services/databricks/sdk/2021-04-01-preview/workspaces/constants.go index 06948ba728ce..d615baf6746f 100644 --- a/internal/services/databricks/sdk/2021-04-01-preview/workspaces/constants.go +++ b/internal/services/databricks/sdk/2021-04-01-preview/workspaces/constants.go @@ -1,5 +1,7 @@ package workspaces +import "strings" + type CreatedByType string const ( @@ -9,6 +11,33 @@ const ( CreatedByTypeUser CreatedByType = "User" ) +func PossibleValuesForCreatedByType() []string { + return []string{ + "Application", + "Key", + "ManagedIdentity", + "User", + } +} + +func parseCreatedByType(input string) (*CreatedByType, error) { + vals := map[string]CreatedByType{ + "application": "Application", + "key": "Key", + "managedidentity": "ManagedIdentity", + "user": "User", + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // it could be a new value - best effort convert this + v := input + + out := CreatedByType(v) + return &out, nil +} + type CustomParameterType string const ( @@ -17,12 +46,58 @@ const ( CustomParameterTypeString CustomParameterType = "String" ) +func PossibleValuesForCustomParameterType() []string { + return []string{ + "Bool", + "Object", + "String", + } +} + +func parseCustomParameterType(input string) (*CustomParameterType, error) { + vals := map[string]CustomParameterType{ + "bool": "Bool", + "object": "Object", + "string": "String", + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // it could be a new value - best effort convert this + v := input + + out := 
CustomParameterType(v) + return &out, nil +} + type EncryptionKeySource string const ( EncryptionKeySourceMicrosoftPointKeyvault EncryptionKeySource = "Microsoft.Keyvault" ) +func PossibleValuesForEncryptionKeySource() []string { + return []string{ + "Microsoft.Keyvault", + } +} + +func parseEncryptionKeySource(input string) (*EncryptionKeySource, error) { + vals := map[string]EncryptionKeySource{ + "microsoftpointkeyvault": "Microsoft.Keyvault", + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // it could be a new value - best effort convert this + v := input + + out := EncryptionKeySource(v) + return &out, nil +} + type KeySource string const ( @@ -30,6 +105,29 @@ const ( KeySourceMicrosoftPointKeyvault KeySource = "Microsoft.Keyvault" ) +func PossibleValuesForKeySource() []string { + return []string{ + "Default", + "Microsoft.Keyvault", + } +} + +func parseKeySource(input string) (*KeySource, error) { + vals := map[string]KeySource{ + "default": "Default", + "microsoftpointkeyvault": "Microsoft.Keyvault", + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // it could be a new value - best effort convert this + v := input + + out := KeySource(v) + return &out, nil +} + type PrivateEndpointConnectionProvisioningState string const ( @@ -40,6 +138,35 @@ const ( PrivateEndpointConnectionProvisioningStateUpdating PrivateEndpointConnectionProvisioningState = "Updating" ) +func PossibleValuesForPrivateEndpointConnectionProvisioningState() []string { + return []string{ + "Creating", + "Deleting", + "Failed", + "Succeeded", + "Updating", + } +} + +func parsePrivateEndpointConnectionProvisioningState(input string) (*PrivateEndpointConnectionProvisioningState, error) { + vals := map[string]PrivateEndpointConnectionProvisioningState{ + "creating": "Creating", + "deleting": "Deleting", + "failed": "Failed", + "succeeded": "Succeeded", + "updating": "Updating", + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // it could be a new value - best effort convert this + v := input + + out := PrivateEndpointConnectionProvisioningState(v) + return &out, nil +} + type PrivateLinkServiceConnectionStatus string const ( @@ -49,6 +176,33 @@ const ( PrivateLinkServiceConnectionStatusRejected PrivateLinkServiceConnectionStatus = "Rejected" ) +func PossibleValuesForPrivateLinkServiceConnectionStatus() []string { + return []string{ + "Approved", + "Disconnected", + "Pending", + "Rejected", + } +} + +func parsePrivateLinkServiceConnectionStatus(input string) (*PrivateLinkServiceConnectionStatus, error) { + vals := map[string]PrivateLinkServiceConnectionStatus{ + "approved": "Approved", + "disconnected": "Disconnected", + "pending": "Pending", + "rejected": "Rejected", + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // it could be a new value - best effort convert this + v := input + + out := PrivateLinkServiceConnectionStatus(v) + return &out, nil +} + type ProvisioningState string const ( @@ -65,6 +219,47 @@ const ( ProvisioningStateUpdating ProvisioningState = "Updating" ) +func PossibleValuesForProvisioningState() []string { + return []string{ + "Accepted", + "Canceled", + "Created", + "Creating", + "Deleted", + "Deleting", + "Failed", + "Ready", + "Running", + "Succeeded", + "Updating", + } +} + +func parseProvisioningState(input string) (*ProvisioningState, error) { + vals := map[string]ProvisioningState{ + "accepted": "Accepted", + "canceled": "Canceled", + "created": "Created", + "creating": 
"Creating", + "deleted": "Deleted", + "deleting": "Deleting", + "failed": "Failed", + "ready": "Ready", + "running": "Running", + "succeeded": "Succeeded", + "updating": "Updating", + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // it could be a new value - best effort convert this + v := input + + out := ProvisioningState(v) + return &out, nil +} + type PublicNetworkAccess string const ( @@ -72,6 +267,29 @@ const ( PublicNetworkAccessEnabled PublicNetworkAccess = "Enabled" ) +func PossibleValuesForPublicNetworkAccess() []string { + return []string{ + "Disabled", + "Enabled", + } +} + +func parsePublicNetworkAccess(input string) (*PublicNetworkAccess, error) { + vals := map[string]PublicNetworkAccess{ + "disabled": "Disabled", + "enabled": "Enabled", + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // it could be a new value - best effort convert this + v := input + + out := PublicNetworkAccess(v) + return &out, nil +} + type RequiredNsgRules string const ( @@ -79,3 +297,28 @@ const ( RequiredNsgRulesNoAzureDatabricksRules RequiredNsgRules = "NoAzureDatabricksRules" RequiredNsgRulesNoAzureServiceRules RequiredNsgRules = "NoAzureServiceRules" ) + +func PossibleValuesForRequiredNsgRules() []string { + return []string{ + "AllRules", + "NoAzureDatabricksRules", + "NoAzureServiceRules", + } +} + +func parseRequiredNsgRules(input string) (*RequiredNsgRules, error) { + vals := map[string]RequiredNsgRules{ + "allrules": "AllRules", + "noazuredatabricksrules": "NoAzureDatabricksRules", + "noazureservicerules": "NoAzureServiceRules", + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // it could be a new value - best effort convert this + v := input + + out := RequiredNsgRules(v) + return &out, nil +} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/workspaces/id_resourcegroup.go b/internal/services/databricks/sdk/2021-04-01-preview/workspaces/id_resourcegroup.go index c18545606c88..7fc48051bf51 100644 --- a/internal/services/databricks/sdk/2021-04-01-preview/workspaces/id_resourcegroup.go +++ b/internal/services/databricks/sdk/2021-04-01-preview/workspaces/id_resourcegroup.go @@ -7,83 +7,103 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = ResourceGroupId{} + +// ResourceGroupId is a struct representing the Resource ID for a Resource Group type ResourceGroupId struct { - SubscriptionId string - ResourceGroup string + SubscriptionId string + ResourceGroupName string } -func NewResourceGroupID(subscriptionId, resourceGroup string) ResourceGroupId { +// NewResourceGroupID returns a new ResourceGroupId struct +func NewResourceGroupID(subscriptionId string, resourceGroupName string) ResourceGroupId { return ResourceGroupId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, } } -func (id ResourceGroupId) String() string { - segments := []string{ - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Resource Group", segmentsStr) -} - -func (id ResourceGroupId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup) -} - -// ParseResourceGroupID parses a ResourceGroup ID into an ResourceGroupId struct +// ParseResourceGroupID parses 'input' into a ResourceGroupId func 
ParseResourceGroupID(input string) (*ResourceGroupId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(ResourceGroupId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err - } - - resourceId := ResourceGroupId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") - } + var ok bool + id := ResourceGroupId{} - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParseResourceGroupIDInsensitively parses an ResourceGroup ID into an ResourceGroupId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseResourceGroupID method should be used instead for validation etc. +// ParseResourceGroupIDInsensitively parses 'input' case-insensitively into a ResourceGroupId +// note: this method should only be used for API response data and not user input func ParseResourceGroupIDInsensitively(input string) (*ResourceGroupId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(ResourceGroupId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := ResourceGroupId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - resourceId := ResourceGroupId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + return &id, nil +} + +// ValidateResourceGroupID checks that 'input' can be parsed as a Resource Group ID +func ValidateResourceGroupID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if _, err := ParseResourceGroupID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Resource Group ID +func (id ResourceGroupId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Resource Group ID +func (id ResourceGroupId) Segments() 
[]resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), } +} - return &resourceId, nil +// String returns a human-readable description of this Resource Group ID +func (id ResourceGroupId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + } + return fmt.Sprintf("Resource Group (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/databricks/sdk/2021-04-01-preview/workspaces/id_resourcegroup_test.go b/internal/services/databricks/sdk/2021-04-01-preview/workspaces/id_resourcegroup_test.go index 23a5f67a7d9b..3aadceae80f1 100644 --- a/internal/services/databricks/sdk/2021-04-01-preview/workspaces/id_resourcegroup_test.go +++ b/internal/services/databricks/sdk/2021-04-01-preview/workspaces/id_resourcegroup_test.go @@ -6,13 +6,25 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = ResourceGroupId{} +var _ resourceids.ResourceId = ResourceGroupId{} -func TestResourceGroupIDFormatter(t *testing.T) { - actual := NewResourceGroupID("{subscriptionId}", "{resourceGroupName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}" +func TestNewResourceGroupID(t *testing.T) { + id := NewResourceGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } +} + +func TestFormatResourceGroupID(t *testing.T) { + actual := NewResourceGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,53 +34,40 @@ func TestParseResourceGroupID(t *testing.T) { Error bool Expected *ResourceGroupId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - - { - // missing SubscriptionId - Input: "/", - Error: true, - }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Expected: &ResourceGroupId{ - SubscriptionId: 
"{subscriptionId}", - ResourceGroup: "{resourceGroupName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -78,7 +77,7 @@ func TestParseResourceGroupID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -87,9 +86,11 @@ func TestParseResourceGroupID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + } } @@ -99,74 +100,68 @@ func TestParseResourceGroupIDInsensitively(t *testing.T) { Error bool Expected *ResourceGroupId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", - Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", - Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -176,7 +171,7 @@ func TestParseResourceGroupIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -185,8 +180,10 @@ func TestParseResourceGroupIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + } } diff --git a/internal/services/databricks/sdk/2021-04-01-preview/workspaces/id_subscription.go b/internal/services/databricks/sdk/2021-04-01-preview/workspaces/id_subscription.go index 809dbcfbb739..79a3c1c21b29 100644 --- a/internal/services/databricks/sdk/2021-04-01-preview/workspaces/id_subscription.go +++ b/internal/services/databricks/sdk/2021-04-01-preview/workspaces/id_subscription.go @@ -7,69 +7,90 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = SubscriptionId{} + +// SubscriptionId is a struct representing the Resource ID for a Subscription type SubscriptionId struct { SubscriptionId string } +// NewSubscriptionID returns a new SubscriptionId struct func NewSubscriptionID(subscriptionId string) SubscriptionId { return SubscriptionId{ SubscriptionId: subscriptionId, } } -func (id SubscriptionId) String() string { - segments := []string{} - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Subscription", segmentsStr) -} - -func (id SubscriptionId) ID() string { - fmtString := "/subscriptions/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId) -} - -// ParseSubscriptionID parses a Subscription ID into an SubscriptionId struct +// ParseSubscriptionID parses 'input' into a SubscriptionId func ParseSubscriptionID(input string) (*SubscriptionId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(SubscriptionId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := SubscriptionId{ - SubscriptionId: id.SubscriptionID, - } + var ok bool + id := SubscriptionId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; 
!ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err - } - - return &resourceId, nil + return &id, nil } -// ParseSubscriptionIDInsensitively parses an Subscription ID into an SubscriptionId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseSubscriptionID method should be used instead for validation etc. +// ParseSubscriptionIDInsensitively parses 'input' case-insensitively into a SubscriptionId +// note: this method should only be used for API response data and not user input func ParseSubscriptionIDInsensitively(input string) (*SubscriptionId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(SubscriptionId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := SubscriptionId{ - SubscriptionId: id.SubscriptionID, + var ok bool + id := SubscriptionId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + return &id, nil +} + +// ValidateSubscriptionID checks that 'input' can be parsed as a Subscription ID +func ValidateSubscriptionID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if _, err := ParseSubscriptionID(v); err != nil { + errors = append(errors, err) } - return &resourceId, nil + return +} + +// ID returns the formatted Subscription ID +func (id SubscriptionId) ID() string { + fmtString := "/subscriptions/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId) +} + +// Segments returns a slice of Resource ID Segments which comprise this Subscription ID +func (id SubscriptionId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + } +} + +// String returns a human-readable description of this Subscription ID +func (id SubscriptionId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + } + return fmt.Sprintf("Subscription (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/databricks/sdk/2021-04-01-preview/workspaces/id_subscription_test.go b/internal/services/databricks/sdk/2021-04-01-preview/workspaces/id_subscription_test.go index ba3fb4fba30d..50ca833bffba 100644 --- a/internal/services/databricks/sdk/2021-04-01-preview/workspaces/id_subscription_test.go +++ b/internal/services/databricks/sdk/2021-04-01-preview/workspaces/id_subscription_test.go @@ -6,13 +6,21 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = SubscriptionId{} +var _ resourceids.ResourceId = SubscriptionId{} -func TestSubscriptionIDFormatter(t *testing.T) { - actual := NewSubscriptionID("{subscriptionId}").ID() - expected := "/subscriptions/{subscriptionId}" +func TestNewSubscriptionID(t *testing.T) { + 
id := NewSubscriptionID("12345678-1234-9876-4563-123456789012") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } +} + +func TestFormatSubscriptionID(t *testing.T) { + actual := NewSubscriptionID("12345678-1234-9876-4563-123456789012").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,40 +30,29 @@ func TestParseSubscriptionID(t *testing.T) { Error bool Expected *SubscriptionId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", - Error: true, - }, - - { - // valid - Input: "/subscriptions/{subscriptionId}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -65,7 +62,7 @@ func TestParseSubscriptionID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -74,6 +71,7 @@ func TestParseSubscriptionID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } + } } @@ -83,58 +81,46 @@ func TestParseSubscriptionIDInsensitively(t *testing.T) { Error bool Expected *SubscriptionId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", }, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}", - Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", - }, + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/extra", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}", + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}", - Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", - }, + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is 
insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/extra", + Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -144,7 +130,7 @@ func TestParseSubscriptionIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -153,5 +139,6 @@ func TestParseSubscriptionIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } + } } diff --git a/internal/services/databricks/sdk/2021-04-01-preview/workspaces/id_workspace.go b/internal/services/databricks/sdk/2021-04-01-preview/workspaces/id_workspace.go index fe25582ecf5c..94fb99b0a418 100644 --- a/internal/services/databricks/sdk/2021-04-01-preview/workspaces/id_workspace.go +++ b/internal/services/databricks/sdk/2021-04-01-preview/workspaces/id_workspace.go @@ -7,102 +7,118 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = WorkspaceId{} + +// WorkspaceId is a struct representing the Resource ID for a Workspace type WorkspaceId struct { - SubscriptionId string - ResourceGroup string - Name string + SubscriptionId string + ResourceGroupName string + WorkspaceName string } -func NewWorkspaceID(subscriptionId, resourceGroup, name string) WorkspaceId { +// NewWorkspaceID returns a new WorkspaceId struct +func NewWorkspaceID(subscriptionId string, resourceGroupName string, workspaceName string) WorkspaceId { return WorkspaceId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, - Name: name, + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + WorkspaceName: workspaceName, } } -func (id WorkspaceId) String() string { - segments := []string{ - fmt.Sprintf("Name %q", id.Name), - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Workspace", segmentsStr) -} - -func (id WorkspaceId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Databricks/workspaces/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.Name) -} - -// ParseWorkspaceID parses a Workspace ID into an WorkspaceId struct +// ParseWorkspaceID parses 'input' into a WorkspaceId func ParseWorkspaceID(input string) (*WorkspaceId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(WorkspaceId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err - } - - resourceId := WorkspaceId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") - } + var ok bool + id := WorkspaceId{} - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.Name, err = id.PopSegment("workspaces"); err != nil { - return nil, err + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the 
segment 'resourceGroupName' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.WorkspaceName, ok = parsed.Parsed["workspaceName"]; !ok { + return nil, fmt.Errorf("the segment 'workspaceName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParseWorkspaceIDInsensitively parses an Workspace ID into an WorkspaceId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseWorkspaceID method should be used instead for validation etc. +// ParseWorkspaceIDInsensitively parses 'input' case-insensitively into a WorkspaceId +// note: this method should only be used for API response data and not user input func ParseWorkspaceIDInsensitively(input string) (*WorkspaceId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(WorkspaceId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := WorkspaceId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + var ok bool + id := WorkspaceId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.WorkspaceName, ok = parsed.Parsed["workspaceName"]; !ok { + return nil, fmt.Errorf("the segment 'workspaceName' was not found in the resource id %q", input) } - // find the correct casing for the 'workspaces' segment - workspacesKey := "workspaces" - for key := range id.Path { - if strings.EqualFold(key, workspacesKey) { - workspacesKey = key - break - } + return &id, nil +} + +// ValidateWorkspaceID checks that 'input' can be parsed as a Workspace ID +func ValidateWorkspaceID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.Name, err = id.PopSegment(workspacesKey); err != nil { - return nil, err + + if _, err := ParseWorkspaceID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Workspace ID +func (id WorkspaceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Databricks/workspaces/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.WorkspaceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Workspace ID +func (id WorkspaceId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", 
"example-resource-group"), + resourceids.StaticSegment("providers", "providers", "providers"), + resourceids.ResourceProviderSegment("microsoftDatabricks", "Microsoft.Databricks", "Microsoft.Databricks"), + resourceids.StaticSegment("workspaces", "workspaces", "workspaces"), + resourceids.UserSpecifiedSegment("workspaceName", "workspaceValue"), } +} - return &resourceId, nil +// String returns a human-readable description of this Workspace ID +func (id WorkspaceId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Workspace Name: %q", id.WorkspaceName), + } + return fmt.Sprintf("Workspace (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/databricks/sdk/2021-04-01-preview/workspaces/id_workspace_test.go b/internal/services/databricks/sdk/2021-04-01-preview/workspaces/id_workspace_test.go index 5b9fc0d0f7a2..23828d2b1bce 100644 --- a/internal/services/databricks/sdk/2021-04-01-preview/workspaces/id_workspace_test.go +++ b/internal/services/databricks/sdk/2021-04-01-preview/workspaces/id_workspace_test.go @@ -6,13 +6,29 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = WorkspaceId{} +var _ resourceids.ResourceId = WorkspaceId{} -func TestWorkspaceIDFormatter(t *testing.T) { - actual := NewWorkspaceID("{subscriptionId}", "{resourceGroupName}", "{workspaceName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}" +func TestNewWorkspaceID(t *testing.T) { + id := NewWorkspaceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "workspaceValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.WorkspaceName != "workspaceValue" { + t.Fatalf("Expected %q but got %q for Segment 'WorkspaceName'", id.WorkspaceName, "workspaceValue") + } +} + +func TestFormatWorkspaceID(t *testing.T) { + actual := NewWorkspaceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "workspaceValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,66 +38,61 @@ func TestParseWorkspaceID(t *testing.T) { Error bool Expected *WorkspaceId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue", Expected: &WorkspaceId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{workspaceName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + WorkspaceName: "workspaceValue", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.DATABRICKS/WORKSPACES/{WORKSPACENAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -91,7 +102,7 @@ func TestParseWorkspaceID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -100,12 +111,15 @@ func TestParseWorkspaceID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.WorkspaceName != v.Expected.WorkspaceName { + t.Fatalf("Expected %q but got %q for WorkspaceName", v.Expected.WorkspaceName, actual.WorkspaceName) } + } } @@ -115,90 +129,110 @@ func TestParseWorkspaceIDInsensitively(t *testing.T) { Error bool Expected *WorkspaceId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: 
"/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}", - Expected: &WorkspaceId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{workspaceName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}", - Expected: &WorkspaceId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{workspaceName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/WORKSPACES/{workspaceName}", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue", Expected: &WorkspaceId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{workspaceName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + 
ResourceGroupName: "example-resource-group", + WorkspaceName: "workspaceValue", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/WoRkSpAcEs/{workspaceName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Databricks/workspaces/workspaceValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE", Expected: &WorkspaceId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{workspaceName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + WorkspaceName: "wOrKsPaCeVaLuE", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAbRiCkS/wOrKsPaCeS/wOrKsPaCeVaLuE/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -208,7 +242,7 @@ func TestParseWorkspaceIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -217,11 +251,14 @@ func TestParseWorkspaceIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.WorkspaceName != v.Expected.WorkspaceName { + t.Fatalf("Expected %q but got %q for WorkspaceName", v.Expected.WorkspaceName, actual.WorkspaceName) } + } } diff --git a/internal/services/databricks/sdk/2021-04-01-preview/workspaces/model_systemdata.go b/internal/services/databricks/sdk/2021-04-01-preview/workspaces/model_systemdata.go index c7da0009ca71..abdcca8a66b1 100644 --- a/internal/services/databricks/sdk/2021-04-01-preview/workspaces/model_systemdata.go +++ b/internal/services/databricks/sdk/2021-04-01-preview/workspaces/model_systemdata.go @@ -1,11 +1,5 @@ package workspaces -import ( - "time" - - "github.com/hashicorp/go-azure-helpers/lang/dates" -) - type SystemData struct { CreatedAt *string `json:"createdAt,omitempty"` CreatedBy *string `json:"createdBy,omitempty"` @@ -14,27 +8,3 @@ type SystemData struct { LastModifiedBy *string `json:"lastModifiedBy,omitempty"` LastModifiedByType *CreatedByType `json:"lastModifiedByType,omitempty"` } - -func (o SystemData) GetCreatedAtAsTime() (*time.Time, error) { - if o.CreatedAt == nil { - return nil, nil - } - return dates.ParseAsFormat(o.CreatedAt, "2006-01-02T15:04:05Z07:00") -} - -func (o SystemData) SetCreatedAtAsTime(input time.Time) { - formatted := 
input.Format("2006-01-02T15:04:05Z07:00") - o.CreatedAt = &formatted -} - -func (o SystemData) GetLastModifiedAtAsTime() (*time.Time, error) { - if o.LastModifiedAt == nil { - return nil, nil - } - return dates.ParseAsFormat(o.LastModifiedAt, "2006-01-02T15:04:05Z07:00") -} - -func (o SystemData) SetLastModifiedAtAsTime(input time.Time) { - formatted := input.Format("2006-01-02T15:04:05Z07:00") - o.LastModifiedAt = &formatted -} diff --git a/internal/services/databricks/sdk/2021-04-01-preview/workspaces/model_workspaceproperties.go b/internal/services/databricks/sdk/2021-04-01-preview/workspaces/model_workspaceproperties.go index 34a8dd6b8ba6..9f8012789abe 100644 --- a/internal/services/databricks/sdk/2021-04-01-preview/workspaces/model_workspaceproperties.go +++ b/internal/services/databricks/sdk/2021-04-01-preview/workspaces/model_workspaceproperties.go @@ -1,11 +1,5 @@ package workspaces -import ( - "time" - - "github.com/hashicorp/go-azure-helpers/lang/dates" -) - type WorkspaceProperties struct { Authorizations *[]WorkspaceProviderAuthorization `json:"authorizations,omitempty"` CreatedBy *CreatedBy `json:"createdBy,omitempty"` @@ -23,15 +17,3 @@ type WorkspaceProperties struct { WorkspaceId *string `json:"workspaceId,omitempty"` WorkspaceUrl *string `json:"workspaceUrl,omitempty"` } - -func (o WorkspaceProperties) GetCreatedDateTimeAsTime() (*time.Time, error) { - if o.CreatedDateTime == nil { - return nil, nil - } - return dates.ParseAsFormat(o.CreatedDateTime, "2006-01-02T15:04:05Z07:00") -} - -func (o WorkspaceProperties) SetCreatedDateTimeAsTime(input time.Time) { - formatted := input.Format("2006-01-02T15:04:05Z07:00") - o.CreatedDateTime = &formatted -} diff --git a/internal/services/datalake/client/client.go b/internal/services/datalake/client/client.go index 1d76cdb59822..2c66d5397ccd 100644 --- a/internal/services/datalake/client/client.go +++ b/internal/services/datalake/client/client.go @@ -1,41 +1,46 @@ package client import ( - analytics "github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account" "github.com/Azure/azure-sdk-for-go/services/datalake/store/2016-11-01/filesystem" - store "github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account" "github.com/hashicorp/terraform-provider-azurerm/internal/common" + analyticsaccount "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/sdk/datalakeanalytics/2016-11-01/accounts" + analyticsfirewallrules "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/sdk/datalakeanalytics/2016-11-01/firewallrules" + storeaccount "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/sdk/datalakestore/2016-11-01/accounts" + storefirewallrules "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/sdk/datalakestore/2016-11-01/firewallrules" + storevirtualnetworkrules "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/sdk/datalakestore/2016-11-01/virtualnetworkrules" ) type Client struct { // Data Lake Store - StoreAccountsClient *store.AccountsClient - StoreFirewallRulesClient *store.FirewallRulesClient - VirtualNetworkRulesClient *store.VirtualNetworkRulesClient + StoreAccountsClient *storeaccount.AccountsClient + StoreFirewallRulesClient *storefirewallrules.FirewallRulesClient + VirtualNetworkRulesClient *storevirtualnetworkrules.VirtualNetworkRulesClient StoreFilesClient *filesystem.Client // Data Lake Analytics - AnalyticsAccountsClient *analytics.AccountsClient - 
AnalyticsFirewallRulesClient *analytics.FirewallRulesClient + AnalyticsAccountsClient *analyticsaccount.AccountsClient + AnalyticsFirewallRulesClient *analyticsfirewallrules.FirewallRulesClient + + SubscriptionId string } func NewClient(o *common.ClientOptions) *Client { - StoreAccountsClient := store.NewAccountsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + StoreAccountsClient := storeaccount.NewAccountsClientWithBaseURI(o.ResourceManagerEndpoint) o.ConfigureClient(&StoreAccountsClient.Client, o.ResourceManagerAuthorizer) - StoreFirewallRulesClient := store.NewFirewallRulesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + StoreFirewallRulesClient := storefirewallrules.NewFirewallRulesClientWithBaseURI(o.ResourceManagerEndpoint) o.ConfigureClient(&StoreFirewallRulesClient.Client, o.ResourceManagerAuthorizer) - VirtualNetworkRulesClient := store.NewVirtualNetworkRulesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + VirtualNetworkRulesClient := storevirtualnetworkrules.NewVirtualNetworkRulesClientWithBaseURI(o.ResourceManagerEndpoint) o.ConfigureClient(&VirtualNetworkRulesClient.Client, o.ResourceManagerAuthorizer) StoreFilesClient := filesystem.NewClient() o.ConfigureClient(&StoreFilesClient.Client, o.ResourceManagerAuthorizer) - AnalyticsAccountsClient := analytics.NewAccountsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + AnalyticsAccountsClient := analyticsaccount.NewAccountsClientWithBaseURI(o.ResourceManagerEndpoint) o.ConfigureClient(&AnalyticsAccountsClient.Client, o.ResourceManagerAuthorizer) - AnalyticsFirewallRulesClient := analytics.NewFirewallRulesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + AnalyticsFirewallRulesClient := analyticsfirewallrules.NewFirewallRulesClientWithBaseURI(o.ResourceManagerEndpoint) o.ConfigureClient(&AnalyticsFirewallRulesClient.Client, o.ResourceManagerAuthorizer) return &Client{ @@ -45,5 +50,6 @@ func NewClient(o *common.ClientOptions) *Client { StoreFilesClient: &StoreFilesClient, AnalyticsAccountsClient: &AnalyticsAccountsClient, AnalyticsFirewallRulesClient: &AnalyticsFirewallRulesClient, + SubscriptionId: o.SubscriptionId, } } diff --git a/internal/services/datalake/data_lake_analytics_account_resource.go b/internal/services/datalake/data_lake_analytics_account_resource.go index d1beb06de5fa..702781a49e37 100644 --- a/internal/services/datalake/data_lake_analytics_account_resource.go +++ b/internal/services/datalake/data_lake_analytics_account_resource.go @@ -5,18 +5,18 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account" + "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/parse" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/sdk/datalakeanalytics/2016-11-01/accounts" "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/validate" "github.com/hashicorp/terraform-provider-azurerm/internal/tags" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/suppress" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" - 
"github.com/hashicorp/terraform-provider-azurerm/utils" ) func resourceDataLakeAnalyticsAccount() *pluginsdk.Resource { @@ -53,18 +53,18 @@ func resourceDataLakeAnalyticsAccount() *pluginsdk.Resource { "tier": { Type: pluginsdk.TypeString, Optional: true, - Default: string(account.Consumption), + Default: string(accounts.TierTypeConsumption), DiffSuppressFunc: suppress.CaseDifference, ValidateFunc: validation.StringInSlice([]string{ - string(account.Consumption), - string(account.Commitment100000AUHours), - string(account.Commitment10000AUHours), - string(account.Commitment1000AUHours), - string(account.Commitment100AUHours), - string(account.Commitment500000AUHours), - string(account.Commitment50000AUHours), - string(account.Commitment5000AUHours), - string(account.Commitment500AUHours), + string(accounts.TierTypeConsumption), + string(accounts.TierTypeCommitmentOneZeroZeroZeroZeroZeroAUHours), + string(accounts.TierTypeCommitmentOneZeroZeroZeroZeroAUHours), + string(accounts.TierTypeCommitmentOneZeroZeroZeroAUHours), + string(accounts.TierTypeCommitmentOneZeroZeroAUHours), + string(accounts.TierTypeCommitmentFiveZeroZeroZeroZeroZeroAUHours), + string(accounts.TierTypeCommitmentFiveZeroZeroZeroZeroAUHours), + string(accounts.TierTypeCommitmentFiveZeroZeroZeroAUHours), + string(accounts.TierTypeCommitmentFiveZeroZeroAUHours), }, true), }, @@ -82,51 +82,46 @@ func resourceDataLakeAnalyticsAccount() *pluginsdk.Resource { func resourceArmDateLakeAnalyticsAccountCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Datalake.AnalyticsAccountsClient - subscriptionId := meta.(*clients.Client).Datalake.AnalyticsAccountsClient.SubscriptionID + subscriptionId := meta.(*clients.Client).Datalake.SubscriptionId ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() - id := parse.NewAnalyticsAccountID(subscriptionId, d.Get("resource_group_name").(string), d.Get("name").(string)) + id := accounts.NewAccountID(subscriptionId, d.Get("resource_group_name").(string), d.Get("name").(string)) - existing, err := client.Get(ctx, id.ResourceGroup, id.AccountName) + existing, err := client.Get(ctx, id) if err != nil { - if !utils.ResponseWasNotFound(existing.Response) { + if !response.WasNotFound(existing.HttpResponse) { return fmt.Errorf("checking for presence of existing Data Lake Analytics Account %s: %+v", id, err) } } - if existing.ID != nil && *existing.ID != "" { - return tf.ImportAsExistsError("azurerm_data_lake_analytics_account", *existing.ID) + if !response.WasNotFound(existing.HttpResponse) { + return tf.ImportAsExistsError("azurerm_data_lake_analytics_account", id.ID()) } location := azure.NormalizeLocation(d.Get("location").(string)) storeAccountName := d.Get("default_store_account_name").(string) - tier := d.Get("tier").(string) + tier := accounts.TierType(d.Get("tier").(string)) t := d.Get("tags").(map[string]interface{}) log.Printf("[INFO] preparing arguments for Azure ARM Date Lake Store creation %s", id) - dateLakeAnalyticsAccount := account.CreateDataLakeAnalyticsAccountParameters{ - Location: &location, - Tags: tags.Expand(t), - CreateDataLakeAnalyticsAccountProperties: &account.CreateDataLakeAnalyticsAccountProperties{ - NewTier: account.TierType(tier), - DefaultDataLakeStoreAccount: &storeAccountName, - DataLakeStoreAccounts: &[]account.AddDataLakeStoreWithAccountParameters{ + dateLakeAnalyticsAccount := accounts.CreateDataLakeAnalyticsAccountParameters{ + Location: location, + Tags: expandTags(t), + Properties: 
accounts.CreateDataLakeAnalyticsAccountProperties{ + NewTier: &tier, + DefaultDataLakeStoreAccount: storeAccountName, + DataLakeStoreAccounts: []accounts.AddDataLakeStoreWithAccountParameters{ { - Name: &storeAccountName, + Name: storeAccountName, }, }, }, } - future, err := client.Create(ctx, id.ResourceGroup, id.AccountName, dateLakeAnalyticsAccount) - if err != nil { - return fmt.Errorf("issuing create request for Data Lake Analytics Account %s: %+v", id, err) - } - - if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("creating Data Lake Analytics Account %s: %+v", id, err) + if err := client.CreateThenPoll(ctx, id, dateLakeAnalyticsAccount); err != nil { + return fmt.Errorf("creating %s: %+v", id, err) } d.SetId(id.ID()) @@ -139,34 +134,29 @@ func resourceArmDateLakeAnalyticsAccountUpdate(d *pluginsdk.ResourceData, meta i ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.AnalyticsAccountID(d.Id()) + id, err := accounts.ParseAccountID(d.Id()) if err != nil { return err } storeAccountName := d.Get("default_store_account_name").(string) - newTier := d.Get("tier").(string) + newTier := accounts.TierType(d.Get("tier").(string)) newTags := d.Get("tags").(map[string]interface{}) - props := &account.UpdateDataLakeAnalyticsAccountParameters{ - Tags: tags.Expand(newTags), - UpdateDataLakeAnalyticsAccountProperties: &account.UpdateDataLakeAnalyticsAccountProperties{ - NewTier: account.TierType(newTier), - DataLakeStoreAccounts: &[]account.UpdateDataLakeStoreWithAccountParameters{ + props := accounts.UpdateDataLakeAnalyticsAccountParameters{ + Tags: expandTags(newTags), + Properties: &accounts.UpdateDataLakeAnalyticsAccountProperties{ + NewTier: &newTier, + DataLakeStoreAccounts: &[]accounts.UpdateDataLakeStoreWithAccountParameters{ { - Name: &storeAccountName, + Name: storeAccountName, }, }, }, } - future, err := client.Update(ctx, id.ResourceGroup, id.AccountName, props) - if err != nil { - return fmt.Errorf("issuing update request for Data Lake Analytics Account %s: %+v", id, err) - } - - if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("waiting for the update of Data Lake Analytics Account %s to complete: %+v", id, err) + if err := client.UpdateThenPoll(ctx, *id, props); err != nil { + return fmt.Errorf("updating %s: %+v", id, err) } return resourceArmDateLakeAnalyticsAccountRead(d, meta) @@ -177,33 +167,41 @@ func resourceArmDateLakeAnalyticsAccountRead(d *pluginsdk.ResourceData, meta int ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.AnalyticsAccountID(d.Id()) + id, err := accounts.ParseAccountID(d.Id()) if err != nil { return err } - resp, err := client.Get(ctx, id.ResourceGroup, id.AccountName) + resp, err := client.Get(ctx, *id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { log.Printf("[WARN] DataLakeAnalyticsAccountAccount '%s'", id) d.SetId("") return nil } - return fmt.Errorf("making Read request on Azure Data Lake Analytics Account %s: %+v", id, err) + return fmt.Errorf("reading %s: %+v", id, err) } - d.Set("name", id.AccountName) + d.Set("name", id.Name) d.Set("resource_group_name", id.ResourceGroup) - if location := resp.Location; location != nil { - d.Set("location", azure.NormalizeLocation(*location)) - } - if properties := resp.DataLakeAnalyticsAccountProperties; properties != nil { - d.Set("tier", 
string(properties.CurrentTier)) - d.Set("default_store_account_name", properties.DefaultDataLakeStoreAccount) - } + if model := resp.Model; model != nil { + if location := model.Location; location != nil { + d.Set("location", azure.NormalizeLocation(*location)) + } + + if properties := model.Properties; properties != nil { + tier := "" + if properties.CurrentTier != nil { + tier = string(*properties.CurrentTier) + } + d.Set("tier", tier) + d.Set("default_store_account_name", properties.DefaultDataLakeStoreAccount) + } - return tags.FlattenAndSet(d, resp.Tags) + return tags.FlattenAndSet(d, flattenTags(model.Tags)) + } + return nil } func resourceArmDateLakeAnalyticsAccountDelete(d *pluginsdk.ResourceData, meta interface{}) error { @@ -211,18 +209,13 @@ func resourceArmDateLakeAnalyticsAccountDelete(d *pluginsdk.ResourceData, meta i ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.AnalyticsAccountID(d.Id()) + id, err := accounts.ParseAccountID(d.Id()) if err != nil { return err } - future, err := client.Delete(ctx, id.ResourceGroup, id.AccountName) - if err != nil { - return fmt.Errorf("deleting Data Lake Analytics Account %s: %+v", id, err) - } - - if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("waiting for the deletion of Data Lake Analytics Account %s: %+v", id, err) + if err := client.DeleteThenPoll(ctx, *id); err != nil { + return fmt.Errorf("deleting %s: %+v", id, err) } return nil diff --git a/internal/services/datalake/data_lake_analytics_account_resource_test.go b/internal/services/datalake/data_lake_analytics_account_resource_test.go index ad5e9669ff6a..98eb8ad0738e 100644 --- a/internal/services/datalake/data_lake_analytics_account_resource_test.go +++ b/internal/services/datalake/data_lake_analytics_account_resource_test.go @@ -6,10 +6,11 @@ import ( "strconv" "testing" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/sdk/datalakeanalytics/2016-11-01/accounts" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" - "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/parse" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" ) @@ -88,17 +89,17 @@ func TestAccDataLakeAnalyticsAccount_withTags(t *testing.T) { } func (t DataLakeAnalyticsAccountResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := parse.AnalyticsAccountID(state.ID) + id, err := accounts.ParseAccountID(state.ID) if err != nil { return nil, err } - resp, err := clients.Datalake.AnalyticsAccountsClient.Get(ctx, id.ResourceGroup, id.AccountName) + resp, err := clients.Datalake.AnalyticsAccountsClient.Get(ctx, *id) if err != nil { - return nil, fmt.Errorf("retrieving Date Lake Analytics Account %s: %+v", id, err) + return nil, fmt.Errorf("retrieving %s: %+v", id, err) } - return utils.Bool(resp.DataLakeAnalyticsAccountProperties != nil), nil + return utils.Bool(resp.Model != nil), nil } func (DataLakeAnalyticsAccountResource) basic(data acceptance.TestData) string { diff --git a/internal/services/datalake/data_lake_analytics_firewall_rule_resource.go b/internal/services/datalake/data_lake_analytics_firewall_rule_resource.go index 38f19f778718..fa71d5c53c9d 100644 --- 
a/internal/services/datalake/data_lake_analytics_firewall_rule_resource.go +++ b/internal/services/datalake/data_lake_analytics_firewall_rule_resource.go @@ -2,21 +2,19 @@ package datalake import ( "fmt" - "log" "time" - "github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account" "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" commonValidate "github.com/hashicorp/terraform-provider-azurerm/helpers/validate" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/parse" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/sdk/datalakeanalytics/2016-11-01/firewallrules" "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/validate" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" - "github.com/hashicorp/terraform-provider-azurerm/utils" ) func resourceDataLakeAnalyticsFirewallRule() *pluginsdk.Resource { @@ -72,22 +70,22 @@ func resourceDataLakeAnalyticsFirewallRule() *pluginsdk.Resource { func resourceArmDateLakeAnalyticsFirewallRuleCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Datalake.AnalyticsFirewallRulesClient - subscriptionId := meta.(*clients.Client).Datalake.AnalyticsFirewallRulesClient.SubscriptionID + subscriptionId := meta.(*clients.Client).Datalake.SubscriptionId ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() - id := parse.NewAnalyticsFirewallRuleID(subscriptionId, d.Get("resource_group_name").(string), d.Get("account_name").(string), d.Get("name").(string)) + id := firewallrules.NewFirewallRuleID(subscriptionId, d.Get("resource_group_name").(string), d.Get("account_name").(string), d.Get("name").(string)) if d.IsNewResource() { - existing, err := client.Get(ctx, id.ResourceGroup, id.AccountName, id.FirewallRuleName) + existing, err := client.Get(ctx, id) if err != nil { - if !utils.ResponseWasNotFound(existing.Response) { + if !response.WasNotFound(existing.HttpResponse) { return fmt.Errorf("checking for presence of existing Data Lake Analytics Firewall Rule %s %+v", id, err) } } - if existing.ID != nil && *existing.ID != "" { - return tf.ImportAsExistsError("azurerm_data_lake_analytics_firewall_rule", *existing.ID) + if !response.WasNotFound(existing.HttpResponse) { + return tf.ImportAsExistsError("azurerm_data_lake_analytics_firewall_rule", id.ID()) } } @@ -96,15 +94,15 @@ func resourceArmDateLakeAnalyticsFirewallRuleCreateUpdate(d *pluginsdk.ResourceD log.Printf("[INFO] preparing arguments for Date Lake Analytics Firewall Rule creation %s", id) - dateLakeStore := account.CreateOrUpdateFirewallRuleParameters{ - CreateOrUpdateFirewallRuleProperties: &account.CreateOrUpdateFirewallRuleProperties{ - StartIPAddress: utils.String(startIPAddress), - EndIPAddress: utils.String(endIPAddress), + dateLakeStore := firewallrules.CreateOrUpdateFirewallRuleParameters{ + Properties: firewallrules.CreateOrUpdateFirewallRuleProperties{ + StartIpAddress: startIPAddress, + EndIpAddress: endIPAddress, }, } - if _, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.AccountName, id.FirewallRuleName, dateLakeStore); err != nil { - return fmt.Errorf("issuing create request for Data Lake Analytics %q (Resource Group %q): 
%+v", id.AccountName, id.ResourceGroup, err) + if _, err := client.CreateOrUpdate(ctx, id, dateLakeStore); err != nil { + return fmt.Errorf("creating %s: %+v", id, err) } d.SetId(id.ID()) @@ -117,28 +115,30 @@ func resourceArmDateLakeAnalyticsFirewallRuleRead(d *pluginsdk.ResourceData, met ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.AnalyticsFirewallRuleID(d.Id()) + id, err := firewallrules.ParseFirewallRuleID(d.Id()) if err != nil { return err } - resp, err := client.Get(ctx, id.ResourceGroup, id.AccountName, id.FirewallRuleName) + resp, err := client.Get(ctx, *id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { log.Printf("[WARN] Data Lake Analytics Firewall Rule %s", id) d.SetId("") return nil } - return fmt.Errorf("making Read request on Azure Data Lake Analytics Firewall Rule %s: %+v", id, err) + return fmt.Errorf("retrieving %s: %+v", *id, err) } - d.Set("name", id.FirewallRuleName) + d.Set("name", id.Name) d.Set("account_name", id.AccountName) d.Set("resource_group_name", id.ResourceGroup) - if props := resp.FirewallRuleProperties; props != nil { - d.Set("start_ip_address", props.StartIPAddress) - d.Set("end_ip_address", props.EndIPAddress) + if model := resp.Model; model != nil { + if props := model.Properties; props != nil { + d.Set("start_ip_address", props.StartIpAddress) + d.Set("end_ip_address", props.EndIpAddress) + } } return nil @@ -149,17 +149,17 @@ func resourceArmDateLakeAnalyticsFirewallRuleDelete(d *pluginsdk.ResourceData, m ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.AnalyticsFirewallRuleID(d.Id()) + id, err := firewallrules.ParseFirewallRuleID(d.Id()) if err != nil { return err } - resp, err := client.Delete(ctx, id.ResourceGroup, id.AccountName, id.FirewallRuleName) + resp, err := client.Delete(ctx, *id) if err != nil { - if response.WasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { return nil } - return fmt.Errorf("issuing delete request for Data Lake Analytics Firewall Rule %s: %+v", id, err) + return fmt.Errorf("deleting %s: %+v", id, err) } return nil diff --git a/internal/services/datalake/data_lake_analytics_firewall_rule_resource_test.go b/internal/services/datalake/data_lake_analytics_firewall_rule_resource_test.go index ac89696ceaca..6939e673148c 100644 --- a/internal/services/datalake/data_lake_analytics_firewall_rule_resource_test.go +++ b/internal/services/datalake/data_lake_analytics_firewall_rule_resource_test.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" - "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/parse" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/sdk/datalakeanalytics/2016-11-01/firewallrules" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" ) @@ -101,17 +101,17 @@ func TestAccDataLakeAnalyticsFirewallRule_azureServices(t *testing.T) { } func (t DataLakeAnalyticsFirewallRuleResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := parse.AnalyticsFirewallRuleID(state.ID) + id, err := firewallrules.ParseFirewallRuleID(state.ID) if err != nil 
{ return nil, err } - resp, err := clients.Datalake.AnalyticsFirewallRulesClient.Get(ctx, id.ResourceGroup, id.AccountName, id.FirewallRuleName) + resp, err := clients.Datalake.AnalyticsFirewallRulesClient.Get(ctx, *id) if err != nil { - return nil, fmt.Errorf("retrieving Date Lake Analytics Firewall Rule %s: %v", id, err) + return nil, fmt.Errorf("retrieving %s: %v", id, err) } - return utils.Bool(resp.FirewallRuleProperties != nil), nil + return utils.Bool(resp.Model != nil), nil } func (DataLakeAnalyticsFirewallRuleResource) basic(data acceptance.TestData, startIP, endIP string) string { diff --git a/internal/services/datalake/data_lake_store_data_source.go b/internal/services/datalake/data_lake_store_data_source.go index e91d9b2fabdf..a4702f5c04cf 100644 --- a/internal/services/datalake/data_lake_store_data_source.go +++ b/internal/services/datalake/data_lake_store_data_source.go @@ -4,13 +4,13 @@ import ( "fmt" "time" + "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" - "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/parse" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/sdk/datalakestore/2016-11-01/accounts" "github.com/hashicorp/terraform-provider-azurerm/internal/tags" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" - "github.com/hashicorp/terraform-provider-azurerm/utils" ) func dataSourceDataLakeStoreAccount() *pluginsdk.Resource { @@ -63,39 +63,61 @@ func dataSourceDataLakeStoreAccount() *pluginsdk.Resource { func dataSourceArmDateLakeStoreAccountRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Datalake.StoreAccountsClient - subscriptionId := meta.(*clients.Client).Datalake.StoreAccountsClient.SubscriptionID + subscriptionId := meta.(*clients.Client).Datalake.SubscriptionId ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id := parse.NewAccountID(subscriptionId, d.Get("resource_group_name").(string), d.Get("name").(string)) + id := accounts.NewAccountID(subscriptionId, d.Get("resource_group_name").(string), d.Get("name").(string)) - resp, err := client.Get(ctx, id.ResourceGroup, id.Name) + resp, err := client.Get(ctx, id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { - return fmt.Errorf("Data Lake Store Account %s was not found", id) + if response.WasNotFound(resp.HttpResponse) { + return fmt.Errorf("%s was not found", id) } - return fmt.Errorf("retrieving Data Lake Store Account %s: %+v", id, err) + return fmt.Errorf("retrieving %s: %+v", id, err) } d.SetId(id.ID()) d.Set("name", id.Name) d.Set("resource_group_name", id.ResourceGroup) - if location := resp.Location; location != nil { - d.Set("location", azure.NormalizeLocation(*location)) - } - - if properties := resp.DataLakeStoreAccountProperties; properties != nil { - d.Set("tier", string(properties.CurrentTier)) - d.Set("encryption_state", string(properties.EncryptionState)) - d.Set("firewall_allow_azure_ips", string(properties.FirewallAllowAzureIps)) - d.Set("firewall_state", string(properties.FirewallState)) + if model := resp.Model; model != nil { + if location := model.Location; location != nil { + d.Set("location", azure.NormalizeLocation(*location)) + } - if config := properties.EncryptionConfig; config != nil { - d.Set("encryption_type", 
string(config.Type)) + if properties := model.Properties; properties != nil { + tier := "" + if properties.CurrentTier != nil { + tier = string(*properties.CurrentTier) + } + d.Set("tier", tier) + + encryptionState := "" + if properties.EncryptionState != nil { + encryptionState = string(*properties.EncryptionState) + } + d.Set("encryption_state", encryptionState) + + firewallState := "" + if properties.FirewallState != nil { + firewallState = string(*properties.FirewallState) + } + d.Set("firewall_state", firewallState) + + firewallAllowAzureIps := "" + if properties.FirewallAllowAzureIps != nil { + firewallAllowAzureIps = string(*properties.FirewallAllowAzureIps) + } + d.Set("firewall_allow_azure_ips", firewallAllowAzureIps) + + if config := properties.EncryptionConfig; config != nil { + d.Set("encryption_type", string(config.Type)) + } } - } - return tags.FlattenAndSet(d, resp.Tags) + return tags.FlattenAndSet(d, flattenTags(model.Tags)) + } + return nil } diff --git a/internal/services/datalake/data_lake_store_firewall_rule_resource.go b/internal/services/datalake/data_lake_store_firewall_rule_resource.go index 8d751ae8a281..c19e47b5eeee 100644 --- a/internal/services/datalake/data_lake_store_firewall_rule_resource.go +++ b/internal/services/datalake/data_lake_store_firewall_rule_resource.go @@ -5,17 +5,16 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account" "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" commonValidate "github.com/hashicorp/terraform-provider-azurerm/helpers/validate" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/parse" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/sdk/datalakestore/2016-11-01/firewallrules" "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/validate" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" - "github.com/hashicorp/terraform-provider-azurerm/utils" ) func resourceDataLakeStoreFirewallRule() *pluginsdk.Resource { @@ -71,22 +70,22 @@ func resourceDataLakeStoreFirewallRule() *pluginsdk.Resource { func resourceArmDateLakeStoreAccountFirewallRuleCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Datalake.StoreFirewallRulesClient - subscriptionId := meta.(*clients.Client).Datalake.StoreFirewallRulesClient.SubscriptionID + subscriptionId := meta.(*clients.Client).Datalake.SubscriptionId ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() - id := parse.NewFirewallRuleID(subscriptionId, d.Get("resource_group_name").(string), d.Get("account_name").(string), d.Get("name").(string)) + id := firewallrules.NewFirewallRuleID(subscriptionId, d.Get("resource_group_name").(string), d.Get("account_name").(string), d.Get("name").(string)) if d.IsNewResource() { - existing, err := client.Get(ctx, id.ResourceGroup, id.AccountName, id.Name) + existing, err := client.Get(ctx, id) if err != nil { - if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("checking for presence of existing Date Lake Store Firewall Rule %s: %+v", id, err) + if !response.WasNotFound(existing.HttpResponse) { + return fmt.Errorf("retreiving %s: %+v", id, err) } 
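
(Editorial aside, not part of the patch.) Every resource migrated in this diff swaps the old `utils.ResponseWasNotFound(existing.Response)` check for `response.WasNotFound(existing.HttpResponse)` from `go-azure-helpers`, since the embedded SDK clients expose the raw `*http.Response` on their result structs. A minimal sketch of the shared "check for an existing resource, then import-as-exists" shape, with `client`, `id` and the resource type name standing in as placeholders:

```go
// Illustrative only — mirrors the pattern used by the migrated resources above.
existing, err := client.Get(ctx, id)
if err != nil {
	// Only surface the error when it is something other than a 404.
	if !response.WasNotFound(existing.HttpResponse) {
		return fmt.Errorf("checking for presence of existing %s: %+v", id, err)
	}
}
if !response.WasNotFound(existing.HttpResponse) {
	// The resource already exists, so ask the user to import it instead of re-creating it.
	return tf.ImportAsExistsError("azurerm_example_resource", id.ID())
}
```
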
} - if existing.ID != nil && *existing.ID != "" { - return tf.ImportAsExistsError("azurerm_data_lake_store_firewall_rule", *existing.ID) + if !response.WasNotFound(existing.HttpResponse) { + return tf.ImportAsExistsError("azurerm_data_lake_store_firewall_rule", id.ID()) } } @@ -95,15 +94,15 @@ func resourceArmDateLakeStoreAccountFirewallRuleCreateUpdate(d *pluginsdk.Resour log.Printf("[INFO] preparing arguments for Date Lake Store Firewall Rule creation %s", id) - dateLakeStore := account.CreateOrUpdateFirewallRuleParameters{ - CreateOrUpdateFirewallRuleProperties: &account.CreateOrUpdateFirewallRuleProperties{ - StartIPAddress: utils.String(startIPAddress), - EndIPAddress: utils.String(endIPAddress), + dateLakeStore := firewallrules.CreateOrUpdateFirewallRuleParameters{ + Properties: firewallrules.CreateOrUpdateFirewallRuleProperties{ + StartIpAddress: startIPAddress, + EndIpAddress: endIPAddress, }, } - if _, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.AccountName, id.Name, dateLakeStore); err != nil { - return fmt.Errorf("issuing create request for Data Lake Store %q (Resource Group %q): %+v", id.AccountName, id.ResourceGroup, err) + if _, err := client.CreateOrUpdate(ctx, id, dateLakeStore); err != nil { + return fmt.Errorf("creating %s: %+v", id, err) } d.SetId(id.ID()) @@ -116,28 +115,30 @@ func resourceArmDateLakeStoreAccountFirewallRuleRead(d *pluginsdk.ResourceData, ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.FirewallRuleID(d.Id()) + id, err := firewallrules.ParseFirewallRuleID(d.Id()) if err != nil { return err } - resp, err := client.Get(ctx, id.ResourceGroup, id.AccountName, id.Name) + resp, err := client.Get(ctx, *id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { log.Printf("[WARN] Data Lake Store Firewall Rule %s was not found", id) d.SetId("") return nil } - return fmt.Errorf("making Read request on Azure Data Lake Store Firewall Rule %s: %+v", id, err) + return fmt.Errorf("retrieving %s: %+v", id, err) } d.Set("name", id.Name) d.Set("account_name", id.AccountName) d.Set("resource_group_name", id.ResourceGroup) - if props := resp.FirewallRuleProperties; props != nil { - d.Set("start_ip_address", props.StartIPAddress) - d.Set("end_ip_address", props.EndIPAddress) + if model := resp.Model; model != nil { + if props := model.Properties; props != nil { + d.Set("start_ip_address", props.StartIpAddress) + d.Set("end_ip_address", props.EndIpAddress) + } } return nil @@ -148,17 +149,17 @@ func resourceArmDateLakeStoreAccountFirewallRuleDelete(d *pluginsdk.ResourceData ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.FirewallRuleID(d.Id()) + id, err := firewallrules.ParseFirewallRuleID(d.Id()) if err != nil { return err } - resp, err := client.Delete(ctx, id.ResourceGroup, id.AccountName, id.Name) + resp, err := client.Delete(ctx, *id) if err != nil { - if response.WasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { return nil } - return fmt.Errorf("issuing delete request for Data Lake Store Firewall Rule %s: %+v", id, err) + return fmt.Errorf("deleting %s: %+v", id, err) } return nil diff --git a/internal/services/datalake/data_lake_store_firewall_rule_resource_test.go b/internal/services/datalake/data_lake_store_firewall_rule_resource_test.go index aa9397a86a72..a6ca0e18dc89 100644 --- a/internal/services/datalake/data_lake_store_firewall_rule_resource_test.go +++ 
b/internal/services/datalake/data_lake_store_firewall_rule_resource_test.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" - "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/parse" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/sdk/datalakestore/2016-11-01/firewallrules" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" ) @@ -101,17 +101,17 @@ func TestAccDataLakeStoreFirewallRule_azureServices(t *testing.T) { } func (t DataLakeStoreFirewallRuleResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := parse.FirewallRuleID(state.ID) + id, err := firewallrules.ParseFirewallRuleID(state.ID) if err != nil { return nil, err } - resp, err := clients.Datalake.StoreFirewallRulesClient.Get(ctx, id.ResourceGroup, id.AccountName, id.Name) + resp, err := clients.Datalake.StoreFirewallRulesClient.Get(ctx, *id) if err != nil { - return nil, fmt.Errorf("retrieving Date Lake Store Firewall Rule %s: %v", id, err) + return nil, fmt.Errorf("retrieving %s: %v", id, err) } - return utils.Bool(resp.FirewallRuleProperties != nil), nil + return utils.Bool(resp.Model != nil), nil } func (DataLakeStoreFirewallRuleResource) basic(data acceptance.TestData, startIP, endIP string) string { diff --git a/internal/services/datalake/data_lake_store_resource.go b/internal/services/datalake/data_lake_store_resource.go index c037cd258bd7..307c0911e658 100644 --- a/internal/services/datalake/data_lake_store_resource.go +++ b/internal/services/datalake/data_lake_store_resource.go @@ -5,18 +5,19 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account" + "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/identity" "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/parse" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/sdk/datalakestore/2016-11-01/accounts" "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/validate" "github.com/hashicorp/terraform-provider-azurerm/internal/tags" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/suppress" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" - "github.com/hashicorp/terraform-provider-azurerm/utils" ) func resourceDataLakeStore() *pluginsdk.Resource { @@ -53,27 +54,27 @@ func resourceDataLakeStore() *pluginsdk.Resource { "tier": { Type: pluginsdk.TypeString, Optional: true, - Default: string(account.Consumption), + Default: string(accounts.TierTypeConsumption), DiffSuppressFunc: suppress.CaseDifference, ValidateFunc: validation.StringInSlice([]string{ - string(account.Consumption), - string(account.Commitment1TB), - string(account.Commitment10TB), - string(account.Commitment100TB), - string(account.Commitment500TB), - 
string(account.Commitment1PB), - string(account.Commitment5PB), + string(accounts.TierTypeConsumption), + string(accounts.TierTypeCommitmentOneTB), + string(accounts.TierTypeCommitmentOneZeroTB), + string(accounts.TierTypeCommitmentOneZeroZeroTB), + string(accounts.TierTypeCommitmentFiveZeroZeroTB), + string(accounts.TierTypeCommitmentOnePB), + string(accounts.TierTypeCommitmentFivePB), }, true), }, "encryption_state": { Type: pluginsdk.TypeString, Optional: true, - Default: string(account.Enabled), + Default: string(accounts.EncryptionStateEnabled), ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ - string(account.Enabled), - string(account.Disabled), + string(accounts.EncryptionStateEnabled), + string(accounts.EncryptionStateDisabled), }, true), DiffSuppressFunc: suppress.CaseDifference, }, @@ -84,7 +85,7 @@ func resourceDataLakeStore() *pluginsdk.Resource { Computed: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ - string(account.ServiceManaged), + string(accounts.EncryptionConfigTypeServiceManaged), }, true), DiffSuppressFunc: suppress.CaseDifference, }, @@ -92,10 +93,10 @@ func resourceDataLakeStore() *pluginsdk.Resource { "firewall_state": { Type: pluginsdk.TypeString, Optional: true, - Default: string(account.FirewallStateEnabled), + Default: string(accounts.FirewallStateEnabled), ValidateFunc: validation.StringInSlice([]string{ - string(account.FirewallStateEnabled), - string(account.FirewallStateDisabled), + string(accounts.FirewallStateEnabled), + string(accounts.FirewallStateDisabled), }, true), DiffSuppressFunc: suppress.CaseDifference, }, @@ -103,10 +104,10 @@ func resourceDataLakeStore() *pluginsdk.Resource { "firewall_allow_azure_ips": { Type: pluginsdk.TypeString, Optional: true, - Default: string(account.FirewallAllowAzureIpsStateEnabled), + Default: string(accounts.FirewallAllowAzureIpsStateEnabled), ValidateFunc: validation.StringInSlice([]string{ - string(account.FirewallAllowAzureIpsStateEnabled), - string(account.FirewallAllowAzureIpsStateDisabled), + string(accounts.FirewallAllowAzureIpsStateEnabled), + string(accounts.FirewallAllowAzureIpsStateDisabled), }, true), DiffSuppressFunc: suppress.CaseDifference, }, @@ -116,30 +117,7 @@ func resourceDataLakeStore() *pluginsdk.Resource { Computed: true, }, - "identity": { - Type: pluginsdk.TypeList, - Optional: true, - MaxItems: 1, - Elem: &pluginsdk.Resource{ - Schema: map[string]*pluginsdk.Schema{ - "type": { - Type: pluginsdk.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - "SystemAssigned", - }, false), - }, - "principal_id": { - Type: pluginsdk.TypeString, - Computed: true, - }, - "tenant_id": { - Type: pluginsdk.TypeString, - Computed: true, - }, - }, - }, - }, + "identity": identity.SystemAssigned{}.Schema(), "tags": tags.Schema(), }, @@ -148,61 +126,55 @@ func resourceDataLakeStore() *pluginsdk.Resource { func resourceArmDateLakeStoreCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Datalake.StoreAccountsClient - subscriptionId := meta.(*clients.Client).Datalake.StoreAccountsClient.SubscriptionID + subscriptionId := meta.(*clients.Client).Datalake.SubscriptionId ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() - id := parse.NewAccountID(subscriptionId, d.Get("resource_group_name").(string), d.Get("name").(string)) + id := accounts.NewAccountID(subscriptionId, d.Get("resource_group_name").(string), d.Get("name").(string)) if d.IsNewResource() { - existing, err := 
client.Get(ctx, id.ResourceGroup, id.Name) + existing, err := client.Get(ctx, id) if err != nil { - if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("checking for presence of existing Data Lake Store %s: %+v", id, err) + if !response.WasNotFound(existing.HttpResponse) { + return fmt.Errorf("retrieving %s: %+v", id, err) } } - if existing.ID != nil && *existing.ID != "" { - return tf.ImportAsExistsError("azurerm_data_lake_store", *existing.ID) + if !response.WasNotFound(existing.HttpResponse) { + return tf.ImportAsExistsError("azurerm_data_lake_store", id.ID()) } } location := azure.NormalizeLocation(d.Get("location").(string)) - tier := d.Get("tier").(string) + tier := accounts.TierType(d.Get("tier").(string)) - encryptionState := account.EncryptionState(d.Get("encryption_state").(string)) - encryptionType := account.EncryptionConfigType(d.Get("encryption_type").(string)) - firewallState := account.FirewallState(d.Get("firewall_state").(string)) - firewallAllowAzureIPs := account.FirewallAllowAzureIpsState(d.Get("firewall_allow_azure_ips").(string)) + encryptionState := accounts.EncryptionState(d.Get("encryption_state").(string)) + encryptionType := accounts.EncryptionConfigType(d.Get("encryption_type").(string)) + firewallState := accounts.FirewallState(d.Get("firewall_state").(string)) + firewallAllowAzureIPs := accounts.FirewallAllowAzureIpsState(d.Get("firewall_allow_azure_ips").(string)) t := d.Get("tags").(map[string]interface{}) log.Printf("[INFO] preparing arguments for Data Lake Store creation %s", id) - dateLakeStore := account.CreateDataLakeStoreAccountParameters{ - Location: &location, - Tags: tags.Expand(t), + dateLakeStore := accounts.CreateDataLakeStoreAccountParameters{ + Location: location, + Tags: expandTags(t), Identity: expandDataLakeStoreIdentity(d.Get("identity").([]interface{})), - CreateDataLakeStoreAccountProperties: &account.CreateDataLakeStoreAccountProperties{ - NewTier: account.TierType(tier), - FirewallState: firewallState, - FirewallAllowAzureIps: firewallAllowAzureIPs, - EncryptionState: encryptionState, + Properties: &accounts.CreateDataLakeStoreAccountProperties{ + NewTier: &tier, + FirewallState: &firewallState, + FirewallAllowAzureIps: &firewallAllowAzureIPs, + EncryptionState: &encryptionState, - EncryptionConfig: &account.EncryptionConfig{ + EncryptionConfig: &accounts.EncryptionConfig{ Type: encryptionType, }, }, } - future, err := client.Create(ctx, id.ResourceGroup, id.Name, dateLakeStore) - if err != nil { - return fmt.Errorf("issuing create request for Data Lake Store %s: %+v", id, err) + if err := client.CreateThenPoll(ctx, id, dateLakeStore); err != nil { + return fmt.Errorf("creating %s: %+v", id, err) } - - if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("creating Data Lake Store %s: %+v", id, err) - } - d.SetId(id.ID()) return resourceArmDateLakeStoreRead(d, meta) @@ -213,32 +185,27 @@ func resourceArmDateLakeStoreUpdate(d *pluginsdk.ResourceData, meta interface{}) ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.AccountID(d.Id()) + id, err := accounts.ParseAccountID(d.Id()) if err != nil { return err } - tier := d.Get("tier").(string) - firewallState := account.FirewallState(d.Get("firewall_state").(string)) - firewallAllowAzureIPs := account.FirewallAllowAzureIpsState(d.Get("firewall_allow_azure_ips").(string)) + tier := accounts.TierType(d.Get("tier").(string)) + firewallState := 
accounts.FirewallState(d.Get("firewall_state").(string)) + firewallAllowAzureIPs := accounts.FirewallAllowAzureIpsState(d.Get("firewall_allow_azure_ips").(string)) t := d.Get("tags").(map[string]interface{}) - props := account.UpdateDataLakeStoreAccountParameters{ - UpdateDataLakeStoreAccountProperties: &account.UpdateDataLakeStoreAccountProperties{ - NewTier: account.TierType(tier), - FirewallState: firewallState, - FirewallAllowAzureIps: firewallAllowAzureIPs, + props := accounts.UpdateDataLakeStoreAccountParameters{ + Properties: &accounts.UpdateDataLakeStoreAccountProperties{ + NewTier: &tier, + FirewallState: &firewallState, + FirewallAllowAzureIps: &firewallAllowAzureIPs, }, - Tags: tags.Expand(t), - } - - future, err := client.Update(ctx, id.ResourceGroup, id.Name, props) - if err != nil { - return fmt.Errorf("issuing update request for Data Lake Store %s: %+v", id, err) + Tags: expandTags(t), } - if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("waiting for the update of Data Lake Store %s to complete: %+v", id, err) + if err := client.UpdateThenPoll(ctx, *id, props); err != nil { + return fmt.Errorf("updating %s: %+v", id, err) } return resourceArmDateLakeStoreRead(d, meta) @@ -249,47 +216,69 @@ func resourceArmDateLakeStoreRead(d *pluginsdk.ResourceData, meta interface{}) e ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.AccountID(d.Id()) + id, err := accounts.ParseAccountID(d.Id()) if err != nil { return err } - resp, err := client.Get(ctx, id.ResourceGroup, id.Name) + resp, err := client.Get(ctx, *id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { log.Printf("[WARN] Data Lake Store Account %s was not found", id) d.SetId("") return nil } - return fmt.Errorf("making Read request on Azure Data Lake Store %s: %+v", id, err) + return fmt.Errorf("retreiving %s: %+v", id, err) } d.Set("name", id.Name) d.Set("resource_group_name", id.ResourceGroup) - if location := resp.Location; location != nil { - d.Set("location", azure.NormalizeLocation(*location)) - } - if err := d.Set("identity", flattenDataLakeStoreIdentity(resp.Identity)); err != nil { - return fmt.Errorf("flattening identity on Data Lake Store %s: %+v", id, err) - } + if model := resp.Model; model != nil { + if location := model.Location; location != nil { + d.Set("location", azure.NormalizeLocation(*location)) + } - if properties := resp.DataLakeStoreAccountProperties; properties != nil { - d.Set("tier", string(properties.CurrentTier)) + if err := d.Set("identity", flattenDataLakeStoreIdentity(model.Identity)); err != nil { + return fmt.Errorf("flattening identity on Data Lake Store %s: %+v", id, err) + } - d.Set("encryption_state", string(properties.EncryptionState)) - d.Set("firewall_state", string(properties.FirewallState)) - d.Set("firewall_allow_azure_ips", string(properties.FirewallAllowAzureIps)) + if properties := model.Properties; properties != nil { + tier := "" + if properties.CurrentTier != nil { + tier = string(*properties.CurrentTier) + } + d.Set("tier", tier) - if config := properties.EncryptionConfig; config != nil { - d.Set("encryption_type", string(config.Type)) + encryptionState := "" + if properties.EncryptionState != nil { + encryptionState = string(*properties.EncryptionState) + } + d.Set("encryption_state", encryptionState) + + firewallState := "" + if properties.FirewallState != nil { + firewallState = string(*properties.FirewallState) + } + 
d.Set("firewall_state", firewallState) + + firewallAllowAzureIps := "" + if properties.FirewallAllowAzureIps != nil { + firewallAllowAzureIps = string(*properties.FirewallAllowAzureIps) + } + d.Set("firewall_allow_azure_ips", firewallAllowAzureIps) + + if config := properties.EncryptionConfig; config != nil { + d.Set("encryption_type", string(config.Type)) + } + + d.Set("endpoint", properties.Endpoint) } - d.Set("endpoint", properties.Endpoint) + return tags.FlattenAndSet(d, flattenTags(model.Tags)) } - - return tags.FlattenAndSet(d, resp.Tags) + return nil } func resourceArmDateLakeStoreDelete(d *pluginsdk.ResourceData, meta interface{}) error { @@ -297,48 +286,43 @@ func resourceArmDateLakeStoreDelete(d *pluginsdk.ResourceData, meta interface{}) ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.AccountID(d.Id()) + id, err := accounts.ParseAccountID(d.Id()) if err != nil { return err } - future, err := client.Delete(ctx, id.ResourceGroup, id.Name) - if err != nil { - return fmt.Errorf("deleting Data Lake Store %s: %+v", id, err) - } - - if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("waiting for deletion of Data Lake Store %s: %+v", id, err) + if err := client.DeleteThenPoll(ctx, *id); err != nil { + return fmt.Errorf("deleting %s: %+v", id, err) } return nil } -func expandDataLakeStoreIdentity(input []interface{}) *account.EncryptionIdentity { +func expandDataLakeStoreIdentity(input []interface{}) *identity.SystemAssignedIdentity { if len(input) == 0 { return nil } v := input[0].(map[string]interface{}) - return &account.EncryptionIdentity{ - Type: utils.String(v["type"].(string)), + return &identity.SystemAssignedIdentity{ + Type: identity.Type(v["type"].(string)), } } -func flattenDataLakeStoreIdentity(identity *account.EncryptionIdentity) []interface{} { +func flattenDataLakeStoreIdentity(identity *identity.SystemAssignedIdentity) []interface{} { if identity == nil { return []interface{}{} } principalID := "" - if identity.PrincipalID != nil { - principalID = identity.PrincipalID.String() + if identity.PrincipalId != nil { + principalID = *identity.PrincipalId } tenantID := "" - if identity.TenantID != nil { - tenantID = identity.TenantID.String() + if identity.TenantId != nil { + tenantID = *identity.TenantId } return []interface{}{ diff --git a/internal/services/datalake/data_lake_store_resource_test.go b/internal/services/datalake/data_lake_store_resource_test.go index 9995b1a85a41..72d246bdec4f 100644 --- a/internal/services/datalake/data_lake_store_resource_test.go +++ b/internal/services/datalake/data_lake_store_resource_test.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" - "github.com/hashicorp/terraform-provider-azurerm/internal/services/datashare/parse" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/sdk/datalakestore/2016-11-01/accounts" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" ) @@ -172,17 +172,17 @@ func TestAccDataLakeStore_withTags(t *testing.T) { } func (t DataLakeStoreResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := parse.AccountID(state.ID) + id, err := accounts.ParseAccountID(state.ID) if 
err != nil { return nil, err } - resp, err := clients.Datalake.StoreAccountsClient.Get(ctx, id.ResourceGroup, id.Name) + resp, err := clients.Datalake.StoreAccountsClient.Get(ctx, *id) if err != nil { - return nil, fmt.Errorf("retrieving Date Lake Store %s: %+v", id, err) + return nil, fmt.Errorf("retrieving %s: %+v", id, err) } - return utils.Bool(resp.DataLakeStoreAccountProperties != nil), nil + return utils.Bool(resp.Model != nil), nil } func (DataLakeStoreResource) basic(data acceptance.TestData) string { diff --git a/internal/services/datalake/data_lake_store_virtual_network_rule_resource.go b/internal/services/datalake/data_lake_store_virtual_network_rule_resource.go index 1bceb6e1e8d4..06ae8882366c 100644 --- a/internal/services/datalake/data_lake_store_virtual_network_rule_resource.go +++ b/internal/services/datalake/data_lake_store_virtual_network_rule_resource.go @@ -5,17 +5,16 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account" "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/parse" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/sdk/datalakestore/2016-11-01/virtualnetworkrules" "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/validate" networkValidate "github.com/hashicorp/terraform-provider-azurerm/internal/services/network/validate" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" - "github.com/hashicorp/terraform-provider-azurerm/utils" ) func resourceDataLakeStoreVirtualNetworkRule() *pluginsdk.Resource { @@ -65,35 +64,35 @@ func resourceDataLakeStoreVirtualNetworkRule() *pluginsdk.Resource { func resourceDataLakeStoreVirtualNetworkRuleCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Datalake.VirtualNetworkRulesClient - subscriptionId := meta.(*clients.Client).Datalake.VirtualNetworkRulesClient.SubscriptionID + subscriptionId := meta.(*clients.Client).Datalake.SubscriptionId ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() - id := parse.NewVirtualNetworkRuleID(subscriptionId, d.Get("resource_group_name").(string), d.Get("account_name").(string), d.Get("name").(string)) + id := virtualnetworkrules.NewVirtualNetworkRuleID(subscriptionId, d.Get("resource_group_name").(string), d.Get("account_name").(string), d.Get("name").(string)) virtualNetworkSubnetId := d.Get("subnet_id").(string) if d.IsNewResource() { - existing, err := client.Get(ctx, id.ResourceGroup, id.AccountName, id.Name) + existing, err := client.Get(ctx, id) if err != nil { - if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("checking for presence of existing Data Lake Store Virtual Network Rule %s: %+v", id, err) + if !response.WasNotFound(existing.HttpResponse) { + return fmt.Errorf("checking for presence of %s: %+v", id, err) } } - if existing.ID != nil && *existing.ID != "" { - return tf.ImportAsExistsError("azurerm_data_lake_store_virtual_network_rule", *existing.ID) + if !response.WasNotFound(existing.HttpResponse) { + return tf.ImportAsExistsError("azurerm_data_lake_store_virtual_network_rule", id.ID()) } } - parameters 
:= account.CreateOrUpdateVirtualNetworkRuleParameters{ - CreateOrUpdateVirtualNetworkRuleProperties: &account.CreateOrUpdateVirtualNetworkRuleProperties{ - SubnetID: utils.String(virtualNetworkSubnetId), + parameters := virtualnetworkrules.CreateOrUpdateVirtualNetworkRuleParameters{ + Properties: virtualnetworkrules.CreateOrUpdateVirtualNetworkRuleProperties{ + SubnetId: virtualNetworkSubnetId, }, } - if _, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.AccountName, id.Name, parameters); err != nil { - return fmt.Errorf("creating Data Lake Store Virtual Network Rule %s: %+v", id, err) + if _, err := client.CreateOrUpdate(ctx, id, parameters); err != nil { + return fmt.Errorf("creating %s: %+v", id, err) } d.SetId(id.ID()) @@ -106,28 +105,30 @@ func resourceDataLakeStoreVirtualNetworkRuleRead(d *pluginsdk.ResourceData, meta ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.VirtualNetworkRuleID(d.Id()) + id, err := virtualnetworkrules.ParseVirtualNetworkRuleID(d.Id()) if err != nil { return err } - resp, err := client.Get(ctx, id.ResourceGroup, id.AccountName, id.Name) + resp, err := client.Get(ctx, *id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { log.Printf("[INFO] Data Lake Store Virtual Network Rule %s was not found - removing from state", id) d.SetId("") return nil } - return fmt.Errorf("retrieving Virtual Network Rule %s: %+v", id, err) + return fmt.Errorf("retrieving %s: %+v", id, err) } d.Set("name", id.Name) d.Set("account_name", id.AccountName) d.Set("resource_group_name", id.ResourceGroup) - if props := resp.VirtualNetworkRuleProperties; props != nil { - d.Set("subnet_id", props.SubnetID) + if model := resp.Model; model != nil { + if props := model.Properties; props != nil { + d.Set("subnet_id", props.SubnetId) + } } return nil @@ -138,17 +139,17 @@ func resourceDataLakeStoreVirtualNetworkRuleDelete(d *pluginsdk.ResourceData, me ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.VirtualNetworkRuleID(d.Id()) + id, err := virtualnetworkrules.ParseVirtualNetworkRuleID(d.Id()) if err != nil { return err } - resp, err := client.Delete(ctx, id.ResourceGroup, id.AccountName, id.Name) + resp, err := client.Delete(ctx, *id) if err != nil { - if response.WasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { return nil } - return fmt.Errorf("deleting Data Lake Store Virtual Network Rule %s: %+v", id, err) + return fmt.Errorf("deleting %s: %+v", id, err) } return nil diff --git a/internal/services/datalake/data_lake_store_virtual_network_rule_resource_test.go b/internal/services/datalake/data_lake_store_virtual_network_rule_resource_test.go index fe58150d0b38..110f02cd8384 100644 --- a/internal/services/datalake/data_lake_store_virtual_network_rule_resource_test.go +++ b/internal/services/datalake/data_lake_store_virtual_network_rule_resource_test.go @@ -7,10 +7,11 @@ import ( "strconv" "testing" + "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" - "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/parse" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/sdk/datalakestore/2016-11-01/virtualnetworkrules" 
"github.com/hashicorp/terraform-provider-azurerm/internal/services/datalake/validate" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" @@ -300,29 +301,29 @@ func TestResourceAzureRMDataLakeStoreVirtualNetworkRule_validNameValidation(t *t } func (r DataLakeStoreVirtualNetworkRuleResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := parse.VirtualNetworkRuleID(state.ID) + id, err := virtualnetworkrules.ParseVirtualNetworkRuleID(state.ID) if err != nil { return nil, err } - resp, err := client.Datalake.VirtualNetworkRulesClient.Get(ctx, id.ResourceGroup, id.AccountName, id.Name) + resp, err := client.Datalake.VirtualNetworkRulesClient.Get(ctx, *id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { return utils.Bool(false), nil } - return nil, fmt.Errorf("retrieving Data Lake Store Virtual Network Rule %s: %+v", id, err) + return nil, fmt.Errorf("retrieving %s: %+v", id, err) } return utils.Bool(true), nil } func (r DataLakeStoreVirtualNetworkRuleResource) Destroy(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := parse.VirtualNetworkRuleID(state.ID) + id, err := virtualnetworkrules.ParseVirtualNetworkRuleID(state.ID) if err != nil { return nil, err } rulesClient := client.Datalake.VirtualNetworkRulesClient - if _, err = rulesClient.Delete(ctx, id.ResourceGroup, id.AccountName, id.Name); err != nil { - return nil, fmt.Errorf("deleting Data Lake Store Virtual Network Rule %q (Account %q / Resource Group %q): %+v", id.Name, id.AccountName, id.ResourceGroup, err) + if _, err = rulesClient.Delete(ctx, *id); err != nil { + return nil, fmt.Errorf("deleting %s: %+v", id, err) } return utils.Bool(true), nil diff --git a/internal/services/datalake/transition.go b/internal/services/datalake/transition.go new file mode 100644 index 000000000000..b482f242189d --- /dev/null +++ b/internal/services/datalake/transition.go @@ -0,0 +1,23 @@ +package datalake + +import "github.com/hashicorp/terraform-provider-azurerm/utils" + +func expandTags(input map[string]interface{}) *map[string]string { + output := make(map[string]string) + for k, v := range input { + output[k] = v.(string) + } + return &output +} + +func flattenTags(input *map[string]string) map[string]*string { + output := make(map[string]*string) + + if input != nil { + for k, v := range *input { + output[k] = utils.String(v) + } + } + + return output +} diff --git a/internal/services/domainservices/active_directory_domain_service_test.go b/internal/services/domainservices/active_directory_domain_service_test.go index 2b735a6fcb96..952337b2535a 100644 --- a/internal/services/domainservices/active_directory_domain_service_test.go +++ b/internal/services/domainservices/active_directory_domain_service_test.go @@ -297,8 +297,9 @@ resource "azuread_service_principal" "test" { } resource "azuread_group" "test" { - name = "AAD DC Administrators" - description = "Delegated group to administer Azure AD Domain Services" + display_name = "AAD DC Administrators" + description = "Delegated group to administer Azure AD Domain Services" + security_enabled = true } resource "azuread_user" "test" { diff --git a/internal/services/frontdoor/frontdoor_resource.go b/internal/services/frontdoor/frontdoor_resource.go index ffabd1c88ce1..327d98e86a4b 100644 --- a/internal/services/frontdoor/frontdoor_resource.go +++ 
b/internal/services/frontdoor/frontdoor_resource.go @@ -25,9 +25,9 @@ import ( func resourceFrontDoor() *pluginsdk.Resource { return &pluginsdk.Resource{ - Create: resourceFrontDoorCreateUpdate, + Create: resourceFrontDoorCreate, Read: resourceFrontDoorRead, - Update: resourceFrontDoorCreateUpdate, + Update: resourceFrontDoorUpdate, Delete: resourceFrontDoorDelete, Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { @@ -549,7 +549,7 @@ func resourceFrontDoor() *pluginsdk.Resource { } } -func resourceFrontDoorCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { +func resourceFrontDoorCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Frontdoor.FrontDoorsClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -559,17 +559,15 @@ func resourceFrontDoorCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) subscriptionId := meta.(*clients.Client).Account.SubscriptionId id := frontdoors.NewFrontDoorID(subscriptionId, resourceGroup, name) - if d.IsNewResource() { - resp, err := client.Get(ctx, id) - if err != nil { - if !response.WasNotFound(resp.HttpResponse) { - return fmt.Errorf("checking for presence of %s: %+v", id, err) - } - } + resp, err := client.Get(ctx, id) + if err != nil { if !response.WasNotFound(resp.HttpResponse) { - return tf.ImportAsExistsError("azurerm_frontdoor", id.ID()) + return fmt.Errorf("checking for presence of %s: %+v", id, err) } } + if !response.WasNotFound(resp.HttpResponse) { + return tf.ImportAsExistsError("azurerm_frontdoor", id.ID()) + } // remove in 3.0 // due to a change in the RP, if a Frontdoor exists in a location other than 'Global' it may continue to @@ -605,31 +603,124 @@ func resourceFrontDoorCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) backendPoolsSettings := d.Get("enforce_backend_pools_certificate_name_check").(bool) backendPoolsSendReceiveTimeoutSeconds := int64(d.Get("backend_pools_send_receive_timeout_seconds").(int)) enabledState := expandFrontDoorEnabledState(d.Get("load_balancer_enabled").(bool)) - explicitResourceOrder := d.Get("explicit_resource_order").([]interface{}) t := d.Get("tags").(map[string]interface{}) + frontDoorParameters := frontdoors.FrontDoor{ + Location: utils.String(location), + Properties: &frontdoors.FrontDoorProperties{ + FriendlyName: utils.String(friendlyName), + RoutingRules: expandFrontDoorRoutingRule(routingRules, id, nil), + BackendPools: expandFrontDoorBackendPools(backendPools, id), + BackendPoolsSettings: expandFrontDoorBackendPoolsSettings(backendPoolsSettings, backendPoolsSendReceiveTimeoutSeconds), + FrontendEndpoints: expandFrontDoorFrontendEndpoint(frontendEndpoints, id), + HealthProbeSettings: expandFrontDoorHealthProbeSettingsModel(healthProbeSettings, id), + LoadBalancingSettings: expandFrontDoorLoadBalancingSettingsModel(loadBalancingSettings, id), + EnabledState: &enabledState, + }, + Tags: expandTags(t), + } + + if err := client.CreateOrUpdateThenPoll(ctx, id, frontDoorParameters); err != nil { + return fmt.Errorf("creating %s: %+v", id, err) + } + + d.Set("explicit_resource_order", flattenExplicitResourceOrder(backendPools, frontendEndpoints, routingRules, loadBalancingSettings, healthProbeSettings, id)) + + d.SetId(id.ID()) + return resourceFrontDoorRead(d, meta) +} + +func resourceFrontDoorUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Frontdoor.FrontDoorsClient + ctx, cancel := 
timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + name := d.Get("name").(string) + resourceGroup := d.Get("resource_group_name").(string) + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + id := frontdoors.NewFrontDoorID(subscriptionId, resourceGroup, name) + + // remove in 3.0 + // due to a change in the RP, if a Frontdoor exists in a location other than 'Global' it may continue to + // exist in that location, if this is a brand new Frontdoor it must be created in the 'Global' location + location := "Global" + cfgLocation, hasLocation := d.GetOk("location") + + exists, err := client.Get(ctx, id) + if err != nil || exists.Model == nil { + if !response.WasNotFound(exists.HttpResponse) { + return fmt.Errorf("locating %s: %+v", id, err) + } + } else { + location = azure.NormalizeLocation(*exists.Model.Location) + } + + if hasLocation { + if location != azure.NormalizeLocation(cfgLocation) { + return fmt.Errorf("the Front Door %q (Resource Group %q) already exists in %q and cannot be moved to the %q location", name, resourceGroup, location, cfgLocation) + } + } + + existingModel := *exists.Model + + if d.HasChange("friendly_name") { + existingModel.Properties.FriendlyName = utils.String(d.Get("friendly_name").(string)) + } + + routingRules := d.Get("routing_rule").([]interface{}) + if d.HasChange("routing_rule") { + rulesEngines := make(map[string]*frontdoors.SubResource) + if existingModel.Properties != nil && existingModel.Properties.RoutingRules != nil { + for _, rule := range *existingModel.Properties.RoutingRules { + if rule.Properties != nil && rule.Properties.RulesEngine != nil { + rulesEngines[*rule.Name] = rule.Properties.RulesEngine + } + } + } + existingModel.Properties.RoutingRules = expandFrontDoorRoutingRule(routingRules, id, &rulesEngines) + } + + loadBalancingSettings := d.Get("backend_pool_load_balancing").([]interface{}) + if d.HasChange("backend_pool_load_balancing") { + existingModel.Properties.LoadBalancingSettings = expandFrontDoorLoadBalancingSettingsModel(loadBalancingSettings, id) + } + + healthProbeSettings := d.Get("backend_pool_health_probe").([]interface{}) + if d.HasChange("backend_pool_health_probe") { + existingModel.Properties.HealthProbeSettings = expandFrontDoorHealthProbeSettingsModel(healthProbeSettings, id) + } + + backendPools := d.Get("backend_pool").([]interface{}) + if d.HasChange("backend_pool") { + existingModel.Properties.BackendPools = expandFrontDoorBackendPools(backendPools, id) + } + + frontendEndpoints := d.Get("frontend_endpoint").([]interface{}) + if d.HasChange("frontend_endpoint") { + existingModel.Properties.FrontendEndpoints = expandFrontDoorFrontendEndpoint(frontendEndpoints, id) + } + + if d.HasChanges("enforce_backend_pools_certificate_name_check", "backend_pools_send_receive_timeout_seconds") { + existingModel.Properties.BackendPoolsSettings = expandFrontDoorBackendPoolsSettings(d.Get("enforce_backend_pools_certificate_name_check").(bool), int64(d.Get("backend_pools_send_receive_timeout_seconds").(int))) + } + + if d.HasChange("load_balancer_enabled") { + enabledState := expandFrontDoorEnabledState(d.Get("load_balancer_enabled").(bool)) + existingModel.Properties.EnabledState = &enabledState + } + + if d.HasChanges("tags") { + existingModel.Tags = expandTags(d.Get("tags").(map[string]interface{})) + } + // If the explicitResourceOrder is empty and it's not a new resource set the mapping table to the state file and return an error. 
// If the explicitResourceOrder is empty and it is a new resource it will run the CreateOrUpdate as expected // If the explicitResourceOrder is NOT empty and it is NOT a new resource it will run the CreateOrUpdate as expected - if len(explicitResourceOrder) == 0 && !d.IsNewResource() { + explicitResourceOrder := d.Get("explicit_resource_order").([]interface{}) + if len(explicitResourceOrder) == 0 { d.Set("explicit_resource_order", flattenExplicitResourceOrder(backendPools, frontendEndpoints, routingRules, loadBalancingSettings, healthProbeSettings, id)) } else { - frontDoorParameters := frontdoors.FrontDoor{ - Location: utils.String(location), - Properties: &frontdoors.FrontDoorProperties{ - FriendlyName: utils.String(friendlyName), - RoutingRules: expandFrontDoorRoutingRule(routingRules, id), - BackendPools: expandFrontDoorBackendPools(backendPools, id), - BackendPoolsSettings: expandFrontDoorBackendPoolsSettings(backendPoolsSettings, backendPoolsSendReceiveTimeoutSeconds), - FrontendEndpoints: expandFrontDoorFrontendEndpoint(frontendEndpoints, id), - HealthProbeSettings: expandFrontDoorHealthProbeSettingsModel(healthProbeSettings, id), - LoadBalancingSettings: expandFrontDoorLoadBalancingSettingsModel(loadBalancingSettings, id), - EnabledState: &enabledState, - }, - Tags: expandTags(t), - } - - if err := client.CreateOrUpdateThenPoll(ctx, id, frontDoorParameters); err != nil { + if err := client.CreateOrUpdateThenPoll(ctx, id, existingModel); err != nil { return fmt.Errorf("creating %s: %+v", id, err) } @@ -1040,7 +1131,7 @@ func expandFrontDoorLoadBalancingSettingsModel(input []interface{}, frontDoorId return &output } -func expandFrontDoorRoutingRule(input []interface{}, frontDoorId frontdoors.FrontDoorId) *[]frontdoors.RoutingRule { +func expandFrontDoorRoutingRule(input []interface{}, frontDoorId frontdoors.FrontDoorId, rulesEngines *map[string]*frontdoors.SubResource) *[]frontdoors.RoutingRule { if len(input) == 0 { return nil } @@ -1079,6 +1170,15 @@ func expandFrontDoorRoutingRule(input []interface{}, frontDoorId frontdoors.Fron RouteConfiguration: &routingConfiguration, }, } + + // Preserve existing rules engine for this routing rule + // https://github.com/hashicorp/terraform-provider-azurerm/issues/7455#issuecomment-882769364 + if rulesEngines != nil { + if rulesEngine, ok := (*rulesEngines)[name]; ok { + currentRoutingRule.Properties.RulesEngine = rulesEngine + } + } + output = append(output, currentRoutingRule) } @@ -1261,7 +1361,7 @@ func flattenExplicitResourceOrder(backendPools, frontendEndpoints, routingRules, } if len(routingRules) > 0 { var oldBlocks interface{} - flattenendRoutingRules, err := flattenFrontDoorRoutingRule(expandFrontDoorRoutingRule(routingRules, frontDoorId), oldBlocks, frontDoorId, make([]interface{}, 0)) + flattenendRoutingRules, err := flattenFrontDoorRoutingRule(expandFrontDoorRoutingRule(routingRules, frontDoorId, nil), oldBlocks, frontDoorId, make([]interface{}, 0)) if err == nil { for _, ids := range *flattenendRoutingRules { routingRule := ids.(map[string]interface{}) diff --git a/internal/services/hdinsight/hdinsight_cluster_resource_test.go b/internal/services/hdinsight/hdinsight_cluster_resource_test.go index 0e302db70b68..9e1f02728eb1 100644 --- a/internal/services/hdinsight/hdinsight_cluster_resource_test.go +++ b/internal/services/hdinsight/hdinsight_cluster_resource_test.go @@ -135,8 +135,9 @@ resource azurerm_subnet_network_security_group_association "test" { } resource "azuread_group" "test" { - display_name = "AAD DC Administrators 
%[3]s" - description = "Test for delegating group to administer Azure AD Domain Services" + display_name = "AAD DC Administrators %[3]s" + description = "Test for delegating group to administer Azure AD Domain Services" + security_enabled = true } data "azuread_domains" "test" { diff --git a/internal/services/hdinsight/hdinsight_kafka_cluster_resource_test.go b/internal/services/hdinsight/hdinsight_kafka_cluster_resource_test.go index 07fad5d071de..dd1a0cf4ca54 100644 --- a/internal/services/hdinsight/hdinsight_kafka_cluster_resource_test.go +++ b/internal/services/hdinsight/hdinsight_kafka_cluster_resource_test.go @@ -1289,7 +1289,8 @@ func (r HDInsightKafkaClusterResource) restProxy(data acceptance.TestData) strin provider "azuread" {} resource "azuread_group" "test" { - name = "acctesthdi-%d" + display_name = "acctesthdi-%d" + security_enabled = true } resource "azurerm_hdinsight_kafka_cluster" "test" { diff --git a/internal/services/keyvault/validate/child_id.go b/internal/services/keyvault/validate/child_id.go index f60922f2c331..b0a8b7a2277f 100644 --- a/internal/services/keyvault/validate/child_id.go +++ b/internal/services/keyvault/validate/child_id.go @@ -19,3 +19,17 @@ func KeyVaultChildID(i interface{}, k string) (warnings []string, errors []error return warnings, errors } + +func KeyVaultChildIDWithOptionalVersion(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %q to be string", k)) + return warnings, errors + } + + if _, err := parse.ParseOptionallyVersionedNestedItemID(v); err != nil { + errors = append(errors, fmt.Errorf("can not parse %q as a Key Vault Child resource id: %v", k, err)) + } + + return warnings, errors +} diff --git a/internal/services/machinelearning/machine_learning_workspace_resource.go b/internal/services/machinelearning/machine_learning_workspace_resource.go index ed1b7b49ce01..81d38c80d555 100644 --- a/internal/services/machinelearning/machine_learning_workspace_resource.go +++ b/internal/services/machinelearning/machine_learning_workspace_resource.go @@ -141,6 +141,27 @@ func resourceMachineLearningWorkspace() *pluginsdk.Resource { Optional: true, }, + "encryption": { + Type: pluginsdk.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "key_vault_id": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: keyVaultValidate.VaultID, + }, + "key_id": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.IsURLWithHTTPorHTTPS, + }, + }, + }, + }, + "friendly_name": { Type: pluginsdk.TypeString, Optional: true, @@ -204,6 +225,7 @@ func resourceMachineLearningWorkspaceCreate(d *pluginsdk.ResourceData, meta inte ApplicationInsights: utils.String(d.Get("application_insights_id").(string)), KeyVault: utils.String(d.Get("key_vault_id").(string)), AllowPublicAccessWhenBehindVnet: utils.Bool(d.Get("public_network_access_enabled").(bool)), + Encryption: expandMachineLearningWorkspaceEncryption(d.Get("encryption").([]interface{})), }, } @@ -290,6 +312,10 @@ func resourceMachineLearningWorkspaceRead(d *pluginsdk.ResourceData, meta interf return fmt.Errorf("flattening identity on Workspace %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) } + if err := d.Set("encryption", flattenMachineLearningWorkspaceEncryption(resp.Encryption)); err != nil { + return fmt.Errorf("flattening encryption on Workspace %q (Resource Group %q): %+v", id.Name, 
id.ResourceGroup, err) + } + return tags.FlattenAndSet(d, resp.Tags) } @@ -391,3 +417,34 @@ func flattenMachineLearningWorkspaceIdentity(identity *machinelearningservices.I }, } } + +func expandMachineLearningWorkspaceEncryption(input []interface{}) *machinelearningservices.EncryptionProperty { + if len(input) == 0 || input[0] == nil { + return nil + } + + raw := input[0].(map[string]interface{}) + encryption := &machinelearningservices.EncryptionProperty{ + Status: machinelearningservices.EncryptionStatusEnabled, + } + + encryption.KeyVaultProperties = &machinelearningservices.KeyVaultProperties{ + KeyVaultArmID: utils.String(raw["key_vault_id"].(string)), + KeyIdentifier: utils.String(raw["key_id"].(string)), + } + + return encryption +} + +func flattenMachineLearningWorkspaceEncryption(encryption *machinelearningservices.EncryptionProperty) []interface{} { + if encryption == nil { + return []interface{}{} + } + + return []interface{}{ + map[string]interface{}{ + "key_vault_id": *encryption.KeyVaultProperties.KeyVaultArmID, + "key_id": *encryption.KeyVaultProperties.KeyIdentifier, + }, + } +} diff --git a/internal/services/machinelearning/machine_learning_workspace_resource_test.go b/internal/services/machinelearning/machine_learning_workspace_resource_test.go index 55ea4fbe5274..1c08d7b94525 100644 --- a/internal/services/machinelearning/machine_learning_workspace_resource_test.go +++ b/internal/services/machinelearning/machine_learning_workspace_resource_test.go @@ -209,6 +209,24 @@ resource "azurerm_container_registry" "test" { admin_enabled = true } +resource "azurerm_key_vault_key" "test" { + name = "acctest-kv-key-%[2]d" + key_vault_id = azurerm_key_vault.test.id + key_type = "RSA" + key_size = 2048 + + key_opts = [ + "decrypt", + "encrypt", + "sign", + "unwrapKey", + "verify", + "wrapKey", + ] + + depends_on = [azurerm_key_vault.test, azurerm_key_vault_access_policy.test] +} + resource "azurerm_machine_learning_workspace" "test" { name = "acctest-MLW-%[2]d" location = azurerm_resource_group.test.location @@ -228,6 +246,12 @@ resource "azurerm_machine_learning_workspace" "test" { type = "SystemAssigned" } + encryption { + key_vault_id = azurerm_key_vault.test.id + key_id = azurerm_key_vault_key.test.id + } + + tags = { ENV = "Test" } @@ -248,6 +272,24 @@ resource "azurerm_container_registry" "test" { admin_enabled = true } +resource "azurerm_key_vault_key" "test" { + name = "acctest-kv-key-%[2]d" + key_vault_id = azurerm_key_vault.test.id + key_type = "RSA" + key_size = 2048 + + key_opts = [ + "decrypt", + "encrypt", + "sign", + "unwrapKey", + "verify", + "wrapKey", + ] + + depends_on = [azurerm_key_vault.test, azurerm_key_vault_access_policy.test] +} + resource "azurerm_machine_learning_workspace" "test" { name = "acctest-MLW-%[2]d" location = azurerm_resource_group.test.location @@ -267,6 +309,11 @@ resource "azurerm_machine_learning_workspace" "test" { type = "SystemAssigned" } + encryption { + key_vault_id = azurerm_key_vault.test.id + key_id = azurerm_key_vault_key.test.id + } + tags = { ENV = "Test" FOO = "Updated" @@ -298,7 +345,11 @@ resource "azurerm_machine_learning_workspace" "import" { func (r WorkspaceResource) template(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { - features {} + features { + key_vault { + purge_soft_delete_on_destroy = false + } + } } data "azurerm_client_config" "current" {} @@ -326,6 +377,19 @@ resource "azurerm_key_vault" "test" { purge_protection_enabled = true } +resource "azurerm_key_vault_access_policy" "test" { 
+ key_vault_id = azurerm_key_vault.test.id + tenant_id = data.azurerm_client_config.current.tenant_id + object_id = data.azurerm_client_config.current.object_id + + key_permissions = [ + "Create", + "Get", + "Delete", + "Purge", + ] +} + resource "azurerm_storage_account" "test" { name = "acctestsa%[4]d" location = azurerm_resource_group.test.location diff --git a/internal/services/maps/maps_account_data_source.go b/internal/services/maps/maps_account_data_source.go index 47d8c8533993..24ada6d84b65 100644 --- a/internal/services/maps/maps_account_data_source.go +++ b/internal/services/maps/maps_account_data_source.go @@ -77,8 +77,8 @@ func dataSourceMapsAccountRead(d *pluginsdk.ResourceData, meta interface{}) erro d.SetId(id.ID()) - d.Set("name", id.Name) - d.Set("resource_group_name", id.ResourceGroup) + d.Set("name", id.AccountName) + d.Set("resource_group_name", id.ResourceGroupName) if model := resp.Model; model != nil { d.Set("sku_name", model.Sku.Name) diff --git a/internal/services/maps/maps_account_resource.go b/internal/services/maps/maps_account_resource.go index 4800c5dd0d49..02e68e99b77d 100644 --- a/internal/services/maps/maps_account_resource.go +++ b/internal/services/maps/maps_account_resource.go @@ -141,8 +141,8 @@ func resourceMapsAccountRead(d *pluginsdk.ResourceData, meta interface{}) error return fmt.Errorf("retrieving %s: %+v", *id, err) } - d.Set("name", id.Name) - d.Set("resource_group_name", id.ResourceGroup) + d.Set("name", id.AccountName) + d.Set("resource_group_name", id.ResourceGroupName) if model := resp.Model; model != nil { d.Set("sku_name", model.Sku.Name) diff --git a/internal/services/maps/sdk/2021-02-01/accounts/constants.go b/internal/services/maps/sdk/2021-02-01/accounts/constants.go index 7278294c92cf..8a78feb2a4ad 100644 --- a/internal/services/maps/sdk/2021-02-01/accounts/constants.go +++ b/internal/services/maps/sdk/2021-02-01/accounts/constants.go @@ -1,5 +1,7 @@ package accounts +import "strings" + type CreatedByType string const ( @@ -9,6 +11,31 @@ const ( CreatedByTypeUser CreatedByType = "User" ) +func PossibleValuesForCreatedByType() []string { + return []string{ + string(CreatedByTypeApplication), + string(CreatedByTypeKey), + string(CreatedByTypeManagedIdentity), + string(CreatedByTypeUser), + } +} + +func parseCreatedByType(input string) (*CreatedByType, error) { + vals := map[string]CreatedByType{ + "application": CreatedByTypeApplication, + "key": CreatedByTypeKey, + "managedidentity": CreatedByTypeManagedIdentity, + "user": CreatedByTypeUser, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CreatedByType(input) + return &out, nil +} + type KeyType string const ( @@ -16,6 +43,27 @@ const ( KeyTypeSecondary KeyType = "secondary" ) +func PossibleValuesForKeyType() []string { + return []string{ + string(KeyTypePrimary), + string(KeyTypeSecondary), + } +} + +func parseKeyType(input string) (*KeyType, error) { + vals := map[string]KeyType{ + "primary": KeyTypePrimary, + "secondary": KeyTypeSecondary, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := KeyType(input) + return &out, nil +} + type Kind string const ( @@ -23,6 +71,27 @@ const ( KindGenTwo Kind = "Gen2" ) +func PossibleValuesForKind() []string { + return []string{ + string(KindGenOne), + string(KindGenTwo), + } +} + +func parseKind(input string) (*Kind, error) { + vals := 
map[string]Kind{ + "gen1": KindGenOne, + "gen2": KindGenTwo, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := Kind(input) + return &out, nil +} + type Name string const ( @@ -30,3 +99,26 @@ const ( NameSOne Name = "S1" NameSZero Name = "S0" ) + +func PossibleValuesForName() []string { + return []string{ + string(NameGTwo), + string(NameSOne), + string(NameSZero), + } +} + +func parseName(input string) (*Name, error) { + vals := map[string]Name{ + "g2": NameGTwo, + "s1": NameSOne, + "s0": NameSZero, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := Name(input) + return &out, nil +} diff --git a/internal/services/maps/sdk/2021-02-01/accounts/id_account.go b/internal/services/maps/sdk/2021-02-01/accounts/id_account.go index 04fc5ea9e004..dcee34c86f3b 100644 --- a/internal/services/maps/sdk/2021-02-01/accounts/id_account.go +++ b/internal/services/maps/sdk/2021-02-01/accounts/id_account.go @@ -7,102 +7,118 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = AccountId{} + +// AccountId is a struct representing the Resource ID for a Account type AccountId struct { - SubscriptionId string - ResourceGroup string - Name string + SubscriptionId string + ResourceGroupName string + AccountName string } -func NewAccountID(subscriptionId, resourceGroup, name string) AccountId { +// NewAccountID returns a new AccountId struct +func NewAccountID(subscriptionId string, resourceGroupName string, accountName string) AccountId { return AccountId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, - Name: name, + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + AccountName: accountName, } } -func (id AccountId) String() string { - segments := []string{ - fmt.Sprintf("Name %q", id.Name), - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Account", segmentsStr) -} - -func (id AccountId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Maps/accounts/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.Name) -} - -// ParseAccountID parses a Account ID into an AccountId struct +// ParseAccountID parses 'input' into a AccountId func ParseAccountID(input string) (*AccountId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(AccountId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err - } - - resourceId := AccountId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") - } + var ok bool + id := AccountId{} - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.Name, err = id.PopSegment("accounts"); err != nil { - return nil, err + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was 
not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.AccountName, ok = parsed.Parsed["accountName"]; !ok { + return nil, fmt.Errorf("the segment 'accountName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParseAccountIDInsensitively parses an Account ID into an AccountId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseAccountID method should be used instead for validation etc. +// ParseAccountIDInsensitively parses 'input' case-insensitively into a AccountId +// note: this method should only be used for API response data and not user input func ParseAccountIDInsensitively(input string) (*AccountId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(AccountId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := AccountId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + var ok bool + id := AccountId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.AccountName, ok = parsed.Parsed["accountName"]; !ok { + return nil, fmt.Errorf("the segment 'accountName' was not found in the resource id %q", input) } - // find the correct casing for the 'accounts' segment - accountsKey := "accounts" - for key := range id.Path { - if strings.EqualFold(key, accountsKey) { - accountsKey = key - break - } + return &id, nil +} + +// ValidateAccountID checks that 'input' can be parsed as a Account ID +func ValidateAccountID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.Name, err = id.PopSegment(accountsKey); err != nil { - return nil, err + + if _, err := ParseAccountID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Account ID +func (id AccountId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Maps/accounts/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.AccountName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Account ID +func (id AccountId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("providers", "providers", "providers"), + 
resourceids.ResourceProviderSegment("microsoftMaps", "Microsoft.Maps", "Microsoft.Maps"), + resourceids.StaticSegment("accounts", "accounts", "accounts"), + resourceids.UserSpecifiedSegment("accountName", "accountValue"), } +} - return &resourceId, nil +// String returns a human-readable description of this Account ID +func (id AccountId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Account Name: %q", id.AccountName), + } + return fmt.Sprintf("Account (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/maps/sdk/2021-02-01/accounts/id_account_test.go b/internal/services/maps/sdk/2021-02-01/accounts/id_account_test.go index 4fb11da8a97a..e5c49cbc5db5 100644 --- a/internal/services/maps/sdk/2021-02-01/accounts/id_account_test.go +++ b/internal/services/maps/sdk/2021-02-01/accounts/id_account_test.go @@ -6,13 +6,29 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = AccountId{} +var _ resourceids.ResourceId = AccountId{} -func TestAccountIDFormatter(t *testing.T) { - actual := NewAccountID("{subscriptionId}", "{resourceGroupName}", "{accountName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Maps/accounts/{accountName}" +func TestNewAccountID(t *testing.T) { + id := NewAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "accountValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.AccountName != "accountValue" { + t.Fatalf("Expected %q but got %q for Segment 'AccountName'", id.AccountName, "accountValue") + } +} + +func TestFormatAccountID(t *testing.T) { + actual := NewAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "accountValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps/accounts/accountValue" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) } } @@ -22,66 +38,61 @@ func TestParseAccountID(t *testing.T) { Error bool Expected *AccountId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Maps/", + // Incomplete URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Maps/accounts/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Maps/accounts/{accountName}", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps/accounts", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps/accounts/accountValue", Expected: &AccountId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{accountName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + AccountName: "accountValue", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.MAPS/ACCOUNTS/{ACCOUNTNAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps/accounts/accountValue/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -91,7 +102,7 @@ func TestParseAccountID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -100,12 +111,15 @@ func TestParseAccountID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.AccountName != v.Expected.AccountName { + t.Fatalf("Expected %q but got %q for AccountName", v.Expected.AccountName, actual.AccountName) } + } } @@ -115,90 +129,110 @@ func TestParseAccountIDInsensitively(t *testing.T) { Error bool Expected *AccountId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Maps/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Maps/accounts/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Maps/accounts/{accountName}", - Expected: &AccountId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{accountName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Maps/accounts/{accountName}", - Expected: &AccountId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{accountName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mApS", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Maps/ACCOUNTS/{accountName}", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps/accounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mApS/aCcOuNtS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps/accounts/accountValue", Expected: &AccountId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{accountName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + AccountName: "accountValue", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Maps/AcCoUnTs/{accountName}", + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps/accounts/accountValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mApS/aCcOuNtS/aCcOuNtVaLuE", Expected: &AccountId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{accountName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + AccountName: "aCcOuNtVaLuE", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mApS/aCcOuNtS/aCcOuNtVaLuE/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -208,7 +242,7 @@ func TestParseAccountIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -217,11 +251,14 @@ func TestParseAccountIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.AccountName != v.Expected.AccountName { + t.Fatalf("Expected %q but got %q for AccountName", v.Expected.AccountName, actual.AccountName) } + } } diff --git a/internal/services/maps/sdk/2021-02-01/accounts/id_resourcegroup.go b/internal/services/maps/sdk/2021-02-01/accounts/id_resourcegroup.go index 4cc75abff624..701ad7475d26 100644 --- a/internal/services/maps/sdk/2021-02-01/accounts/id_resourcegroup.go +++ b/internal/services/maps/sdk/2021-02-01/accounts/id_resourcegroup.go @@ -7,83 +7,103 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = ResourceGroupId{} + +// ResourceGroupId is a struct representing the Resource ID for a Resource Group type ResourceGroupId struct { - SubscriptionId string - ResourceGroup string + SubscriptionId string + ResourceGroupName string } -func NewResourceGroupID(subscriptionId, resourceGroup string) ResourceGroupId { +// NewResourceGroupID returns a new ResourceGroupId struct +func NewResourceGroupID(subscriptionId string, resourceGroupName string) ResourceGroupId { return ResourceGroupId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, } } -func (id ResourceGroupId) String() string { - segments := []string{ - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Resource Group", segmentsStr) -} - -func (id ResourceGroupId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s" - return fmt.Sprintf(fmtString, 
id.SubscriptionId, id.ResourceGroup) -} - -// ParseResourceGroupID parses a ResourceGroup ID into an ResourceGroupId struct +// ParseResourceGroupID parses 'input' into a ResourceGroupId func ParseResourceGroupID(input string) (*ResourceGroupId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(ResourceGroupId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err - } - - resourceId := ResourceGroupId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") - } + var ok bool + id := ResourceGroupId{} - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParseResourceGroupIDInsensitively parses an ResourceGroup ID into an ResourceGroupId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseResourceGroupID method should be used instead for validation etc. +// ParseResourceGroupIDInsensitively parses 'input' case-insensitively into a ResourceGroupId +// note: this method should only be used for API response data and not user input func ParseResourceGroupIDInsensitively(input string) (*ResourceGroupId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(ResourceGroupId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := ResourceGroupId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - resourceId := ResourceGroupId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + return &id, nil +} + +// ValidateResourceGroupID checks that 'input' can be parsed as a Resource Group ID +func ValidateResourceGroupID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if _, err := ParseResourceGroupID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Resource Group ID +func (id ResourceGroupId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s" + return fmt.Sprintf(fmtString, 
id.SubscriptionId, id.ResourceGroupName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Resource Group ID +func (id ResourceGroupId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), } +} - return &resourceId, nil +// String returns a human-readable description of this Resource Group ID +func (id ResourceGroupId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + } + return fmt.Sprintf("Resource Group (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/maps/sdk/2021-02-01/accounts/id_resourcegroup_test.go b/internal/services/maps/sdk/2021-02-01/accounts/id_resourcegroup_test.go index 75bd41bc29e2..c4a6f235ea46 100644 --- a/internal/services/maps/sdk/2021-02-01/accounts/id_resourcegroup_test.go +++ b/internal/services/maps/sdk/2021-02-01/accounts/id_resourcegroup_test.go @@ -6,13 +6,25 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = ResourceGroupId{} +var _ resourceids.ResourceId = ResourceGroupId{} -func TestResourceGroupIDFormatter(t *testing.T) { - actual := NewResourceGroupID("{subscriptionId}", "{resourceGroupName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}" +func TestNewResourceGroupID(t *testing.T) { + id := NewResourceGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } +} + +func TestFormatResourceGroupID(t *testing.T) { + actual := NewResourceGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) } } @@ -22,53 +34,40 @@ func TestParseResourceGroupID(t *testing.T) { Error bool Expected *ResourceGroupId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - - { - // missing SubscriptionId - Input: "/", - Error: true, - }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + // Valid URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -78,7 +77,7 @@ func TestParseResourceGroupID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -87,9 +86,11 @@ func TestParseResourceGroupID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + } } @@ -99,74 +100,68 @@ func TestParseResourceGroupIDInsensitively(t *testing.T) { Error bool Expected *ResourceGroupId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", - Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", - Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", }, }, - { - // mixed-cased segment names - Input: 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -176,7 +171,7 @@ func TestParseResourceGroupIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -185,8 +180,10 @@ func TestParseResourceGroupIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + } } diff --git a/internal/services/maps/sdk/2021-02-01/accounts/id_subscription.go b/internal/services/maps/sdk/2021-02-01/accounts/id_subscription.go index dc099d4eeeca..f95107ae71d3 100644 --- a/internal/services/maps/sdk/2021-02-01/accounts/id_subscription.go +++ b/internal/services/maps/sdk/2021-02-01/accounts/id_subscription.go @@ -7,69 +7,90 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = SubscriptionId{} + +// SubscriptionId is a struct representing the Resource ID for a Subscription type SubscriptionId struct { SubscriptionId string } +// NewSubscriptionID returns a new SubscriptionId struct func NewSubscriptionID(subscriptionId string) SubscriptionId { return SubscriptionId{ SubscriptionId: subscriptionId, } } -func (id SubscriptionId) String() string { - segments := []string{} - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Subscription", segmentsStr) -} - -func (id SubscriptionId) ID() string { - fmtString := "/subscriptions/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId) -} - -// ParseSubscriptionID parses a Subscription ID into an SubscriptionId struct +// ParseSubscriptionID parses 'input' into a SubscriptionId func ParseSubscriptionID(input string) (*SubscriptionId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(SubscriptionId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := SubscriptionId{ - SubscriptionId: id.SubscriptionID, - } + var ok bool + id := SubscriptionId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if 
id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err - } - - return &resourceId, nil + return &id, nil } -// ParseSubscriptionIDInsensitively parses an Subscription ID into an SubscriptionId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseSubscriptionID method should be used instead for validation etc. +// ParseSubscriptionIDInsensitively parses 'input' case-insensitively into a SubscriptionId +// note: this method should only be used for API response data and not user input func ParseSubscriptionIDInsensitively(input string) (*SubscriptionId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(SubscriptionId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := SubscriptionId{ - SubscriptionId: id.SubscriptionID, + var ok bool + id := SubscriptionId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + return &id, nil +} + +// ValidateSubscriptionID checks that 'input' can be parsed as a Subscription ID +func ValidateSubscriptionID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if _, err := ParseSubscriptionID(v); err != nil { + errors = append(errors, err) } - return &resourceId, nil + return +} + +// ID returns the formatted Subscription ID +func (id SubscriptionId) ID() string { + fmtString := "/subscriptions/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId) +} + +// Segments returns a slice of Resource ID Segments which comprise this Subscription ID +func (id SubscriptionId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + } +} + +// String returns a human-readable description of this Subscription ID +func (id SubscriptionId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + } + return fmt.Sprintf("Subscription (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/maps/sdk/2021-02-01/accounts/id_subscription_test.go b/internal/services/maps/sdk/2021-02-01/accounts/id_subscription_test.go index 4eb8eb1ac22a..7f03b066d161 100644 --- a/internal/services/maps/sdk/2021-02-01/accounts/id_subscription_test.go +++ b/internal/services/maps/sdk/2021-02-01/accounts/id_subscription_test.go @@ -6,13 +6,21 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = SubscriptionId{} +var _ resourceids.ResourceId = SubscriptionId{} -func TestSubscriptionIDFormatter(t *testing.T) { - actual := NewSubscriptionID("{subscriptionId}").ID() - expected := "/subscriptions/{subscriptionId}" +func TestNewSubscriptionID(t *testing.T) { + id := 
NewSubscriptionID("12345678-1234-9876-4563-123456789012") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } +} + +func TestFormatSubscriptionID(t *testing.T) { + actual := NewSubscriptionID("12345678-1234-9876-4563-123456789012").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,40 +30,29 @@ func TestParseSubscriptionID(t *testing.T) { Error bool Expected *SubscriptionId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", - Error: true, - }, - - { - // valid - Input: "/subscriptions/{subscriptionId}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -65,7 +62,7 @@ func TestParseSubscriptionID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -74,6 +71,7 @@ func TestParseSubscriptionID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } + } } @@ -83,58 +81,46 @@ func TestParseSubscriptionIDInsensitively(t *testing.T) { Error bool Expected *SubscriptionId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", }, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}", - Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", - }, + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/extra", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}", + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}", - Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", - }, + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is 
insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/extra", + Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -144,7 +130,7 @@ func TestParseSubscriptionIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -153,5 +139,6 @@ func TestParseSubscriptionIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } + } } diff --git a/internal/services/maps/sdk/2021-02-01/accounts/model_systemdata.go b/internal/services/maps/sdk/2021-02-01/accounts/model_systemdata.go index e096bd0b8bfa..8bc1758a073f 100644 --- a/internal/services/maps/sdk/2021-02-01/accounts/model_systemdata.go +++ b/internal/services/maps/sdk/2021-02-01/accounts/model_systemdata.go @@ -1,11 +1,5 @@ package accounts -import ( - "time" - - "github.com/hashicorp/go-azure-helpers/lang/dates" -) - type SystemData struct { CreatedAt *string `json:"createdAt,omitempty"` CreatedBy *string `json:"createdBy,omitempty"` @@ -14,21 +8,3 @@ type SystemData struct { LastModifiedBy *string `json:"lastModifiedBy,omitempty"` LastModifiedByType *CreatedByType `json:"lastModifiedByType,omitempty"` } - -func (o SystemData) GetCreatedAtAsTime() (*time.Time, error) { - return dates.ParseAsFormat(o.CreatedAt, "2006-01-02T15:04:05Z07:00") -} - -func (o SystemData) SetCreatedAtAsTime(input time.Time) { - formatted := input.Format("2006-01-02T15:04:05Z07:00") - o.CreatedAt = &formatted -} - -func (o SystemData) GetLastModifiedAtAsTime() (*time.Time, error) { - return dates.ParseAsFormat(o.LastModifiedAt, "2006-01-02T15:04:05Z07:00") -} - -func (o SystemData) SetLastModifiedAtAsTime(input time.Time) { - formatted := input.Format("2006-01-02T15:04:05Z07:00") - o.LastModifiedAt = &formatted -} diff --git a/internal/services/maps/sdk/2021-02-01/accounts/predicates.go b/internal/services/maps/sdk/2021-02-01/accounts/predicates.go index 11d6b453c585..05de8044727f 100644 --- a/internal/services/maps/sdk/2021-02-01/accounts/predicates.go +++ b/internal/services/maps/sdk/2021-02-01/accounts/predicates.go @@ -8,6 +8,7 @@ type MapsAccountPredicate struct { } func (p MapsAccountPredicate) Matches(input MapsAccount) bool { + if p.Id != nil && (input.Id == nil && *p.Id != *input.Id) { return false } diff --git a/internal/services/maps/sdk/2021-02-01/creators/client.go b/internal/services/maps/sdk/2021-02-01/creators/client.go new file mode 100644 index 000000000000..f01bc96dccfb --- /dev/null +++ b/internal/services/maps/sdk/2021-02-01/creators/client.go @@ -0,0 +1,15 @@ +package creators + +import "github.com/Azure/go-autorest/autorest" + +type CreatorsClient struct { + Client autorest.Client + baseUri string +} + +func NewCreatorsClientWithBaseURI(endpoint string) CreatorsClient { + return CreatorsClient{ + Client: autorest.NewClientWithUserAgent(userAgent()), + baseUri: endpoint, + } +} diff --git a/internal/services/maps/sdk/2021-02-01/creators/id_account.go b/internal/services/maps/sdk/2021-02-01/creators/id_account.go new file mode 100644 index 000000000000..eaf98848f564 --- /dev/null +++ b/internal/services/maps/sdk/2021-02-01/creators/id_account.go @@ -0,0 +1,124 @@ +package creators + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ 
resourceids.ResourceId = AccountId{} + +// AccountId is a struct representing the Resource ID for a Account +type AccountId struct { + SubscriptionId string + ResourceGroupName string + AccountName string +} + +// NewAccountID returns a new AccountId struct +func NewAccountID(subscriptionId string, resourceGroupName string, accountName string) AccountId { + return AccountId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + AccountName: accountName, + } +} + +// ParseAccountID parses 'input' into a AccountId +func ParseAccountID(input string) (*AccountId, error) { + parser := resourceids.NewParserFromResourceIdType(AccountId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := AccountId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.AccountName, ok = parsed.Parsed["accountName"]; !ok { + return nil, fmt.Errorf("the segment 'accountName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParseAccountIDInsensitively parses 'input' case-insensitively into a AccountId +// note: this method should only be used for API response data and not user input +func ParseAccountIDInsensitively(input string) (*AccountId, error) { + parser := resourceids.NewParserFromResourceIdType(AccountId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := AccountId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.AccountName, ok = parsed.Parsed["accountName"]; !ok { + return nil, fmt.Errorf("the segment 'accountName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidateAccountID checks that 'input' can be parsed as a Account ID +func ValidateAccountID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseAccountID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Account ID +func (id AccountId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Maps/accounts/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.AccountName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Account ID +func (id AccountId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + 
resourceids.StaticSegment("providers", "providers", "providers"), + resourceids.ResourceProviderSegment("microsoftMaps", "Microsoft.Maps", "Microsoft.Maps"), + resourceids.StaticSegment("accounts", "accounts", "accounts"), + resourceids.UserSpecifiedSegment("accountName", "accountValue"), + } +} + +// String returns a human-readable description of this Account ID +func (id AccountId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Account Name: %q", id.AccountName), + } + return fmt.Sprintf("Account (%s)", strings.Join(components, "\n")) +} diff --git a/internal/services/maps/sdk/2021-02-01/creators/id_account_test.go b/internal/services/maps/sdk/2021-02-01/creators/id_account_test.go new file mode 100644 index 000000000000..5422e848448a --- /dev/null +++ b/internal/services/maps/sdk/2021-02-01/creators/id_account_test.go @@ -0,0 +1,264 @@ +package creators + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = AccountId{} + +func TestNewAccountID(t *testing.T) { + id := NewAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "accountValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.AccountName != "accountValue" { + t.Fatalf("Expected %q but got %q for Segment 'AccountName'", id.AccountName, "accountValue") + } +} + +func TestFormatAccountID(t *testing.T) { + actual := NewAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "accountValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps/accounts/accountValue" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) + } +} + +func TestParseAccountID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *AccountId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps/accounts", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps/accounts/accountValue", + Expected: 
&AccountId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + AccountName: "accountValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps/accounts/accountValue/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseAccountID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.AccountName != v.Expected.AccountName { + t.Fatalf("Expected %q but got %q for AccountName", v.Expected.AccountName, actual.AccountName) + } + + } +} + +func TestParseAccountIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *AccountId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mApS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps/accounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mApS/aCcOuNtS", + 
Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps/accounts/accountValue", + Expected: &AccountId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + AccountName: "accountValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps/accounts/accountValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mApS/aCcOuNtS/aCcOuNtVaLuE", + Expected: &AccountId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + AccountName: "aCcOuNtVaLuE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mApS/aCcOuNtS/aCcOuNtVaLuE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseAccountIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.AccountName != v.Expected.AccountName { + t.Fatalf("Expected %q but got %q for AccountName", v.Expected.AccountName, actual.AccountName) + } + + } +} diff --git a/internal/services/maps/sdk/2021-02-01/creators/id_creator.go b/internal/services/maps/sdk/2021-02-01/creators/id_creator.go new file mode 100644 index 000000000000..dda2e7a230f1 --- /dev/null +++ b/internal/services/maps/sdk/2021-02-01/creators/id_creator.go @@ -0,0 +1,137 @@ +package creators + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = CreatorId{} + +// CreatorId is a struct representing the Resource ID for a Creator +type CreatorId struct { + SubscriptionId string + ResourceGroupName string + AccountName string + CreatorName string +} + +// NewCreatorID returns a new CreatorId struct +func NewCreatorID(subscriptionId string, resourceGroupName string, accountName string, creatorName string) CreatorId { + return CreatorId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + AccountName: accountName, + CreatorName: creatorName, + } +} + +// ParseCreatorID parses 'input' into a CreatorId +func ParseCreatorID(input string) (*CreatorId, error) { + parser := resourceids.NewParserFromResourceIdType(CreatorId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := CreatorId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if 
id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.AccountName, ok = parsed.Parsed["accountName"]; !ok { + return nil, fmt.Errorf("the segment 'accountName' was not found in the resource id %q", input) + } + + if id.CreatorName, ok = parsed.Parsed["creatorName"]; !ok { + return nil, fmt.Errorf("the segment 'creatorName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParseCreatorIDInsensitively parses 'input' case-insensitively into a CreatorId +// note: this method should only be used for API response data and not user input +func ParseCreatorIDInsensitively(input string) (*CreatorId, error) { + parser := resourceids.NewParserFromResourceIdType(CreatorId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := CreatorId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.AccountName, ok = parsed.Parsed["accountName"]; !ok { + return nil, fmt.Errorf("the segment 'accountName' was not found in the resource id %q", input) + } + + if id.CreatorName, ok = parsed.Parsed["creatorName"]; !ok { + return nil, fmt.Errorf("the segment 'creatorName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidateCreatorID checks that 'input' can be parsed as a Creator ID +func ValidateCreatorID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseCreatorID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Creator ID +func (id CreatorId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Maps/accounts/%s/creators/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.AccountName, id.CreatorName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Creator ID +func (id CreatorId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("providers", "providers", "providers"), + resourceids.ResourceProviderSegment("microsoftMaps", "Microsoft.Maps", "Microsoft.Maps"), + resourceids.StaticSegment("accounts", "accounts", "accounts"), + resourceids.UserSpecifiedSegment("accountName", "accountValue"), + resourceids.StaticSegment("creators", "creators", "creators"), + resourceids.UserSpecifiedSegment("creatorName", "creatorValue"), + } +} + +// String returns a human-readable description of this Creator ID +func (id CreatorId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", 
id.ResourceGroupName), + fmt.Sprintf("Account Name: %q", id.AccountName), + fmt.Sprintf("Creator Name: %q", id.CreatorName), + } + return fmt.Sprintf("Creator (%s)", strings.Join(components, "\n")) +} diff --git a/internal/services/maps/sdk/2021-02-01/creators/id_creator_test.go b/internal/services/maps/sdk/2021-02-01/creators/id_creator_test.go new file mode 100644 index 000000000000..85b3f826e2c1 --- /dev/null +++ b/internal/services/maps/sdk/2021-02-01/creators/id_creator_test.go @@ -0,0 +1,309 @@ +package creators + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = CreatorId{} + +func TestNewCreatorID(t *testing.T) { + id := NewCreatorID("12345678-1234-9876-4563-123456789012", "example-resource-group", "accountValue", "creatorValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.AccountName != "accountValue" { + t.Fatalf("Expected %q but got %q for Segment 'AccountName'", id.AccountName, "accountValue") + } + + if id.CreatorName != "creatorValue" { + t.Fatalf("Expected %q but got %q for Segment 'CreatorName'", id.CreatorName, "creatorValue") + } +} + +func TestFormatCreatorID(t *testing.T) { + actual := NewCreatorID("12345678-1234-9876-4563-123456789012", "example-resource-group", "accountValue", "creatorValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps/accounts/accountValue/creators/creatorValue" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) + } +} + +func TestParseCreatorID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *CreatorId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps/accounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps/accounts/accountValue", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps/accounts/accountValue/creators", + Error: true, + }, + { + // Valid URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps/accounts/accountValue/creators/creatorValue", + Expected: &CreatorId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + AccountName: "accountValue", + CreatorName: "creatorValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps/accounts/accountValue/creators/creatorValue/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseCreatorID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.AccountName != v.Expected.AccountName { + t.Fatalf("Expected %q but got %q for AccountName", v.Expected.AccountName, actual.AccountName) + } + + if actual.CreatorName != v.Expected.CreatorName { + t.Fatalf("Expected %q but got %q for CreatorName", v.Expected.CreatorName, actual.CreatorName) + } + + } +} + +func TestParseCreatorIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *CreatorId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mApS", + Error: true, + }, 
+ { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps/accounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mApS/aCcOuNtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps/accounts/accountValue", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mApS/aCcOuNtS/aCcOuNtVaLuE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps/accounts/accountValue/creators", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mApS/aCcOuNtS/aCcOuNtVaLuE/cReAtOrS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps/accounts/accountValue/creators/creatorValue", + Expected: &CreatorId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + AccountName: "accountValue", + CreatorName: "creatorValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Maps/accounts/accountValue/creators/creatorValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mApS/aCcOuNtS/aCcOuNtVaLuE/cReAtOrS/cReAtOrVaLuE", + Expected: &CreatorId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + AccountName: "aCcOuNtVaLuE", + CreatorName: "cReAtOrVaLuE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mApS/aCcOuNtS/aCcOuNtVaLuE/cReAtOrS/cReAtOrVaLuE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseCreatorIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.AccountName != v.Expected.AccountName { + t.Fatalf("Expected %q but got %q for AccountName", v.Expected.AccountName, actual.AccountName) + } + + if actual.CreatorName != v.Expected.CreatorName { + t.Fatalf("Expected %q but got %q for CreatorName", 
v.Expected.CreatorName, actual.CreatorName) + } + + } +} diff --git a/internal/services/maps/sdk/2021-02-01/creators/method_createorupdate_autorest.go b/internal/services/maps/sdk/2021-02-01/creators/method_createorupdate_autorest.go new file mode 100644 index 000000000000..acd0ca7ba9b2 --- /dev/null +++ b/internal/services/maps/sdk/2021-02-01/creators/method_createorupdate_autorest.go @@ -0,0 +1,65 @@ +package creators + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type CreateOrUpdateResponse struct { + HttpResponse *http.Response + Model *Creator +} + +// CreateOrUpdate ... +func (c CreatorsClient) CreateOrUpdate(ctx context.Context, id CreatorId, input Creator) (result CreateOrUpdateResponse, err error) { + req, err := c.preparerForCreateOrUpdate(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "creators.CreatorsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "creators.CreatorsClient", "CreateOrUpdate", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForCreateOrUpdate(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "creators.CreatorsClient", "CreateOrUpdate", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForCreateOrUpdate prepares the CreateOrUpdate request. +func (c CreatorsClient) preparerForCreateOrUpdate(ctx context.Context, id CreatorId, input Creator) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForCreateOrUpdate handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (c CreatorsClient) responderForCreateOrUpdate(resp *http.Response) (result CreateOrUpdateResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/internal/services/maps/sdk/2021-02-01/creators/method_delete_autorest.go b/internal/services/maps/sdk/2021-02-01/creators/method_delete_autorest.go new file mode 100644 index 000000000000..808983262e61 --- /dev/null +++ b/internal/services/maps/sdk/2021-02-01/creators/method_delete_autorest.go @@ -0,0 +1,61 @@ +package creators + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type DeleteResponse struct { + HttpResponse *http.Response +} + +// Delete ... 
+func (c CreatorsClient) Delete(ctx context.Context, id CreatorId) (result DeleteResponse, err error) {
+	req, err := c.preparerForDelete(ctx, id)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "creators.CreatorsClient", "Delete", nil, "Failure preparing request")
+		return
+	}
+
+	result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client))
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "creators.CreatorsClient", "Delete", result.HttpResponse, "Failure sending request")
+		return
+	}
+
+	result, err = c.responderForDelete(result.HttpResponse)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "creators.CreatorsClient", "Delete", result.HttpResponse, "Failure responding to request")
+		return
+	}
+
+	return
+}
+
+// preparerForDelete prepares the Delete request.
+func (c CreatorsClient) preparerForDelete(ctx context.Context, id CreatorId) (*http.Request, error) {
+	queryParameters := map[string]interface{}{
+		"api-version": defaultApiVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsDelete(),
+		autorest.WithBaseURL(c.baseUri),
+		autorest.WithPath(id.ID()),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// responderForDelete handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (c CreatorsClient) responderForDelete(resp *http.Response) (result DeleteResponse, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK),
+		autorest.ByClosing())
+	result.HttpResponse = resp
+	return
+}
diff --git a/internal/services/maps/sdk/2021-02-01/creators/method_get_autorest.go b/internal/services/maps/sdk/2021-02-01/creators/method_get_autorest.go
new file mode 100644
index 000000000000..cf25918e10f9
--- /dev/null
+++ b/internal/services/maps/sdk/2021-02-01/creators/method_get_autorest.go
@@ -0,0 +1,64 @@
+package creators
+
+import (
+	"context"
+	"net/http"
+
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+)
+
+type GetResponse struct {
+	HttpResponse *http.Response
+	Model        *Creator
+}
+
+// Get ...
+func (c CreatorsClient) Get(ctx context.Context, id CreatorId) (result GetResponse, err error) {
+	req, err := c.preparerForGet(ctx, id)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "creators.CreatorsClient", "Get", nil, "Failure preparing request")
+		return
+	}
+
+	result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client))
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "creators.CreatorsClient", "Get", result.HttpResponse, "Failure sending request")
+		return
+	}
+
+	result, err = c.responderForGet(result.HttpResponse)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "creators.CreatorsClient", "Get", result.HttpResponse, "Failure responding to request")
+		return
+	}
+
+	return
+}
+
+// preparerForGet prepares the Get request.
+func (c CreatorsClient) preparerForGet(ctx context.Context, id CreatorId) (*http.Request, error) {
+	queryParameters := map[string]interface{}{
+		"api-version": defaultApiVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsContentType("application/json; charset=utf-8"),
+		autorest.AsGet(),
+		autorest.WithBaseURL(c.baseUri),
+		autorest.WithPath(id.ID()),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// responderForGet handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (c CreatorsClient) responderForGet(resp *http.Response) (result GetResponse, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result.Model),
+		autorest.ByClosing())
+	result.HttpResponse = resp
+	return
+}
diff --git a/internal/services/maps/sdk/2021-02-01/creators/method_listbyaccount_autorest.go b/internal/services/maps/sdk/2021-02-01/creators/method_listbyaccount_autorest.go
new file mode 100644
index 000000000000..0cf1e097cafa
--- /dev/null
+++ b/internal/services/maps/sdk/2021-02-01/creators/method_listbyaccount_autorest.go
@@ -0,0 +1,183 @@
+package creators
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"net/url"
+
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+)
+
+type ListByAccountResponse struct {
+	HttpResponse *http.Response
+	Model        *[]Creator
+
+	nextLink     *string
+	nextPageFunc func(ctx context.Context, nextLink string) (ListByAccountResponse, error)
+}
+
+type ListByAccountCompleteResult struct {
+	Items []Creator
+}
+
+func (r ListByAccountResponse) HasMore() bool {
+	return r.nextLink != nil
+}
+
+func (r ListByAccountResponse) LoadMore(ctx context.Context) (resp ListByAccountResponse, err error) {
+	if !r.HasMore() {
+		err = fmt.Errorf("no more pages returned")
+		return
+	}
+	return r.nextPageFunc(ctx, *r.nextLink)
+}
+
+// ListByAccount ...
+func (c CreatorsClient) ListByAccount(ctx context.Context, id AccountId) (resp ListByAccountResponse, err error) { + req, err := c.preparerForListByAccount(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "creators.CreatorsClient", "ListByAccount", nil, "Failure preparing request") + return + } + + resp.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "creators.CreatorsClient", "ListByAccount", resp.HttpResponse, "Failure sending request") + return + } + + resp, err = c.responderForListByAccount(resp.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "creators.CreatorsClient", "ListByAccount", resp.HttpResponse, "Failure responding to request") + return + } + return +} + +// ListByAccountComplete retrieves all of the results into a single object +func (c CreatorsClient) ListByAccountComplete(ctx context.Context, id AccountId) (ListByAccountCompleteResult, error) { + return c.ListByAccountCompleteMatchingPredicate(ctx, id, CreatorPredicate{}) +} + +// ListByAccountCompleteMatchingPredicate retrieves all of the results and then applied the predicate +func (c CreatorsClient) ListByAccountCompleteMatchingPredicate(ctx context.Context, id AccountId, predicate CreatorPredicate) (resp ListByAccountCompleteResult, err error) { + items := make([]Creator, 0) + + page, err := c.ListByAccount(ctx, id) + if err != nil { + err = fmt.Errorf("loading the initial page: %+v", err) + return + } + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + for page.HasMore() { + page, err = page.LoadMore(ctx) + if err != nil { + err = fmt.Errorf("loading the next page: %+v", err) + return + } + + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + } + + out := ListByAccountCompleteResult{ + Items: items, + } + return out, nil +} + +// preparerForListByAccount prepares the ListByAccount request. +func (c CreatorsClient) preparerForListByAccount(ctx context.Context, id AccountId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/creators", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// preparerForListByAccountWithNextLink prepares the ListByAccount request with the given nextLink token. +func (c CreatorsClient) preparerForListByAccountWithNextLink(ctx context.Context, nextLink string) (*http.Request, error) { + uri, err := url.Parse(nextLink) + if err != nil { + return nil, fmt.Errorf("parsing nextLink %q: %+v", nextLink, err) + } + queryParameters := map[string]interface{}{} + for k, v := range uri.Query() { + if len(v) == 0 { + continue + } + val := v[0] + val = autorest.Encode("query", val) + queryParameters[k] = val + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(uri.Path), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForListByAccount handles the response to the ListByAccount request. 
The method always +// closes the http.Response Body. +func (c CreatorsClient) responderForListByAccount(resp *http.Response) (result ListByAccountResponse, err error) { + type page struct { + Values []Creator `json:"value"` + NextLink *string `json:"nextLink"` + } + var respObj page + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&respObj), + autorest.ByClosing()) + result.HttpResponse = resp + result.Model = &respObj.Values + result.nextLink = respObj.NextLink + if respObj.NextLink != nil { + result.nextPageFunc = func(ctx context.Context, nextLink string) (result ListByAccountResponse, err error) { + req, err := c.preparerForListByAccountWithNextLink(ctx, nextLink) + if err != nil { + err = autorest.NewErrorWithError(err, "creators.CreatorsClient", "ListByAccount", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "creators.CreatorsClient", "ListByAccount", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForListByAccount(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "creators.CreatorsClient", "ListByAccount", result.HttpResponse, "Failure responding to request") + return + } + + return + } + } + return +} diff --git a/internal/services/maps/sdk/2021-02-01/creators/method_update_autorest.go b/internal/services/maps/sdk/2021-02-01/creators/method_update_autorest.go new file mode 100644 index 000000000000..131d64d6ec47 --- /dev/null +++ b/internal/services/maps/sdk/2021-02-01/creators/method_update_autorest.go @@ -0,0 +1,65 @@ +package creators + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type UpdateResponse struct { + HttpResponse *http.Response + Model *Creator +} + +// Update ... +func (c CreatorsClient) Update(ctx context.Context, id CreatorId, input CreatorUpdateParameters) (result UpdateResponse, err error) { + req, err := c.preparerForUpdate(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "creators.CreatorsClient", "Update", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "creators.CreatorsClient", "Update", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForUpdate(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "creators.CreatorsClient", "Update", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForUpdate prepares the Update request. +func (c CreatorsClient) preparerForUpdate(ctx context.Context, id CreatorId, input CreatorUpdateParameters) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForUpdate handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (c CreatorsClient) responderForUpdate(resp *http.Response) (result UpdateResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/internal/services/maps/sdk/2021-02-01/creators/model_creator.go b/internal/services/maps/sdk/2021-02-01/creators/model_creator.go new file mode 100644 index 000000000000..c21ffc6c4023 --- /dev/null +++ b/internal/services/maps/sdk/2021-02-01/creators/model_creator.go @@ -0,0 +1,10 @@ +package creators + +type Creator struct { + Id *string `json:"id,omitempty"` + Location string `json:"location"` + Name *string `json:"name,omitempty"` + Properties CreatorProperties `json:"properties"` + Tags *map[string]string `json:"tags,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/internal/services/maps/sdk/2021-02-01/creators/model_creatorproperties.go b/internal/services/maps/sdk/2021-02-01/creators/model_creatorproperties.go new file mode 100644 index 000000000000..29911060ce26 --- /dev/null +++ b/internal/services/maps/sdk/2021-02-01/creators/model_creatorproperties.go @@ -0,0 +1,6 @@ +package creators + +type CreatorProperties struct { + ProvisioningState *string `json:"provisioningState,omitempty"` + StorageUnits int64 `json:"storageUnits"` +} diff --git a/internal/services/maps/sdk/2021-02-01/creators/model_creatorupdateparameters.go b/internal/services/maps/sdk/2021-02-01/creators/model_creatorupdateparameters.go new file mode 100644 index 000000000000..9642bcb0d329 --- /dev/null +++ b/internal/services/maps/sdk/2021-02-01/creators/model_creatorupdateparameters.go @@ -0,0 +1,6 @@ +package creators + +type CreatorUpdateParameters struct { + Properties *CreatorProperties `json:"properties,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` +} diff --git a/internal/services/maps/sdk/2021-02-01/creators/predicates.go b/internal/services/maps/sdk/2021-02-01/creators/predicates.go new file mode 100644 index 000000000000..5094dec93f85 --- /dev/null +++ b/internal/services/maps/sdk/2021-02-01/creators/predicates.go @@ -0,0 +1,29 @@ +package creators + +type CreatorPredicate struct { + Id *string + Location *string + Name *string + Type *string +} + +func (p CreatorPredicate) Matches(input Creator) bool { + + if p.Id != nil && (input.Id == nil && *p.Id != *input.Id) { + return false + } + + if p.Location != nil && *p.Location != input.Location { + return false + } + + if p.Name != nil && (input.Name == nil && *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil && *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/internal/services/maps/sdk/2021-02-01/creators/version.go b/internal/services/maps/sdk/2021-02-01/creators/version.go new file mode 100644 index 000000000000..a938be04aeb5 --- /dev/null +++ b/internal/services/maps/sdk/2021-02-01/creators/version.go @@ -0,0 +1,9 @@ +package creators + +import "fmt" + +const defaultApiVersion = "2021-02-01" + +func userAgent() string { + return fmt.Sprintf("pandora/creators/%s", defaultApiVersion) +} diff --git a/internal/services/monitor/client/client.go b/internal/services/monitor/client/client.go index fccbecf85e2a..ed3e466bcaf0 100644 --- a/internal/services/monitor/client/client.go +++ b/internal/services/monitor/client/client.go @@ -27,6 +27,7 @@ type Client struct { DiagnosticSettingsCategoryClient *classic.DiagnosticSettingsCategoryClient LogProfilesClient 
*classic.LogProfilesClient MetricAlertsClient *classic.MetricAlertsClient + PrivateLinkScopesClient *classic.PrivateLinkScopesClient ScheduledQueryRulesClient *classic.ScheduledQueryRulesClient } @@ -64,6 +65,9 @@ func NewClient(o *common.ClientOptions) *Client { MetricAlertsClient := classic.NewMetricAlertsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&MetricAlertsClient.Client, o.ResourceManagerAuthorizer) + PrivateLinkScopesClient := classic.NewPrivateLinkScopesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&PrivateLinkScopesClient.Client, o.ResourceManagerAuthorizer) + ScheduledQueryRulesClient := classic.NewScheduledQueryRulesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&ScheduledQueryRulesClient.Client, o.ResourceManagerAuthorizer) @@ -79,6 +83,7 @@ func NewClient(o *common.ClientOptions) *Client { DiagnosticSettingsCategoryClient: &DiagnosticSettingsCategoryClient, LogProfilesClient: &LogProfilesClient, MetricAlertsClient: &MetricAlertsClient, + PrivateLinkScopesClient: &PrivateLinkScopesClient, ScheduledQueryRulesClient: &ScheduledQueryRulesClient, } } diff --git a/internal/services/monitor/monitor_private_link_scope_resource.go b/internal/services/monitor/monitor_private_link_scope_resource.go new file mode 100644 index 000000000000..54a83ae90ddc --- /dev/null +++ b/internal/services/monitor/monitor_private_link_scope_resource.go @@ -0,0 +1,138 @@ +package monitor + +import ( + "fmt" + "log" + "time" + + "github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2021-07-01-preview/insights" + "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" + "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" + "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/monitor/parse" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/monitor/validate" + "github.com/hashicorp/terraform-provider-azurerm/internal/tags" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" + "github.com/hashicorp/terraform-provider-azurerm/utils" +) + +func resourceMonitorPrivateLinkScope() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceMonitorPrivateLinkScopeCreateUpdate, + Read: resourceMonitorPrivateLinkScopeRead, + Update: resourceMonitorPrivateLinkScopeCreateUpdate, + Delete: resourceMonitorPrivateLinkScopeDelete, + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.PrivateLinkScopeID(id) + return err + }), + + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.PrivateLinkScopeName, + }, + + "resource_group_name": azure.SchemaResourceGroupName(), + + "tags": tags.Schema(), + }, + } +} + +func resourceMonitorPrivateLinkScopeCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + client := meta.(*clients.Client).Monitor.PrivateLinkScopesClient + ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, 
d) + defer cancel() + + name := d.Get("name").(string) + resourceGroup := d.Get("resource_group_name").(string) + + id := parse.NewPrivateLinkScopeID(subscriptionId, resourceGroup, name) + + if d.IsNewResource() { + existing, err := client.Get(ctx, id.ResourceGroup, id.Name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("checking for presence of existing %s: %+v", id, err) + } + } + + if !utils.ResponseWasNotFound(existing.Response) { + return tf.ImportAsExistsError("azurerm_monitor_private_link_scope", id.ID()) + } + } + + parameters := insights.AzureMonitorPrivateLinkScope{ + Location: utils.String("Global"), + Tags: tags.Expand(d.Get("tags").(map[string]interface{})), + } + + if _, err := client.CreateOrUpdate(ctx, resourceGroup, name, parameters); err != nil { + return fmt.Errorf("creating/updating %s: %+v", id, err) + } + + d.SetId(id.ID()) + + return resourceMonitorPrivateLinkScopeRead(d, meta) +} + +func resourceMonitorPrivateLinkScopeRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Monitor.PrivateLinkScopesClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.PrivateLinkScopeID(d.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, id.ResourceGroup, id.Name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[DEBUG] %s does not exist - removing from state!", id) + d.SetId("") + return nil + } + return fmt.Errorf("retrieving %s: %+v", id, err) + } + + d.Set("name", id.Name) + d.Set("resource_group_name", id.ResourceGroup) + + return tags.FlattenAndSet(d, resp.Tags) +} + +func resourceMonitorPrivateLinkScopeDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Monitor.PrivateLinkScopesClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.PrivateLinkScopeID(d.Id()) + if err != nil { + return err + } + + future, err := client.Delete(ctx, id.ResourceGroup, id.Name) + if err != nil { + return fmt.Errorf("deleting %s: %+v", id, err) + } + + if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting for deletion of %s: %+v", *id, err) + } + + return nil +} diff --git a/internal/services/monitor/monitor_private_link_scope_resource_test.go b/internal/services/monitor/monitor_private_link_scope_resource_test.go new file mode 100644 index 000000000000..f7033ec686ee --- /dev/null +++ b/internal/services/monitor/monitor_private_link_scope_resource_test.go @@ -0,0 +1,150 @@ +package monitor_test + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" + "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/monitor/parse" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + "github.com/hashicorp/terraform-provider-azurerm/utils" +) + +type MonitorPrivateLinkScopeResource struct{} + +func TestAccMonitorPrivateLinkScope_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_monitor_private_link_scope", "test") + r := MonitorPrivateLinkScopeResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + 
check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccMonitorPrivateLinkScope_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_monitor_private_link_scope", "test") + r := MonitorPrivateLinkScopeResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func TestAccMonitorPrivateLinkScope_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_monitor_private_link_scope", "test") + r := MonitorPrivateLinkScopeResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data, "Test"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccMonitorPrivateLinkScope_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_monitor_private_link_scope", "test") + r := MonitorPrivateLinkScopeResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data, "Test1"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.complete(data, "Test2"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func (r MonitorPrivateLinkScopeResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.PrivateLinkScopeID(state.ID) + if err != nil { + return nil, err + } + + resp, err := client.Monitor.PrivateLinkScopesClient.Get(ctx, id.ResourceGroup, id.Name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return utils.Bool(false), nil + } + return nil, fmt.Errorf("retrieving %q %+v", id, err) + } + + return utils.Bool(resp.AzureMonitorPrivateLinkScopeProperties != nil), nil +} + +func (r MonitorPrivateLinkScopeResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-pls-%d" + location = "%s" +} +`, data.RandomInteger, data.Locations.Primary) +} + +func (r MonitorPrivateLinkScopeResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_monitor_private_link_scope" "test" { + name = "acctest-ampls-%d" + resource_group_name = azurerm_resource_group.test.name +} +`, r.template(data), data.RandomInteger) +} + +func (r MonitorPrivateLinkScopeResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_monitor_private_link_scope" "import" { + name = azurerm_monitor_private_link_scope.test.name + resource_group_name = azurerm_monitor_private_link_scope.test.resource_group_name +} +`, r.basic(data)) +} + +func (r MonitorPrivateLinkScopeResource) complete(data acceptance.TestData, tag string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_monitor_private_link_scope" "test" { + name = "acctest-AMPLS-%d" + resource_group_name = azurerm_resource_group.test.name + + tags = { + ENV = "%s" + } +} +`, r.template(data), data.RandomInteger, tag) +} diff --git a/internal/services/monitor/parse/private_link_scope.go b/internal/services/monitor/parse/private_link_scope.go new file mode 100644 index 000000000000..5c3049a319a8 --- 
/dev/null +++ b/internal/services/monitor/parse/private_link_scope.go @@ -0,0 +1,69 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" +) + +type PrivateLinkScopeId struct { + SubscriptionId string + ResourceGroup string + Name string +} + +func NewPrivateLinkScopeID(subscriptionId, resourceGroup, name string) PrivateLinkScopeId { + return PrivateLinkScopeId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + Name: name, + } +} + +func (id PrivateLinkScopeId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Private Link Scope", segmentsStr) +} + +func (id PrivateLinkScopeId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Insights/privateLinkScopes/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.Name) +} + +// PrivateLinkScopeID parses a PrivateLinkScope ID into an PrivateLinkScopeId struct +func PrivateLinkScopeID(input string) (*PrivateLinkScopeId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := PrivateLinkScopeId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.Name, err = id.PopSegment("privateLinkScopes"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/internal/services/monitor/parse/private_link_scope_test.go b/internal/services/monitor/parse/private_link_scope_test.go new file mode 100644 index 000000000000..1c09f4d69709 --- /dev/null +++ b/internal/services/monitor/parse/private_link_scope_test.go @@ -0,0 +1,112 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "testing" + + "github.com/hashicorp/terraform-provider-azurerm/internal/resourceid" +) + +var _ resourceid.Formatter = PrivateLinkScopeId{} + +func TestPrivateLinkScopeIDFormatter(t *testing.T) { + actual := NewPrivateLinkScopeID("12345678-1234-9876-4563-123456789012", "group1", "pls1").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.Insights/privateLinkScopes/pls1" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestPrivateLinkScopeID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *PrivateLinkScopeId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Error: true, + }, + + { + // missing Name + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.Insights/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.Insights/privateLinkScopes/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.Insights/privateLinkScopes/pls1", + Expected: &PrivateLinkScopeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "group1", + Name: "pls1", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/GROUP1/PROVIDERS/MICROSOFT.INSIGHTS/PRIVATELINKSCOPES/PLS1", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := PrivateLinkScopeID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/internal/services/monitor/registration.go b/internal/services/monitor/registration.go index 791dd7eea5b1..78a0beaf6643 100644 --- a/internal/services/monitor/registration.go +++ b/internal/services/monitor/registration.go @@ -41,6 +41,7 @@ func (r Registration) SupportedResources() map[string]*pluginsdk.Resource { "azurerm_monitor_diagnostic_setting": resourceMonitorDiagnosticSetting(), "azurerm_monitor_log_profile": resourceMonitorLogProfile(), "azurerm_monitor_metric_alert": resourceMonitorMetricAlert(), + "azurerm_monitor_private_link_scope": resourceMonitorPrivateLinkScope(), "azurerm_monitor_scheduled_query_rules_alert": resourceMonitorScheduledQueryRulesAlert(), "azurerm_monitor_scheduled_query_rules_log": resourceMonitorScheduledQueryRulesLog(), "azurerm_monitor_smart_detector_alert_rule": resourceMonitorSmartDetectorAlertRule(), diff --git a/internal/services/monitor/resourceids.go b/internal/services/monitor/resourceids.go index 8b54ac58422f..1fa157bc3c84 100644 --- a/internal/services/monitor/resourceids.go +++ b/internal/services/monitor/resourceids.go @@ -7,4 +7,5 @@ package monitor //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=AutoscaleSetting -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.Insights/autoscaleSettings/setting1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=LogProfile -id=/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.Insights/logProfiles/profile1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=MetricAlert -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.Insights/metricAlerts/alert1 +//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=PrivateLinkScope -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.Insights/privateLinkScopes/pls1 
//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=ScheduledQueryRules -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.Insights/scheduledQueryRules/rule1 diff --git a/internal/services/signalr/validate/service_id.go b/internal/services/monitor/validate/private_link_scope_id.go similarity index 51% rename from internal/services/signalr/validate/service_id.go rename to internal/services/monitor/validate/private_link_scope_id.go index 0442fefc5875..643b0f3aa8cd 100644 --- a/internal/services/signalr/validate/service_id.go +++ b/internal/services/monitor/validate/private_link_scope_id.go @@ -1,19 +1,21 @@ package validate +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + import ( "fmt" - "github.com/hashicorp/terraform-provider-azurerm/internal/services/signalr/sdk/2020-05-01/signalr" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/monitor/parse" ) -func ServiceID(input interface{}, key string) (warnings []string, errors []error) { +func PrivateLinkScopeID(input interface{}, key string) (warnings []string, errors []error) { v, ok := input.(string) if !ok { errors = append(errors, fmt.Errorf("expected %q to be a string", key)) return } - if _, err := signalr.ParseSignalRID(v); err != nil { + if _, err := parse.PrivateLinkScopeID(v); err != nil { errors = append(errors, err) } diff --git a/internal/services/signalr/validate/service_id_test.go b/internal/services/monitor/validate/private_link_scope_id_test.go similarity index 69% rename from internal/services/signalr/validate/service_id_test.go rename to internal/services/monitor/validate/private_link_scope_id_test.go index a17761186a39..1b6274d5b331 100644 --- a/internal/services/signalr/validate/service_id_test.go +++ b/internal/services/monitor/validate/private_link_scope_id_test.go @@ -1,8 +1,10 @@ package validate +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + import "testing" -func TestServiceID(t *testing.T) { +func TestPrivateLinkScopeID(t *testing.T) { cases := []struct { Input string Valid bool @@ -39,32 +41,32 @@ func TestServiceID(t *testing.T) { }, { - // missing SignalRName - Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.", + // missing Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.Insights/", Valid: false, }, { - // missing value for SignalRName - Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.SignalRService/SignalR/", + // missing value for Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.Insights/privateLinkScopes/", Valid: false, }, { // valid - Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.SignalRService/SignalR/service1", + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.Insights/privateLinkScopes/pls1", Valid: true, }, { // upper-cased - Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.SIGNALRSERVICE/SIGNALR/SERVICE1", + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/GROUP1/PROVIDERS/MICROSOFT.INSIGHTS/PRIVATELINKSCOPES/PLS1", Valid: false, }, } for _, tc := range cases { t.Logf("[DEBUG] Testing Value %s", tc.Input) - 
_, errors := ServiceID(tc.Input, "test") + _, errors := PrivateLinkScopeID(tc.Input, "test") valid := len(errors) == 0 if tc.Valid != valid { diff --git a/internal/services/monitor/validate/private_link_scope_name.go b/internal/services/monitor/validate/private_link_scope_name.go new file mode 100644 index 000000000000..2c0216ab4459 --- /dev/null +++ b/internal/services/monitor/validate/private_link_scope_name.go @@ -0,0 +1,31 @@ +package validate + +import ( + "fmt" + "regexp" +) + +func PrivateLinkScopeName(i interface{}, k string) (warning []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) + return + } + + if len(v) < 1 { + errors = append(errors, fmt.Errorf("length should be at least %d", 1)) + return + } + + if len(v) > 255 { + errors = append(errors, fmt.Errorf("length should be less than or equal to %d", 255)) + return + } + + if !regexp.MustCompile(`^[a-zA-Z0-9()_.-]*[a-zA-Z0-9_-]$`).MatchString(v) { + errors = append(errors, fmt.Errorf("%q only allows alphanumeric characters, periods, underscores, hyphens and parentheses and cannot end in a period", k)) + return + } + + return +} diff --git a/internal/services/monitor/validate/private_link_scope_name_test.go b/internal/services/monitor/validate/private_link_scope_name_test.go new file mode 100644 index 000000000000..805e7dc6f0f6 --- /dev/null +++ b/internal/services/monitor/validate/private_link_scope_name_test.go @@ -0,0 +1,76 @@ +package validate + +import ( + "strings" + "testing" +) + +func TestPrivateLinkScopeName(t *testing.T) { + testData := []struct { + input string + expected bool + }{ + { + input: "", + expected: false, + }, + { + input: "a", + expected: true, + }, + { + input: "A", + expected: true, + }, + { + input: "8", + expected: true, + }, + { + input: "a_-.b", + expected: true, + }, + { + input: "ab_", + expected: true, + }, + { + input: "ab-", + expected: true, + }, + { + input: "a+", + expected: false, + }, + { + input: "a.", + expected: false, + }, + { + input: "a.", + expected: false, + }, + { + input: strings.Repeat("s", 254), + expected: true, + }, + { + input: strings.Repeat("s", 255), + expected: true, + }, + { + input: strings.Repeat("s", 256), + expected: false, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q..", v.input) + + _, errors := PrivateLinkScopeName(v.input, "name") + actual := len(errors) == 0 + if v.expected != actual { + t.Fatalf("Expected %t but got %t", v.expected, actual) + } + } +} diff --git a/internal/services/mssql/mssql_virtual_machine_resource_test.go b/internal/services/mssql/mssql_virtual_machine_resource_test.go index cbbc35a66369..8ccb6e96ebd7 100644 --- a/internal/services/mssql/mssql_virtual_machine_resource_test.go +++ b/internal/services/mssql/mssql_virtual_machine_resource_test.go @@ -4,9 +4,7 @@ import ( "context" "fmt" "testing" - "time" - "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" @@ -129,16 +127,10 @@ func TestAccMsSqlVirtualMachine_autoPatching(t *testing.T) { func TestAccMsSqlVirtualMachine_keyVault(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_mssql_virtual_machine", "test") r := MsSqlVirtualMachineResource{} - value, err := uuid.GenerateUUID() - if err != nil { - t.Fatal(err) - } - loc, _ := time.LoadLocation("UTC") - keyVaultTime :=
time.Now().UTC().Add(time.Hour * 240).In(loc).Format("2006-01-02T15:04:00Z") data.ResourceTest(t, r, []acceptance.TestStep{ { - Config: r.withKeyVault(data, value, keyVaultTime), + Config: r.withKeyVault(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), @@ -146,7 +138,7 @@ func TestAccMsSqlVirtualMachine_keyVault(t *testing.T) { data.ImportStep("key_vault_credential.0.key_vault_url", "key_vault_credential.0.service_principal_name", "key_vault_credential.0.service_principal_secret"), { - Config: r.withKeyVaultUpdated(data, value, keyVaultTime), + Config: r.withKeyVaultUpdated(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), @@ -457,7 +449,7 @@ resource "azurerm_mssql_virtual_machine" "test" { `, r.template(data), data.RandomString) } -func (r MsSqlVirtualMachineResource) withKeyVault(data acceptance.TestData, value string, keyvaultTime string) string { +func (r MsSqlVirtualMachineResource) withKeyVault(data acceptance.TestData) string { return fmt.Sprintf(` %[1]s @@ -511,17 +503,15 @@ resource "azurerm_key_vault_key" "generated" { } resource "azuread_application" "test" { - name = "acctestspa%[2]d" + display_name = "acctestspa%[2]d" } resource "azuread_service_principal" "test" { application_id = azuread_application.test.application_id } -resource "azuread_service_principal_password" "test" { - service_principal_id = azuread_service_principal.test.id - value = "%[3]s" - end_date = "%[4]s" +resource "azuread_application_password" "test" { + application_object_id = azuread_application.test.object_id } resource "azurerm_mssql_virtual_machine" "test" { @@ -531,13 +521,13 @@ resource "azurerm_mssql_virtual_machine" "test" { name = "acctestkv" key_vault_url = azurerm_key_vault_key.generated.id service_principal_name = azuread_service_principal.test.display_name - service_principal_secret = azuread_service_principal_password.test.value + service_principal_secret = azuread_application_password.test.value } } -`, r.template(data), data.RandomInteger, value, keyvaultTime) +`, r.template(data), data.RandomInteger) } -func (r MsSqlVirtualMachineResource) withKeyVaultUpdated(data acceptance.TestData, value string, keyvaultTime string) string { +func (r MsSqlVirtualMachineResource) withKeyVaultUpdated(data acceptance.TestData) string { return fmt.Sprintf(` %[1]s @@ -591,17 +581,15 @@ resource "azurerm_key_vault_key" "generated" { } resource "azuread_application" "test" { - name = "acctestspa%[2]d" + display_name = "acctestspa%[2]d" } resource "azuread_service_principal" "test" { application_id = azuread_application.test.application_id } -resource "azuread_service_principal_password" "test" { - service_principal_id = azuread_service_principal.test.id - value = "%[3]s" - end_date = "%[4]s" +resource "azuread_application_password" "test" { + application_object_id = azuread_application.test.object_id } resource "azurerm_mssql_virtual_machine" "test" { @@ -611,10 +599,10 @@ resource "azurerm_mssql_virtual_machine" "test" { name = "acctestkv2" key_vault_url = azurerm_key_vault_key.generated.id service_principal_name = azuread_service_principal.test.display_name - service_principal_secret = azuread_service_principal_password.test.value + service_principal_secret = azuread_application_password.test.value } } -`, r.template(data), data.RandomInteger, value, keyvaultTime) +`, r.template(data), data.RandomInteger) } func (r MsSqlVirtualMachineResource) storageConfiguration(data acceptance.TestData) string { diff --git 
a/internal/services/mysql/client/client.go b/internal/services/mysql/client/client.go index 19bea28550b9..f9e8fbe7cc4a 100644 --- a/internal/services/mysql/client/client.go +++ b/internal/services/mysql/client/client.go @@ -12,6 +12,7 @@ type Client struct { FirewallRulesClient *mysql.FirewallRulesClient FlexibleServerConfigurationsClient *mysqlflexibleservers.ConfigurationsClient FlexibleServerClient *mysqlflexibleservers.ServersClient + FlexibleServerFirewallRulesClient *mysqlflexibleservers.FirewallRulesClient ServersClient *mysql.ServersClient ServerKeysClient *mysql.ServerKeysClient ServerSecurityAlertPoliciesClient *mysql.ServerSecurityAlertPoliciesClient @@ -32,6 +33,9 @@ func NewClient(o *common.ClientOptions) *Client { flexibleServerClient := mysqlflexibleservers.NewServersClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&flexibleServerClient.Client, o.ResourceManagerAuthorizer) + flexibleServerFirewallRulesClient := mysqlflexibleservers.NewFirewallRulesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&flexibleServerFirewallRulesClient.Client, o.ResourceManagerAuthorizer) + flexibleServerConfigurationsClient := mysqlflexibleservers.NewConfigurationsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&flexibleServerConfigurationsClient.Client, o.ResourceManagerAuthorizer) @@ -55,6 +59,7 @@ func NewClient(o *common.ClientOptions) *Client { DatabasesClient: &DatabasesClient, FirewallRulesClient: &FirewallRulesClient, FlexibleServerClient: &flexibleServerClient, + FlexibleServerFirewallRulesClient: &flexibleServerFirewallRulesClient, FlexibleServerConfigurationsClient: &flexibleServerConfigurationsClient, ServersClient: &ServersClient, ServerKeysClient: &ServerKeysClient, diff --git a/internal/services/mysql/mysql_flexible_server_firewall_rule_resource.go b/internal/services/mysql/mysql_flexible_server_firewall_rule_resource.go new file mode 100644 index 000000000000..308ca3057e93 --- /dev/null +++ b/internal/services/mysql/mysql_flexible_server_firewall_rule_resource.go @@ -0,0 +1,163 @@ +package mysql + +import ( + "fmt" + "log" + "time" + + "github.com/Azure/azure-sdk-for-go/services/mysql/mgmt/2021-05-01/mysqlflexibleservers" + "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" + "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" + azValidate "github.com/hashicorp/terraform-provider-azurerm/helpers/validate" + "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/mysql/parse" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/mysql/validate" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" + "github.com/hashicorp/terraform-provider-azurerm/utils" +) + +func resourceMySqlFlexibleServerFirewallRule() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceMySqlFlexibleServerFirewallRuleCreateUpdate, + Read: resourceMySqlFlexibleServerFirewallRuleRead, + Update: resourceMySqlFlexibleServerFirewallRuleCreateUpdate, + Delete: resourceMySqlFlexibleServerFirewallRuleDelete, + + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.FlexibleServerFirewallRuleID(id) + return err + }), + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + 
Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + }, + + "resource_group_name": azure.SchemaResourceGroupName(), + + "server_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.FlexibleServerName, + }, + + "start_ip_address": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: azValidate.IPv4Address, + }, + + "end_ip_address": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: azValidate.IPv4Address, + }, + }, + } +} + +func resourceMySqlFlexibleServerFirewallRuleCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).MySQL.FlexibleServerFirewallRulesClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + log.Printf("[INFO] preparing arguments for AzureRM MySQL Flexible Server Firewall Rule creation.") + startIPAddress := d.Get("start_ip_address").(string) + endIPAddress := d.Get("end_ip_address").(string) + + id := parse.NewFlexibleServerFirewallRuleID(subscriptionId, d.Get("resource_group_name").(string), d.Get("server_name").(string), d.Get("name").(string)) + if d.IsNewResource() { + existing, err := client.Get(ctx, id.ResourceGroup, id.FlexibleServerName, id.FirewallRuleName) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("checking for presence of existing %s: %v", id, err) + } + } + + if !utils.ResponseWasNotFound(existing.Response) { + return tf.ImportAsExistsError("azurerm_mysql_flexible_server_firewall_rule", id.ID()) + } + } + + properties := mysqlflexibleservers.FirewallRule{ + FirewallRuleProperties: &mysqlflexibleservers.FirewallRuleProperties{ + StartIPAddress: utils.String(startIPAddress), + EndIPAddress: utils.String(endIPAddress), + }, + } + + future, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.FlexibleServerName, id.FirewallRuleName, properties) + if err != nil { + return fmt.Errorf("creating/updating %s: %v", id, err) + } + + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting for create/update of %s: %v", id, err) + } + + d.SetId(id.ID()) + return resourceMySqlFlexibleServerFirewallRuleRead(d, meta) +} + +func resourceMySqlFlexibleServerFirewallRuleRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).MySQL.FlexibleServerFirewallRulesClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.FlexibleServerFirewallRuleID(d.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, id.ResourceGroup, id.FlexibleServerName, id.FirewallRuleName) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + d.SetId("") + return nil + } + return fmt.Errorf("retrieving %s: %+v", *id, err) + } + + d.Set("name", id.FirewallRuleName) + d.Set("server_name", id.FlexibleServerName) + d.Set("resource_group_name", id.ResourceGroup) + + d.Set("start_ip_address", resp.StartIPAddress) + d.Set("end_ip_address", resp.EndIPAddress) + + return nil +} + +func resourceMySqlFlexibleServerFirewallRuleDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).MySQL.FlexibleServerFirewallRulesClient + ctx, cancel := 
timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.FlexibleServerFirewallRuleID(d.Id()) + if err != nil { + return err + } + + future, err := client.Delete(ctx, id.ResourceGroup, id.FlexibleServerName, id.FirewallRuleName) + if err != nil { + return fmt.Errorf("deleting %s: %+v", *id, err) + } + + if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting for deletion of %s: %+v", *id, err) + } + + return nil +} diff --git a/internal/services/mysql/mysql_flexible_server_firewall_rule_resource_test.go b/internal/services/mysql/mysql_flexible_server_firewall_rule_resource_test.go new file mode 100644 index 000000000000..9667b4a89f6b --- /dev/null +++ b/internal/services/mysql/mysql_flexible_server_firewall_rule_resource_test.go @@ -0,0 +1,105 @@ +package mysql_test + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" + "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/mysql/parse" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + "github.com/hashicorp/terraform-provider-azurerm/utils" +) + +type MySQLFlexibleServerFirewallRuleResource struct { +} + +func TestAccMySQLFlexibleServerFirewallRule_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_mysql_flexible_server_firewall_rule", "test") + r := MySQLFlexibleServerFirewallRuleResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccMySQLFlexibleServerFirewallRule_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_mysql_flexible_server_firewall_rule", "test") + r := MySQLFlexibleServerFirewallRuleResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func (t MySQLFlexibleServerFirewallRuleResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.FlexibleServerFirewallRuleID(state.ID) + if err != nil { + return nil, err + } + + resp, err := clients.MySQL.FlexibleServerFirewallRulesClient.Get(ctx, id.ResourceGroup, id.FlexibleServerName, id.FirewallRuleName) + if err != nil { + return nil, err + } + + return utils.Bool(resp.Name != nil), nil +} + +func (MySQLFlexibleServerFirewallRuleResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_mysql_flexible_server" "test" { + name = "acctest-fs-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + administrator_login = "adminTerraform" + administrator_password = "QAZwsx123" + sku_name = "B_Standard_B1s" +} + +resource "azurerm_mysql_flexible_server_firewall_rule" "test" { + name = "acctestfwrule-%d" + resource_group_name = azurerm_resource_group.test.name + server_name = azurerm_mysql_flexible_server.test.name + start_ip_address = "0.0.0.0" + 
end_ip_address = "255.255.255.255" +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) +} + +func (r MySQLFlexibleServerFirewallRuleResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_mysql_flexible_server_firewall_rule" "import" { + name = azurerm_mysql_flexible_server_firewall_rule.test.name + resource_group_name = azurerm_mysql_flexible_server_firewall_rule.test.resource_group_name + server_name = azurerm_mysql_flexible_server_firewall_rule.test.server_name + start_ip_address = azurerm_mysql_flexible_server_firewall_rule.test.start_ip_address + end_ip_address = azurerm_mysql_flexible_server_firewall_rule.test.end_ip_address +} +`, r.basic(data)) +} diff --git a/internal/services/mysql/parse/flexible_server_firewall_rule.go b/internal/services/mysql/parse/flexible_server_firewall_rule.go new file mode 100644 index 000000000000..d710ffd4bea0 --- /dev/null +++ b/internal/services/mysql/parse/flexible_server_firewall_rule.go @@ -0,0 +1,75 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" +) + +type FlexibleServerFirewallRuleId struct { + SubscriptionId string + ResourceGroup string + FlexibleServerName string + FirewallRuleName string +} + +func NewFlexibleServerFirewallRuleID(subscriptionId, resourceGroup, flexibleServerName, firewallRuleName string) FlexibleServerFirewallRuleId { + return FlexibleServerFirewallRuleId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + FlexibleServerName: flexibleServerName, + FirewallRuleName: firewallRuleName, + } +} + +func (id FlexibleServerFirewallRuleId) String() string { + segments := []string{ + fmt.Sprintf("Firewall Rule Name %q", id.FirewallRuleName), + fmt.Sprintf("Flexible Server Name %q", id.FlexibleServerName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Flexible Server Firewall Rule", segmentsStr) +} + +func (id FlexibleServerFirewallRuleId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DBforMySQL/flexibleServers/%s/firewallRules/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.FlexibleServerName, id.FirewallRuleName) +} + +// FlexibleServerFirewallRuleID parses a FlexibleServerFirewallRule ID into an FlexibleServerFirewallRuleId struct +func FlexibleServerFirewallRuleID(input string) (*FlexibleServerFirewallRuleId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := FlexibleServerFirewallRuleId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.FlexibleServerName, err = id.PopSegment("flexibleServers"); err != nil { + return nil, err + } + if resourceId.FirewallRuleName, err = id.PopSegment("firewallRules"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/internal/services/mysql/parse/flexible_server_firewall_rule_test.go 
b/internal/services/mysql/parse/flexible_server_firewall_rule_test.go new file mode 100644 index 000000000000..a08ec99a979c --- /dev/null +++ b/internal/services/mysql/parse/flexible_server_firewall_rule_test.go @@ -0,0 +1,128 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "testing" + + "github.com/hashicorp/terraform-provider-azurerm/internal/resourceid" +) + +var _ resourceid.Formatter = FlexibleServerFirewallRuleId{} + +func TestFlexibleServerFirewallRuleIDFormatter(t *testing.T) { + actual := NewFlexibleServerFirewallRuleID("12345678-1234-9876-4563-123456789012", "resGroup1", "flexibleServer1", "firewallRule1").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DBforMySQL/flexibleServers/flexibleServer1/firewallRules/firewallRule1" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestFlexibleServerFirewallRuleID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *FlexibleServerFirewallRuleId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Error: true, + }, + + { + // missing FlexibleServerName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DBforMySQL/", + Error: true, + }, + + { + // missing value for FlexibleServerName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DBforMySQL/flexibleServers/", + Error: true, + }, + + { + // missing FirewallRuleName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DBforMySQL/flexibleServers/flexibleServer1/", + Error: true, + }, + + { + // missing value for FirewallRuleName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DBforMySQL/flexibleServers/flexibleServer1/firewallRules/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DBforMySQL/flexibleServers/flexibleServer1/firewallRules/firewallRule1", + Expected: &FlexibleServerFirewallRuleId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resGroup1", + FlexibleServerName: "flexibleServer1", + FirewallRuleName: "firewallRule1", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.DBFORMYSQL/FLEXIBLESERVERS/FLEXIBLESERVER1/FIREWALLRULES/FIREWALLRULE1", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := FlexibleServerFirewallRuleID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if 
actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.FlexibleServerName != v.Expected.FlexibleServerName { + t.Fatalf("Expected %q but got %q for FlexibleServerName", v.Expected.FlexibleServerName, actual.FlexibleServerName) + } + if actual.FirewallRuleName != v.Expected.FirewallRuleName { + t.Fatalf("Expected %q but got %q for FirewallRuleName", v.Expected.FirewallRuleName, actual.FirewallRuleName) + } + } +} diff --git a/internal/services/mysql/registration.go b/internal/services/mysql/registration.go index 23a6b31fdeb2..c2116b20a836 100644 --- a/internal/services/mysql/registration.go +++ b/internal/services/mysql/registration.go @@ -33,6 +33,7 @@ func (r Registration) SupportedResources() map[string]*pluginsdk.Resource { "azurerm_mysql_firewall_rule": resourceMySqlFirewallRule(), "azurerm_mysql_flexible_server": resourceMysqlFlexibleServer(), "azurerm_mysql_flexible_server_configuration": resourceMySQLFlexibleServerConfiguration(), + "azurerm_mysql_flexible_server_firewall_rule": resourceMySqlFlexibleServerFirewallRule(), "azurerm_mysql_server": resourceMySqlServer(), "azurerm_mysql_server_key": resourceMySQLServerKey(), "azurerm_mysql_virtual_network_rule": resourceMySQLVirtualNetworkRule(), diff --git a/internal/services/mysql/resourceids.go b/internal/services/mysql/resourceids.go index 05abfff21a4a..eeb99805c91d 100644 --- a/internal/services/mysql/resourceids.go +++ b/internal/services/mysql/resourceids.go @@ -6,6 +6,7 @@ package mysql //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=FirewallRule -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DBforMySQL/servers/server1/firewallRules/firewallRule1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=FlexibleServer -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DBforMySQL/flexibleServers/flexibleServer1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=FlexibleServerConfiguration -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DBforMySQL/flexibleServers/flexibleServer1/configurations/config1 +//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=FlexibleServerFirewallRule -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DBforMySQL/flexibleServers/flexibleServer1/firewallRules/firewallRule1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=Key -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DBforMySQL/servers/server1/keys/key1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=Server -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DBforMySQL/servers/server1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=VirtualNetworkRule -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DBforMySQL/servers/server1/virtualNetworkRules/virtualNetworkRule1 diff --git a/internal/services/mysql/validate/flexible_server_firewall_rule_id.go b/internal/services/mysql/validate/flexible_server_firewall_rule_id.go new file mode 100644 index 000000000000..6386b05ab4c0 --- /dev/null +++ 
b/internal/services/mysql/validate/flexible_server_firewall_rule_id.go @@ -0,0 +1,23 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-azurerm/internal/services/mysql/parse" +) + +func FlexibleServerFirewallRuleID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := parse.FlexibleServerFirewallRuleID(v); err != nil { + errors = append(errors, err) + } + + return +} diff --git a/internal/services/mysql/validate/flexible_server_firewall_rule_id_test.go b/internal/services/mysql/validate/flexible_server_firewall_rule_id_test.go new file mode 100644 index 000000000000..f3a2c0b73536 --- /dev/null +++ b/internal/services/mysql/validate/flexible_server_firewall_rule_id_test.go @@ -0,0 +1,88 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import "testing" + +func TestFlexibleServerFirewallRuleID(t *testing.T) { + cases := []struct { + Input string + Valid bool + }{ + + { + // empty + Input: "", + Valid: false, + }, + + { + // missing SubscriptionId + Input: "/", + Valid: false, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Valid: false, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Valid: false, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Valid: false, + }, + + { + // missing FlexibleServerName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DBforMySQL/", + Valid: false, + }, + + { + // missing value for FlexibleServerName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DBforMySQL/flexibleServers/", + Valid: false, + }, + + { + // missing FirewallRuleName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DBforMySQL/flexibleServers/flexibleServer1/", + Valid: false, + }, + + { + // missing value for FirewallRuleName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DBforMySQL/flexibleServers/flexibleServer1/firewallRules/", + Valid: false, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DBforMySQL/flexibleServers/flexibleServer1/firewallRules/firewallRule1", + Valid: true, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.DBFORMYSQL/FLEXIBLESERVERS/FLEXIBLESERVER1/FIREWALLRULES/FIREWALLRULE1", + Valid: false, + }, + } + for _, tc := range cases { + t.Logf("[DEBUG] Testing Value %s", tc.Input) + _, errors := FlexibleServerFirewallRuleID(tc.Input, "test") + valid := len(errors) == 0 + + if tc.Valid != valid { + t.Fatalf("Expected %t but got %t", tc.Valid, valid) + } + } +} diff --git a/internal/services/network/network_watcher_flow_log_resource.go b/internal/services/network/network_watcher_flow_log_resource.go index 1aedeb1d5fcb..73580c0e8816 100644 --- a/internal/services/network/network_watcher_flow_log_resource.go +++ b/internal/services/network/network_watcher_flow_log_resource.go @@ -9,6 
+9,7 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/location" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/locks" "github.com/hashicorp/terraform-provider-azurerm/internal/services/network/parse" "github.com/hashicorp/terraform-provider-azurerm/internal/services/network/validate" "github.com/hashicorp/terraform-provider-azurerm/internal/tags" @@ -183,6 +184,9 @@ func resourceNetworkWatcherFlowLogCreateUpdate(d *pluginsdk.ResourceData, meta i nsgId, _ := parse.NetworkSecurityGroupID(networkSecurityGroupID) id := parse.NewFlowLogID(subscriptionId, resourceGroupName, networkWatcherName, *nsgId) + locks.ByID(nsgId.ID()) + defer locks.UnlockByID(nsgId.ID()) + loc := d.Get("location").(string) if loc == "" { // Get the containing network watcher in order to reuse its location if the "location" is not specified. @@ -296,6 +300,9 @@ func resourceNetworkWatcherFlowLogDelete(d *pluginsdk.ResourceData, meta interfa return err } + locks.ByID(id.NetworkSecurityGroupID()) + defer locks.UnlockByID(id.NetworkSecurityGroupID()) + future, err := client.Delete(ctx, id.ResourceGroupName, id.NetworkWatcherName, id.Name()) if err != nil { return fmt.Errorf("deleting %s: %v", id, err) diff --git a/internal/services/network/private_endpoint_resource.go b/internal/services/network/private_endpoint_resource.go index 7c1f19440262..5c5190224ed6 100644 --- a/internal/services/network/private_endpoint_resource.go +++ b/internal/services/network/private_endpoint_resource.go @@ -7,6 +7,8 @@ import ( "strings" "time" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/signalr/sdk/2020-05-01/signalr" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network" "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" @@ -623,6 +625,11 @@ func flattenPrivateLinkEndpointServiceConnection(serviceConnections *[]network.P privateConnectionId = serverId.ID() } } + if strings.Contains(strings.ToLower(privateConnectionId), "microsoft.signalrservice") { + if serviceId, err := signalr.ParseSignalRIDInsensitively(privateConnectionId); err == nil { + privateConnectionId = serviceId.ID() + } + } attrs["private_connection_resource_id"] = privateConnectionId } diff --git a/internal/services/network/virtual_network_gateway_resource.go b/internal/services/network/virtual_network_gateway_resource.go index b04ad27c485a..61195634494f 100644 --- a/internal/services/network/virtual_network_gateway_resource.go +++ b/internal/services/network/virtual_network_gateway_resource.go @@ -122,7 +122,7 @@ func resourceVirtualNetworkGateway() *pluginsdk.Resource { "ip_configuration": { Type: pluginsdk.TypeList, Required: true, - MaxItems: 2, + MaxItems: 3, Elem: &pluginsdk.Resource{ Schema: map[string]*pluginsdk.Schema{ "name": { diff --git a/internal/services/network/virtual_network_gateway_resource_test.go b/internal/services/network/virtual_network_gateway_resource_test.go index 7636908aaead..06718fdbf8be 100644 --- a/internal/services/network/virtual_network_gateway_resource_test.go +++ b/internal/services/network/virtual_network_gateway_resource_test.go @@ -93,6 +93,20 @@ func TestAccVirtualNetworkGateway_activeActive(t *testing.T) { }) } +func TestAccVirtualNetworkGateway_activeActiveZoneRedundantWithP2S(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_virtual_network_gateway", 
"test") + r := VirtualNetworkGatewayResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.activeActiveZoneRedundantWithP2S(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + }) +} + func TestAccVirtualNetworkGateway_standard(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_network_gateway", "test") r := VirtualNetworkGatewayResource{} @@ -605,6 +619,118 @@ resource "azurerm_virtual_network_gateway" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger) } +func (VirtualNetworkGatewayResource) activeActiveZoneRedundantWithP2S(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%[1]d" + location = "%[2]s" +} + +resource "azurerm_virtual_network" "test" { + name = "acctestvn-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + address_space = ["10.0.0.0/16"] +} + +resource "azurerm_subnet" "test" { + name = "GatewaySubnet" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name + address_prefix = "10.0.1.0/24" +} + +resource "azurerm_public_ip" "first" { + name = "acctestpip1-%[1]d" + + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + allocation_method = "Static" + sku = "Standard" + availability_zone = "Zone-Redundant" +} + +resource "azurerm_public_ip" "second" { + name = "acctestpip2-%[1]d" + + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + allocation_method = "Static" + sku = "Standard" + availability_zone = "Zone-Redundant" +} + +resource "azurerm_public_ip" "thirth" { + name = "acctestpip3-%[1]d" + + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + allocation_method = "Static" + sku = "Standard" + availability_zone = "Zone-Redundant" +} + +resource "azurerm_virtual_network_gateway" "test" { + depends_on = [ + azurerm_public_ip.first, + azurerm_public_ip.second, + azurerm_public_ip.thirth, + ] + name = "acctestvng-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + type = "Vpn" + vpn_type = "RouteBased" + sku = "VpnGw1AZ" + + active_active = true + enable_bgp = true + + ip_configuration { + name = "gw-ip1" + public_ip_address_id = azurerm_public_ip.first.id + + private_ip_address_allocation = "Dynamic" + subnet_id = azurerm_subnet.test.id + } + + ip_configuration { + name = "gw-ip2" + public_ip_address_id = azurerm_public_ip.second.id + + private_ip_address_allocation = "Dynamic" + subnet_id = azurerm_subnet.test.id + } + + ip_configuration { + name = "gw-ip3" + public_ip_address_id = azurerm_public_ip.thirth.id + + private_ip_address_allocation = "Dynamic" + subnet_id = azurerm_subnet.test.id + } + + vpn_client_configuration { + address_space = ["10.2.0.0/24"] + vpn_client_protocols = ["OpenVPN"] + + aad_tenant = "https://login.microsoftonline.com/%[3]s/" + aad_audience = "41b23e61-6c1e-4545-b367-cd054e0ed4b4" + aad_issuer = "https://sts.windows.net/%[3]s/" + } + + bgp_settings { + asn = "65010" + } +} +`, data.RandomInteger, data.Locations.Primary, data.Client().TenantID) +} + func (VirtualNetworkGatewayResource) vpnClientConfig(data 
acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { diff --git a/internal/services/powerbi/powerbi_embedded_resource.go b/internal/services/powerbi/powerbi_embedded_resource.go index 3fde3b313e21..8ce78b40541b 100644 --- a/internal/services/powerbi/powerbi_embedded_resource.go +++ b/internal/services/powerbi/powerbi_embedded_resource.go @@ -150,8 +150,8 @@ func resourcePowerBIEmbeddedRead(d *pluginsdk.ResourceData, meta interface{}) er return fmt.Errorf("retrieving %s: %+v", *id, err) } - d.Set("name", id.CapacityName) - d.Set("resource_group_name", id.ResourceGroup) + d.Set("name", id.DedicatedCapacityName) + d.Set("resource_group_name", id.ResourceGroupName) if model := resp.Model; model != nil { d.Set("location", location.Normalize(model.Location)) diff --git a/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/client.go b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/client.go new file mode 100644 index 000000000000..a7625c7b9ece --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/client.go @@ -0,0 +1,15 @@ +package autoscalevcores + +import "github.com/Azure/go-autorest/autorest" + +type AutoScaleVCoresClient struct { + Client autorest.Client + baseUri string +} + +func NewAutoScaleVCoresClientWithBaseURI(endpoint string) AutoScaleVCoresClient { + return AutoScaleVCoresClient{ + Client: autorest.NewClientWithUserAgent(userAgent()), + baseUri: endpoint, + } +} diff --git a/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/constants.go b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/constants.go new file mode 100644 index 000000000000..827f158a5089 --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/constants.go @@ -0,0 +1,87 @@ +package autoscalevcores + +import "strings" + +type IdentityType string + +const ( + IdentityTypeApplication IdentityType = "Application" + IdentityTypeKey IdentityType = "Key" + IdentityTypeManagedIdentity IdentityType = "ManagedIdentity" + IdentityTypeUser IdentityType = "User" +) + +func PossibleValuesForIdentityType() []string { + return []string{ + string(IdentityTypeApplication), + string(IdentityTypeKey), + string(IdentityTypeManagedIdentity), + string(IdentityTypeUser), + } +} + +func parseIdentityType(input string) (*IdentityType, error) { + vals := map[string]IdentityType{ + "application": IdentityTypeApplication, + "key": IdentityTypeKey, + "managedidentity": IdentityTypeManagedIdentity, + "user": IdentityTypeUser, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := IdentityType(input) + return &out, nil +} + +type VCoreProvisioningState string + +const ( + VCoreProvisioningStateSucceeded VCoreProvisioningState = "Succeeded" +) + +func PossibleValuesForVCoreProvisioningState() []string { + return []string{ + string(VCoreProvisioningStateSucceeded), + } +} + +func parseVCoreProvisioningState(input string) (*VCoreProvisioningState, error) { + vals := map[string]VCoreProvisioningState{ + "succeeded": VCoreProvisioningStateSucceeded, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := VCoreProvisioningState(input) + return &out, nil +} + +type VCoreSkuTier string + +const ( + VCoreSkuTierAutoScale VCoreSkuTier = "AutoScale" +) + +func PossibleValuesForVCoreSkuTier() []string { + return []string{ + string(VCoreSkuTierAutoScale), + } +} + +func 
parseVCoreSkuTier(input string) (*VCoreSkuTier, error) { + vals := map[string]VCoreSkuTier{ + "autoscale": VCoreSkuTierAutoScale, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := VCoreSkuTier(input) + return &out, nil +} diff --git a/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/id_autoscalevcore.go b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/id_autoscalevcore.go new file mode 100644 index 000000000000..2ac6b000cee0 --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/id_autoscalevcore.go @@ -0,0 +1,124 @@ +package autoscalevcores + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = AutoScaleVCoreId{} + +// AutoScaleVCoreId is a struct representing the Resource ID for a Auto Scale V Core +type AutoScaleVCoreId struct { + SubscriptionId string + ResourceGroupName string + VcoreName string +} + +// NewAutoScaleVCoreID returns a new AutoScaleVCoreId struct +func NewAutoScaleVCoreID(subscriptionId string, resourceGroupName string, vcoreName string) AutoScaleVCoreId { + return AutoScaleVCoreId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + VcoreName: vcoreName, + } +} + +// ParseAutoScaleVCoreID parses 'input' into a AutoScaleVCoreId +func ParseAutoScaleVCoreID(input string) (*AutoScaleVCoreId, error) { + parser := resourceids.NewParserFromResourceIdType(AutoScaleVCoreId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := AutoScaleVCoreId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.VcoreName, ok = parsed.Parsed["vcoreName"]; !ok { + return nil, fmt.Errorf("the segment 'vcoreName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParseAutoScaleVCoreIDInsensitively parses 'input' case-insensitively into a AutoScaleVCoreId +// note: this method should only be used for API response data and not user input +func ParseAutoScaleVCoreIDInsensitively(input string) (*AutoScaleVCoreId, error) { + parser := resourceids.NewParserFromResourceIdType(AutoScaleVCoreId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := AutoScaleVCoreId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.VcoreName, ok = parsed.Parsed["vcoreName"]; !ok { + return nil, fmt.Errorf("the segment 'vcoreName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidateAutoScaleVCoreID checks that 'input' can be parsed as a Auto Scale V Core ID +func ValidateAutoScaleVCoreID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = 
append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseAutoScaleVCoreID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Auto Scale V Core ID +func (id AutoScaleVCoreId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.PowerBIDedicated/autoScaleVCores/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.VcoreName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Auto Scale V Core ID +func (id AutoScaleVCoreId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("providers", "providers", "providers"), + resourceids.ResourceProviderSegment("microsoftPowerBIDedicated", "Microsoft.PowerBIDedicated", "Microsoft.PowerBIDedicated"), + resourceids.StaticSegment("autoScaleVCores", "autoScaleVCores", "autoScaleVCores"), + resourceids.UserSpecifiedSegment("vcoreName", "vcoreValue"), + } +} + +// String returns a human-readable description of this Auto Scale V Core ID +func (id AutoScaleVCoreId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Vcore Name: %q", id.VcoreName), + } + return fmt.Sprintf("Auto Scale V Core (%s)", strings.Join(components, "\n")) +} diff --git a/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/id_autoscalevcore_test.go b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/id_autoscalevcore_test.go new file mode 100644 index 000000000000..4abe6424836a --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/id_autoscalevcore_test.go @@ -0,0 +1,264 @@ +package autoscalevcores + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = AutoScaleVCoreId{} + +func TestNewAutoScaleVCoreID(t *testing.T) { + id := NewAutoScaleVCoreID("12345678-1234-9876-4563-123456789012", "example-resource-group", "vcoreValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.VcoreName != "vcoreValue" { + t.Fatalf("Expected %q but got %q for Segment 'VcoreName'", id.VcoreName, "vcoreValue") + } +} + +func TestFormatAutoScaleVCoreID(t *testing.T) { + actual := NewAutoScaleVCoreID("12345678-1234-9876-4563-123456789012", "example-resource-group", "vcoreValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.PowerBIDedicated/autoScaleVCores/vcoreValue" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) + } +} + +func TestParseAutoScaleVCoreID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *AutoScaleVCoreId + }{ + { + // 
Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.PowerBIDedicated", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.PowerBIDedicated/autoScaleVCores", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.PowerBIDedicated/autoScaleVCores/vcoreValue", + Expected: &AutoScaleVCoreId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + VcoreName: "vcoreValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.PowerBIDedicated/autoScaleVCores/vcoreValue/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseAutoScaleVCoreID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.VcoreName != v.Expected.VcoreName { + t.Fatalf("Expected %q but got %q for VcoreName", v.Expected.VcoreName, actual.VcoreName) + } + + } +} + +func TestParseAutoScaleVCoreIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *AutoScaleVCoreId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe 
since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.PowerBIDedicated", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.pOwErBiDeDiCaTeD", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.PowerBIDedicated/autoScaleVCores", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.pOwErBiDeDiCaTeD/aUtOsCaLeVcOrEs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.PowerBIDedicated/autoScaleVCores/vcoreValue", + Expected: &AutoScaleVCoreId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + VcoreName: "vcoreValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.PowerBIDedicated/autoScaleVCores/vcoreValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.pOwErBiDeDiCaTeD/aUtOsCaLeVcOrEs/vCoReVaLuE", + Expected: &AutoScaleVCoreId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + VcoreName: "vCoReVaLuE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.pOwErBiDeDiCaTeD/aUtOsCaLeVcOrEs/vCoReVaLuE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseAutoScaleVCoreIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.VcoreName != v.Expected.VcoreName { + t.Fatalf("Expected %q but got %q for VcoreName", v.Expected.VcoreName, actual.VcoreName) + } + + } +} diff --git a/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/id_resourcegroup.go 
b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/id_resourcegroup.go new file mode 100644 index 000000000000..8e1b3ed8d1ce --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/id_resourcegroup.go @@ -0,0 +1,109 @@ +package autoscalevcores + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = ResourceGroupId{} + +// ResourceGroupId is a struct representing the Resource ID for a Resource Group +type ResourceGroupId struct { + SubscriptionId string + ResourceGroupName string +} + +// NewResourceGroupID returns a new ResourceGroupId struct +func NewResourceGroupID(subscriptionId string, resourceGroupName string) ResourceGroupId { + return ResourceGroupId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + } +} + +// ParseResourceGroupID parses 'input' into a ResourceGroupId +func ParseResourceGroupID(input string) (*ResourceGroupId, error) { + parser := resourceids.NewParserFromResourceIdType(ResourceGroupId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := ResourceGroupId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParseResourceGroupIDInsensitively parses 'input' case-insensitively into a ResourceGroupId +// note: this method should only be used for API response data and not user input +func ParseResourceGroupIDInsensitively(input string) (*ResourceGroupId, error) { + parser := resourceids.NewParserFromResourceIdType(ResourceGroupId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := ResourceGroupId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidateResourceGroupID checks that 'input' can be parsed as a Resource Group ID +func ValidateResourceGroupID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseResourceGroupID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Resource Group ID +func (id ResourceGroupId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Resource Group ID +func (id ResourceGroupId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", 
"resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + } +} + +// String returns a human-readable description of this Resource Group ID +func (id ResourceGroupId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + } + return fmt.Sprintf("Resource Group (%s)", strings.Join(components, "\n")) +} diff --git a/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/id_resourcegroup_test.go b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/id_resourcegroup_test.go new file mode 100644 index 000000000000..6f734268c646 --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/id_resourcegroup_test.go @@ -0,0 +1,189 @@ +package autoscalevcores + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = ResourceGroupId{} + +func TestNewResourceGroupID(t *testing.T) { + id := NewResourceGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } +} + +func TestFormatResourceGroupID(t *testing.T) { + actual := NewResourceGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) + } +} + +func TestParseResourceGroupID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ResourceGroupId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Expected: &ResourceGroupId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseResourceGroupID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + } +} + +func TestParseResourceGroupIDInsensitively(t *testing.T) { + testData := 
[]struct { + Input string + Error bool + Expected *ResourceGroupId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Expected: &ResourceGroupId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Expected: &ResourceGroupId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseResourceGroupIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + } +} diff --git a/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/id_subscription.go b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/id_subscription.go new file mode 100644 index 000000000000..740d99f27486 --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/id_subscription.go @@ -0,0 +1,96 @@ +package autoscalevcores + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = SubscriptionId{} + +// SubscriptionId is a struct representing the Resource ID for a Subscription +type SubscriptionId struct { + SubscriptionId string +} + +// NewSubscriptionID returns a new SubscriptionId struct +func NewSubscriptionID(subscriptionId string) SubscriptionId { + return SubscriptionId{ + SubscriptionId: subscriptionId, + } +} + +// ParseSubscriptionID parses 'input' into a SubscriptionId +func ParseSubscriptionID(input string) (*SubscriptionId, error) { + parser := resourceids.NewParserFromResourceIdType(SubscriptionId{}) + parsed, err := 
parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := SubscriptionId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParseSubscriptionIDInsensitively parses 'input' case-insensitively into a SubscriptionId +// note: this method should only be used for API response data and not user input +func ParseSubscriptionIDInsensitively(input string) (*SubscriptionId, error) { + parser := resourceids.NewParserFromResourceIdType(SubscriptionId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := SubscriptionId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidateSubscriptionID checks that 'input' can be parsed as a Subscription ID +func ValidateSubscriptionID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseSubscriptionID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Subscription ID +func (id SubscriptionId) ID() string { + fmtString := "/subscriptions/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId) +} + +// Segments returns a slice of Resource ID Segments which comprise this Subscription ID +func (id SubscriptionId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + } +} + +// String returns a human-readable description of this Subscription ID +func (id SubscriptionId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + } + return fmt.Sprintf("Subscription (%s)", strings.Join(components, "\n")) +} diff --git a/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/id_subscription_test.go b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/id_subscription_test.go new file mode 100644 index 000000000000..16385280abf9 --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/id_subscription_test.go @@ -0,0 +1,144 @@ +package autoscalevcores + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = SubscriptionId{} + +func TestNewSubscriptionID(t *testing.T) { + id := NewSubscriptionID("12345678-1234-9876-4563-123456789012") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } +} + +func TestFormatSubscriptionID(t *testing.T) { + actual := NewSubscriptionID("12345678-1234-9876-4563-123456789012").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) + } +} + +func TestParseSubscriptionID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *SubscriptionId + }{ + { + // Incomplete 
URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Expected: &SubscriptionId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseSubscriptionID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + } +} + +func TestParseSubscriptionIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *SubscriptionId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Expected: &SubscriptionId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Expected: &SubscriptionId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseSubscriptionIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + } +} diff --git a/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/method_create_autorest.go b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/method_create_autorest.go new file mode 100644 index 000000000000..372c0a5d2f76 --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/method_create_autorest.go @@ -0,0 +1,65 @@ +package autoscalevcores + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type CreateResponse struct { + HttpResponse *http.Response + Model *AutoScaleVCore +} + +// Create ... 
+func (c AutoScaleVCoresClient) Create(ctx context.Context, id AutoScaleVCoreId, input AutoScaleVCore) (result CreateResponse, err error) { + req, err := c.preparerForCreate(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "autoscalevcores.AutoScaleVCoresClient", "Create", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "autoscalevcores.AutoScaleVCoresClient", "Create", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForCreate(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "autoscalevcores.AutoScaleVCoresClient", "Create", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForCreate prepares the Create request. +func (c AutoScaleVCoresClient) preparerForCreate(ctx context.Context, id AutoScaleVCoreId, input AutoScaleVCore) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForCreate handles the response to the Create request. The method always +// closes the http.Response Body. +func (c AutoScaleVCoresClient) responderForCreate(resp *http.Response) (result CreateResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/method_delete_autorest.go b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/method_delete_autorest.go new file mode 100644 index 000000000000..8bf6c4cdb9a5 --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/method_delete_autorest.go @@ -0,0 +1,61 @@ +package autoscalevcores + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type DeleteResponse struct { + HttpResponse *http.Response +} + +// Delete ... +func (c AutoScaleVCoresClient) Delete(ctx context.Context, id AutoScaleVCoreId) (result DeleteResponse, err error) { + req, err := c.preparerForDelete(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "autoscalevcores.AutoScaleVCoresClient", "Delete", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "autoscalevcores.AutoScaleVCoresClient", "Delete", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForDelete(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "autoscalevcores.AutoScaleVCoresClient", "Delete", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForDelete prepares the Delete request. 
+func (c AutoScaleVCoresClient) preparerForDelete(ctx context.Context, id AutoScaleVCoreId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForDelete handles the response to the Delete request. The method always +// closes the http.Response Body. +func (c AutoScaleVCoresClient) responderForDelete(resp *http.Response) (result DeleteResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/method_get_autorest.go b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/method_get_autorest.go new file mode 100644 index 000000000000..9b3ff12c2066 --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/method_get_autorest.go @@ -0,0 +1,64 @@ +package autoscalevcores + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type GetResponse struct { + HttpResponse *http.Response + Model *AutoScaleVCore +} + +// Get ... +func (c AutoScaleVCoresClient) Get(ctx context.Context, id AutoScaleVCoreId) (result GetResponse, err error) { + req, err := c.preparerForGet(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "autoscalevcores.AutoScaleVCoresClient", "Get", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "autoscalevcores.AutoScaleVCoresClient", "Get", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForGet(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "autoscalevcores.AutoScaleVCoresClient", "Get", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForGet prepares the Get request. +func (c AutoScaleVCoresClient) preparerForGet(ctx context.Context, id AutoScaleVCoreId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForGet handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (c AutoScaleVCoresClient) responderForGet(resp *http.Response) (result GetResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/method_listbyresourcegroup_autorest.go b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/method_listbyresourcegroup_autorest.go new file mode 100644 index 000000000000..9dabf28e853a --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/method_listbyresourcegroup_autorest.go @@ -0,0 +1,65 @@ +package autoscalevcores + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type ListByResourceGroupResponse struct { + HttpResponse *http.Response + Model *AutoScaleVCoreListResult +} + +// ListByResourceGroup ... +func (c AutoScaleVCoresClient) ListByResourceGroup(ctx context.Context, id ResourceGroupId) (result ListByResourceGroupResponse, err error) { + req, err := c.preparerForListByResourceGroup(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "autoscalevcores.AutoScaleVCoresClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "autoscalevcores.AutoScaleVCoresClient", "ListByResourceGroup", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForListByResourceGroup(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "autoscalevcores.AutoScaleVCoresClient", "ListByResourceGroup", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForListByResourceGroup prepares the ListByResourceGroup request. +func (c AutoScaleVCoresClient) preparerForListByResourceGroup(ctx context.Context, id ResourceGroupId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/providers/Microsoft.PowerBIDedicated/autoScaleVCores", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForListByResourceGroup handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. 
+func (c AutoScaleVCoresClient) responderForListByResourceGroup(resp *http.Response) (result ListByResourceGroupResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/method_listbysubscription_autorest.go b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/method_listbysubscription_autorest.go new file mode 100644 index 000000000000..722376fb7333 --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/method_listbysubscription_autorest.go @@ -0,0 +1,65 @@ +package autoscalevcores + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type ListBySubscriptionResponse struct { + HttpResponse *http.Response + Model *AutoScaleVCoreListResult +} + +// ListBySubscription ... +func (c AutoScaleVCoresClient) ListBySubscription(ctx context.Context, id SubscriptionId) (result ListBySubscriptionResponse, err error) { + req, err := c.preparerForListBySubscription(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "autoscalevcores.AutoScaleVCoresClient", "ListBySubscription", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "autoscalevcores.AutoScaleVCoresClient", "ListBySubscription", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForListBySubscription(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "autoscalevcores.AutoScaleVCoresClient", "ListBySubscription", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForListBySubscription prepares the ListBySubscription request. +func (c AutoScaleVCoresClient) preparerForListBySubscription(ctx context.Context, id SubscriptionId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/providers/Microsoft.PowerBIDedicated/autoScaleVCores", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForListBySubscription handles the response to the ListBySubscription request. The method always +// closes the http.Response Body. 
+func (c AutoScaleVCoresClient) responderForListBySubscription(resp *http.Response) (result ListBySubscriptionResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/method_update_autorest.go b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/method_update_autorest.go new file mode 100644 index 000000000000..ef8391901d01 --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/method_update_autorest.go @@ -0,0 +1,65 @@ +package autoscalevcores + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type UpdateResponse struct { + HttpResponse *http.Response + Model *AutoScaleVCore +} + +// Update ... +func (c AutoScaleVCoresClient) Update(ctx context.Context, id AutoScaleVCoreId, input AutoScaleVCoreUpdateParameters) (result UpdateResponse, err error) { + req, err := c.preparerForUpdate(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "autoscalevcores.AutoScaleVCoresClient", "Update", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "autoscalevcores.AutoScaleVCoresClient", "Update", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForUpdate(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "autoscalevcores.AutoScaleVCoresClient", "Update", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForUpdate prepares the Update request. +func (c AutoScaleVCoresClient) preparerForUpdate(ctx context.Context, id AutoScaleVCoreId, input AutoScaleVCoreUpdateParameters) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForUpdate handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (c AutoScaleVCoresClient) responderForUpdate(resp *http.Response) (result UpdateResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/model_autoscalevcore.go b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/model_autoscalevcore.go new file mode 100644 index 000000000000..db52cb1f77d1 --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/model_autoscalevcore.go @@ -0,0 +1,12 @@ +package autoscalevcores + +type AutoScaleVCore struct { + Id *string `json:"id,omitempty"` + Location string `json:"location"` + Name *string `json:"name,omitempty"` + Properties *AutoScaleVCoreProperties `json:"properties,omitempty"` + Sku AutoScaleVCoreSku `json:"sku"` + SystemData *SystemData `json:"systemData,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/model_autoscalevcorelistresult.go b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/model_autoscalevcorelistresult.go new file mode 100644 index 000000000000..6734ef056c57 --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/model_autoscalevcorelistresult.go @@ -0,0 +1,5 @@ +package autoscalevcores + +type AutoScaleVCoreListResult struct { + Value []AutoScaleVCore `json:"value"` +} diff --git a/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/model_autoscalevcoremutableproperties.go b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/model_autoscalevcoremutableproperties.go new file mode 100644 index 000000000000..113f9078c786 --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/model_autoscalevcoremutableproperties.go @@ -0,0 +1,5 @@ +package autoscalevcores + +type AutoScaleVCoreMutableProperties struct { + CapacityLimit *int64 `json:"capacityLimit,omitempty"` +} diff --git a/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/model_autoscalevcoreproperties.go b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/model_autoscalevcoreproperties.go new file mode 100644 index 000000000000..74342b8fd1ff --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/model_autoscalevcoreproperties.go @@ -0,0 +1,7 @@ +package autoscalevcores + +type AutoScaleVCoreProperties struct { + CapacityLimit *int64 `json:"capacityLimit,omitempty"` + CapacityObjectId *string `json:"capacityObjectId,omitempty"` + ProvisioningState *VCoreProvisioningState `json:"provisioningState,omitempty"` +} diff --git a/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/model_autoscalevcoresku.go b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/model_autoscalevcoresku.go new file mode 100644 index 000000000000..6c6feab87eea --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/model_autoscalevcoresku.go @@ -0,0 +1,7 @@ +package autoscalevcores + +type AutoScaleVCoreSku struct { + Capacity *int64 `json:"capacity,omitempty"` + Name string `json:"name"` + Tier *VCoreSkuTier `json:"tier,omitempty"` +} diff --git a/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/model_autoscalevcoreupdateparameters.go b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/model_autoscalevcoreupdateparameters.go new file mode 100644 index 000000000000..7f5d6193331e --- 
/dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/model_autoscalevcoreupdateparameters.go @@ -0,0 +1,7 @@ +package autoscalevcores + +type AutoScaleVCoreUpdateParameters struct { + Properties *AutoScaleVCoreMutableProperties `json:"properties,omitempty"` + Sku *AutoScaleVCoreSku `json:"sku,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` +} diff --git a/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/model_systemdata.go b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/model_systemdata.go new file mode 100644 index 000000000000..5c0537b0207e --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/model_systemdata.go @@ -0,0 +1,10 @@ +package autoscalevcores + +type SystemData struct { + CreatedAt *string `json:"createdAt,omitempty"` + CreatedBy *string `json:"createdBy,omitempty"` + CreatedByType *IdentityType `json:"createdByType,omitempty"` + LastModifiedAt *string `json:"lastModifiedAt,omitempty"` + LastModifiedBy *string `json:"lastModifiedBy,omitempty"` + LastModifiedByType *IdentityType `json:"lastModifiedByType,omitempty"` +} diff --git a/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/version.go b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/version.go new file mode 100644 index 000000000000..47778c7d2579 --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/autoscalevcores/version.go @@ -0,0 +1,9 @@ +package autoscalevcores + +import "fmt" + +const defaultApiVersion = "2021-01-01" + +func userAgent() string { + return fmt.Sprintf("pandora/autoscalevcores/%s", defaultApiVersion) +} diff --git a/internal/services/powerbi/sdk/2021-01-01/capacities/constants.go b/internal/services/powerbi/sdk/2021-01-01/capacities/constants.go index 15fb5e891da7..566d7842dc47 100644 --- a/internal/services/powerbi/sdk/2021-01-01/capacities/constants.go +++ b/internal/services/powerbi/sdk/2021-01-01/capacities/constants.go @@ -1,5 +1,7 @@ package capacities +import "strings" + type CapacityProvisioningState string const ( @@ -17,6 +19,47 @@ const ( CapacityProvisioningStateUpdating CapacityProvisioningState = "Updating" ) +func PossibleValuesForCapacityProvisioningState() []string { + return []string{ + string(CapacityProvisioningStateDeleting), + string(CapacityProvisioningStateFailed), + string(CapacityProvisioningStatePaused), + string(CapacityProvisioningStatePausing), + string(CapacityProvisioningStatePreparing), + string(CapacityProvisioningStateProvisioning), + string(CapacityProvisioningStateResuming), + string(CapacityProvisioningStateScaling), + string(CapacityProvisioningStateSucceeded), + string(CapacityProvisioningStateSuspended), + string(CapacityProvisioningStateSuspending), + string(CapacityProvisioningStateUpdating), + } +} + +func parseCapacityProvisioningState(input string) (*CapacityProvisioningState, error) { + vals := map[string]CapacityProvisioningState{ + "deleting": CapacityProvisioningStateDeleting, + "failed": CapacityProvisioningStateFailed, + "paused": CapacityProvisioningStatePaused, + "pausing": CapacityProvisioningStatePausing, + "preparing": CapacityProvisioningStatePreparing, + "provisioning": CapacityProvisioningStateProvisioning, + "resuming": CapacityProvisioningStateResuming, + "scaling": CapacityProvisioningStateScaling, + "succeeded": CapacityProvisioningStateSucceeded, + "suspended": CapacityProvisioningStateSuspended, + "suspending": CapacityProvisioningStateSuspending, + "updating": CapacityProvisioningStateUpdating, + } + if v, ok := 
vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CapacityProvisioningState(input) + return &out, nil +} + type CapacitySkuTier string const ( @@ -25,6 +68,29 @@ const ( CapacitySkuTierPremium CapacitySkuTier = "Premium" ) +func PossibleValuesForCapacitySkuTier() []string { + return []string{ + string(CapacitySkuTierAutoPremiumHost), + string(CapacitySkuTierPBIEAzure), + string(CapacitySkuTierPremium), + } +} + +func parseCapacitySkuTier(input string) (*CapacitySkuTier, error) { + vals := map[string]CapacitySkuTier{ + "autopremiumhost": CapacitySkuTierAutoPremiumHost, + "pbie_azure": CapacitySkuTierPBIEAzure, + "premium": CapacitySkuTierPremium, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CapacitySkuTier(input) + return &out, nil +} + type IdentityType string const ( @@ -34,6 +100,31 @@ const ( IdentityTypeUser IdentityType = "User" ) +func PossibleValuesForIdentityType() []string { + return []string{ + string(IdentityTypeApplication), + string(IdentityTypeKey), + string(IdentityTypeManagedIdentity), + string(IdentityTypeUser), + } +} + +func parseIdentityType(input string) (*IdentityType, error) { + vals := map[string]IdentityType{ + "application": IdentityTypeApplication, + "key": IdentityTypeKey, + "managedidentity": IdentityTypeManagedIdentity, + "user": IdentityTypeUser, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := IdentityType(input) + return &out, nil +} + type Mode string const ( @@ -41,6 +132,27 @@ const ( ModeGenTwo Mode = "Gen2" ) +func PossibleValuesForMode() []string { + return []string{ + string(ModeGenOne), + string(ModeGenTwo), + } +} + +func parseMode(input string) (*Mode, error) { + vals := map[string]Mode{ + "gen1": ModeGenOne, + "gen2": ModeGenTwo, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := Mode(input) + return &out, nil +} + type State string const ( @@ -57,3 +169,44 @@ const ( StateSuspending State = "Suspending" StateUpdating State = "Updating" ) + +func PossibleValuesForState() []string { + return []string{ + string(StateDeleting), + string(StateFailed), + string(StatePaused), + string(StatePausing), + string(StatePreparing), + string(StateProvisioning), + string(StateResuming), + string(StateScaling), + string(StateSucceeded), + string(StateSuspended), + string(StateSuspending), + string(StateUpdating), + } +} + +func parseState(input string) (*State, error) { + vals := map[string]State{ + "deleting": StateDeleting, + "failed": StateFailed, + "paused": StatePaused, + "pausing": StatePausing, + "preparing": StatePreparing, + "provisioning": StateProvisioning, + "resuming": StateResuming, + "scaling": StateScaling, + "succeeded": StateSucceeded, + "suspended": StateSuspended, + "suspending": StateSuspending, + "updating": StateUpdating, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := State(input) + return &out, nil +} diff --git a/internal/services/powerbi/sdk/2021-01-01/capacities/id_capacities.go b/internal/services/powerbi/sdk/2021-01-01/capacities/id_capacities.go index ab339450cf3d..3349915de44c 100644 --- 
a/internal/services/powerbi/sdk/2021-01-01/capacities/id_capacities.go +++ b/internal/services/powerbi/sdk/2021-01-01/capacities/id_capacities.go @@ -7,102 +7,118 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = CapacitiesId{} + +// CapacitiesId is a struct representing the Resource ID for a Capacities type CapacitiesId struct { - SubscriptionId string - ResourceGroup string - CapacityName string + SubscriptionId string + ResourceGroupName string + DedicatedCapacityName string } -func NewCapacitiesID(subscriptionId, resourceGroup, capacityName string) CapacitiesId { +// NewCapacitiesID returns a new CapacitiesId struct +func NewCapacitiesID(subscriptionId string, resourceGroupName string, dedicatedCapacityName string) CapacitiesId { return CapacitiesId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, - CapacityName: capacityName, + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + DedicatedCapacityName: dedicatedCapacityName, } } -func (id CapacitiesId) String() string { - segments := []string{ - fmt.Sprintf("Capacity Name %q", id.CapacityName), - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Capacities", segmentsStr) -} - -func (id CapacitiesId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.PowerBIDedicated/capacities/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.CapacityName) -} - -// ParseCapacitiesID parses a Capacities ID into an CapacitiesId struct +// ParseCapacitiesID parses 'input' into a CapacitiesId func ParseCapacitiesID(input string) (*CapacitiesId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(CapacitiesId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err - } - - resourceId := CapacitiesId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") - } + var ok bool + id := CapacitiesId{} - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.CapacityName, err = id.PopSegment("capacities"); err != nil { - return nil, err + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.DedicatedCapacityName, ok = parsed.Parsed["dedicatedCapacityName"]; !ok { + return nil, fmt.Errorf("the segment 'dedicatedCapacityName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParseCapacitiesIDInsensitively parses an Capacities ID into an CapacitiesId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseCapacitiesID method should be used instead for validation etc. 
+// ParseCapacitiesIDInsensitively parses 'input' case-insensitively into a CapacitiesId +// note: this method should only be used for API response data and not user input func ParseCapacitiesIDInsensitively(input string) (*CapacitiesId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(CapacitiesId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := CapacitiesId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + var ok bool + id := CapacitiesId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.DedicatedCapacityName, ok = parsed.Parsed["dedicatedCapacityName"]; !ok { + return nil, fmt.Errorf("the segment 'dedicatedCapacityName' was not found in the resource id %q", input) } - // find the correct casing for the 'capacities' segment - capacitiesKey := "capacities" - for key := range id.Path { - if strings.EqualFold(key, capacitiesKey) { - capacitiesKey = key - break - } + return &id, nil +} + +// ValidateCapacitiesID checks that 'input' can be parsed as a Capacities ID +func ValidateCapacitiesID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.CapacityName, err = id.PopSegment(capacitiesKey); err != nil { - return nil, err + + if _, err := ParseCapacitiesID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Capacities ID +func (id CapacitiesId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.PowerBIDedicated/capacities/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.DedicatedCapacityName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Capacities ID +func (id CapacitiesId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("providers", "providers", "providers"), + resourceids.ResourceProviderSegment("microsoftPowerBIDedicated", "Microsoft.PowerBIDedicated", "Microsoft.PowerBIDedicated"), + resourceids.StaticSegment("capacities", "capacities", "capacities"), + resourceids.UserSpecifiedSegment("dedicatedCapacityName", "dedicatedCapacityValue"), } +} - return &resourceId, nil +// String returns a human-readable description of this Capacities ID +func (id CapacitiesId) String() string { + components := []string{ + fmt.Sprintf("Subscription: 
%q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Dedicated Capacity Name: %q", id.DedicatedCapacityName), + } + return fmt.Sprintf("Capacities (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/powerbi/sdk/2021-01-01/capacities/id_capacities_test.go b/internal/services/powerbi/sdk/2021-01-01/capacities/id_capacities_test.go index 13712863cdfc..67bc097327c1 100644 --- a/internal/services/powerbi/sdk/2021-01-01/capacities/id_capacities_test.go +++ b/internal/services/powerbi/sdk/2021-01-01/capacities/id_capacities_test.go @@ -6,13 +6,29 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = CapacitiesId{} +var _ resourceids.ResourceId = CapacitiesId{} -func TestCapacitiesIDFormatter(t *testing.T) { - actual := NewCapacitiesID("{subscriptionId}", "{resourceGroupName}", "{dedicatedCapacityName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBIDedicated/capacities/{dedicatedCapacityName}" +func TestNewCapacitiesID(t *testing.T) { + id := NewCapacitiesID("12345678-1234-9876-4563-123456789012", "example-resource-group", "dedicatedCapacityValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.DedicatedCapacityName != "dedicatedCapacityValue" { + t.Fatalf("Expected %q but got %q for Segment 'DedicatedCapacityName'", id.DedicatedCapacityName, "dedicatedCapacityValue") + } +} + +func TestFormatCapacitiesID(t *testing.T) { + actual := NewCapacitiesID("12345678-1234-9876-4563-123456789012", "example-resource-group", "dedicatedCapacityValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.PowerBIDedicated/capacities/dedicatedCapacityValue" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,66 +38,61 @@ func TestParseCapacitiesID(t *testing.T) { Error bool Expected *CapacitiesId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing CapacityName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBIDedicated/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", Error: true, }, - { - // missing value for CapacityName - Input: 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBIDedicated/capacities/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.PowerBIDedicated", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBIDedicated/capacities/{dedicatedCapacityName}", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.PowerBIDedicated/capacities", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.PowerBIDedicated/capacities/dedicatedCapacityValue", Expected: &CapacitiesId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - CapacityName: "{dedicatedCapacityName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + DedicatedCapacityName: "dedicatedCapacityValue", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.POWERBIDEDICATED/CAPACITIES/{DEDICATEDCAPACITYNAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.PowerBIDedicated/capacities/dedicatedCapacityValue/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -91,7 +102,7 @@ func TestParseCapacitiesID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -100,12 +111,15 @@ func TestParseCapacitiesID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.CapacityName != v.Expected.CapacityName { - t.Fatalf("Expected %q but got %q for CapacityName", v.Expected.CapacityName, actual.CapacityName) + + if actual.DedicatedCapacityName != v.Expected.DedicatedCapacityName { + t.Fatalf("Expected %q but got %q for DedicatedCapacityName", v.Expected.DedicatedCapacityName, actual.DedicatedCapacityName) } + } } @@ -115,90 +129,110 @@ func TestParseCapacitiesIDInsensitively(t *testing.T) { Error bool Expected *CapacitiesId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // 
Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing CapacityName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBIDedicated/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for CapacityName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBIDedicated/capacities/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBIDedicated/capacities/{dedicatedCapacityName}", - Expected: &CapacitiesId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - CapacityName: "{dedicatedCapacityName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBIDedicated/capacities/{dedicatedCapacityName}", - Expected: &CapacitiesId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - CapacityName: "{dedicatedCapacityName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.PowerBIDedicated", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.pOwErBiDeDiCaTeD", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBIDedicated/CAPACITIES/{dedicatedCapacityName}", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.PowerBIDedicated/capacities", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.pOwErBiDeDiCaTeD/cApAcItIeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.PowerBIDedicated/capacities/dedicatedCapacityValue", Expected: &CapacitiesId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - CapacityName: "{dedicatedCapacityName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + DedicatedCapacityName: "dedicatedCapacityValue", 
}, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBIDedicated/CaPaCiTiEs/{dedicatedCapacityName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.PowerBIDedicated/capacities/dedicatedCapacityValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.pOwErBiDeDiCaTeD/cApAcItIeS/dEdIcAtEdCaPaCiTyVaLuE", Expected: &CapacitiesId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - CapacityName: "{dedicatedCapacityName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + DedicatedCapacityName: "dEdIcAtEdCaPaCiTyVaLuE", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.pOwErBiDeDiCaTeD/cApAcItIeS/dEdIcAtEdCaPaCiTyVaLuE/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -208,7 +242,7 @@ func TestParseCapacitiesIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -217,11 +251,14 @@ func TestParseCapacitiesIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.CapacityName != v.Expected.CapacityName { - t.Fatalf("Expected %q but got %q for CapacityName", v.Expected.CapacityName, actual.CapacityName) + + if actual.DedicatedCapacityName != v.Expected.DedicatedCapacityName { + t.Fatalf("Expected %q but got %q for DedicatedCapacityName", v.Expected.DedicatedCapacityName, actual.DedicatedCapacityName) } + } } diff --git a/internal/services/powerbi/sdk/2021-01-01/capacities/id_location.go b/internal/services/powerbi/sdk/2021-01-01/capacities/id_location.go index 5a0a0e671b73..06db7f0ea427 100644 --- a/internal/services/powerbi/sdk/2021-01-01/capacities/id_location.go +++ b/internal/services/powerbi/sdk/2021-01-01/capacities/id_location.go @@ -7,89 +7,105 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = LocationId{} + +// LocationId is a struct representing the Resource ID for a Location type LocationId struct { SubscriptionId string - Name string + Location string } -func NewLocationID(subscriptionId, name string) LocationId { +// NewLocationID returns a new LocationId struct +func NewLocationID(subscriptionId string, location string) LocationId { return LocationId{ SubscriptionId: subscriptionId, - Name: name, + Location: location, } } -func (id LocationId) String() string { - segments := []string{ - fmt.Sprintf("Name %q", id.Name), - } - 
segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Location", segmentsStr) -} - -func (id LocationId) ID() string { - fmtString := "/subscriptions/%s/providers/Microsoft.PowerBIDedicated/locations/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.Name) -} - -// ParseLocationID parses a Location ID into an LocationId struct +// ParseLocationID parses 'input' into a LocationId func ParseLocationID(input string) (*LocationId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(LocationId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err - } - - resourceId := LocationId{ - SubscriptionId: id.SubscriptionID, + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") - } + var ok bool + id := LocationId{} - if resourceId.Name, err = id.PopSegment("locations"); err != nil { - return nil, err + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.Location, ok = parsed.Parsed["location"]; !ok { + return nil, fmt.Errorf("the segment 'location' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParseLocationIDInsensitively parses an Location ID into an LocationId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseLocationID method should be used instead for validation etc. +// ParseLocationIDInsensitively parses 'input' case-insensitively into a LocationId +// note: this method should only be used for API response data and not user input func ParseLocationIDInsensitively(input string) (*LocationId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(LocationId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := LocationId{ - SubscriptionId: id.SubscriptionID, + var ok bool + id := LocationId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.Location, ok = parsed.Parsed["location"]; !ok { + return nil, fmt.Errorf("the segment 'location' was not found in the resource id %q", input) } - // find the correct casing for the 'locations' segment - locationsKey := "locations" - for key := range id.Path { - if strings.EqualFold(key, locationsKey) { - locationsKey = key - break - } + return &id, nil +} + +// ValidateLocationID checks that 'input' can be parsed as a Location ID +func ValidateLocationID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.Name, err = id.PopSegment(locationsKey); err != nil { - return nil, err + + if _, err := ParseLocationID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the 
formatted Location ID +func (id LocationId) ID() string { + fmtString := "/subscriptions/%s/providers/Microsoft.PowerBIDedicated/locations/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.Location) +} + +// Segments returns a slice of Resource ID Segments which comprise this Location ID +func (id LocationId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("providers", "providers", "providers"), + resourceids.ResourceProviderSegment("microsoftPowerBIDedicated", "Microsoft.PowerBIDedicated", "Microsoft.PowerBIDedicated"), + resourceids.StaticSegment("locations", "locations", "locations"), + resourceids.UserSpecifiedSegment("location", "locationValue"), } +} - return &resourceId, nil +// String returns a human-readable description of this Location ID +func (id LocationId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Location: %q", id.Location), + } + return fmt.Sprintf("Location (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/powerbi/sdk/2021-01-01/capacities/id_location_test.go b/internal/services/powerbi/sdk/2021-01-01/capacities/id_location_test.go index d388d66558b8..d7c765a36379 100644 --- a/internal/services/powerbi/sdk/2021-01-01/capacities/id_location_test.go +++ b/internal/services/powerbi/sdk/2021-01-01/capacities/id_location_test.go @@ -6,13 +6,25 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = LocationId{} +var _ resourceids.ResourceId = LocationId{} -func TestLocationIDFormatter(t *testing.T) { - actual := NewLocationID("{subscriptionId}", "{location}").ID() - expected := "/subscriptions/{subscriptionId}/providers/Microsoft.PowerBIDedicated/locations/{location}" +func TestNewLocationID(t *testing.T) { + id := NewLocationID("12345678-1234-9876-4563-123456789012", "locationValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.Location != "locationValue" { + t.Fatalf("Expected %q but got %q for Segment 'Location'", id.Location, "locationValue") + } +} + +func TestFormatLocationID(t *testing.T) { + actual := NewLocationID("12345678-1234-9876-4563-123456789012", "locationValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.PowerBIDedicated/locations/locationValue" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,53 +34,50 @@ func TestParseLocationID(t *testing.T) { Error bool Expected *LocationId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.PowerBIDedicated/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", Error: true, }, - { - // missing value for Name - Input: 
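The generated `Validate*ID` helpers (`ValidateCapacitiesID`, `ValidateLocationID`, and so on) share the `func(interface{}, string) ([]string, []error)` shape that the Terraform Plugin SDK expects from a schema `ValidateFunc`, so they can be wired straight into a resource schema. A sketch, assuming the upstream `helper/schema` package and an illustrative attribute name (the provider itself wraps this in its own `pluginsdk` helpers):

package powerbi

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	"github.com/hashicorp/terraform-provider-azurerm/internal/services/powerbi/sdk/2021-01-01/capacities"
)

// capacityIDSchema shows one of the generated ID validators being used as a ValidateFunc.
func capacityIDSchema() *schema.Schema {
	return &schema.Schema{
		Type:         schema.TypeString,
		Required:     true,
		ValidateFunc: capacities.ValidateCapacitiesID,
	}
}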
"/subscriptions/{subscriptionId}/providers/Microsoft.PowerBIDedicated/locations/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.PowerBIDedicated", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.PowerBIDedicated/locations", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.PowerBIDedicated/locations/{location}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.PowerBIDedicated/locations/locationValue", Expected: &LocationId{ - SubscriptionId: "{subscriptionId}", - Name: "{location}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + Location: "locationValue", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/PROVIDERS/MICROSOFT.POWERBIDEDICATED/LOCATIONS/{LOCATION}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.PowerBIDedicated/locations/locationValue/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -78,7 +87,7 @@ func TestParseLocationID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -87,9 +96,11 @@ func TestParseLocationID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.Location != v.Expected.Location { + t.Fatalf("Expected %q but got %q for Location", v.Expected.Location, actual.Location) } + } } @@ -99,74 +110,88 @@ func TestParseLocationIDInsensitively(t *testing.T) { Error bool Expected *LocationId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.PowerBIDedicated/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.PowerBIDedicated/locations/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.PowerBIDedicated/locations/{location}", - Expected: &LocationId{ - SubscriptionId: "{subscriptionId}", - Name: "{location}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.PowerBIDedicated/locations/{location}", - Expected: &LocationId{ - SubscriptionId: "{subscriptionId}", - Name: "{location}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs", + Error: true, + }, + { + // Incomplete 
URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.PowerBIDedicated", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.pOwErBiDeDiCaTeD", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.PowerBIDedicated/locations", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.PowerBIDedicated/LOCATIONS/{location}", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.pOwErBiDeDiCaTeD/lOcAtIoNs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.PowerBIDedicated/locations/locationValue", Expected: &LocationId{ - SubscriptionId: "{subscriptionId}", - Name: "{location}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + Location: "locationValue", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.PowerBIDedicated/LoCaTiOnS/{location}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.PowerBIDedicated/locations/locationValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.pOwErBiDeDiCaTeD/lOcAtIoNs/lOcAtIoNvAlUe", Expected: &LocationId{ - SubscriptionId: "{subscriptionId}", - Name: "{location}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + Location: "lOcAtIoNvAlUe", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.pOwErBiDeDiCaTeD/lOcAtIoNs/lOcAtIoNvAlUe/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -176,7 +201,7 @@ func TestParseLocationIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -185,8 +210,10 @@ func TestParseLocationIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.Location != v.Expected.Location { + t.Fatalf("Expected %q but got %q for Location", v.Expected.Location, actual.Location) } + } } diff --git a/internal/services/powerbi/sdk/2021-01-01/capacities/id_resourcegroup.go b/internal/services/powerbi/sdk/2021-01-01/capacities/id_resourcegroup.go index 3da781140fbe..7e9c16e2e162 100644 --- a/internal/services/powerbi/sdk/2021-01-01/capacities/id_resourcegroup.go +++ b/internal/services/powerbi/sdk/2021-01-01/capacities/id_resourcegroup.go @@ -7,83 +7,103 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = ResourceGroupId{} + +// ResourceGroupId is a struct representing the Resource ID for a Resource Group type ResourceGroupId struct { - SubscriptionId string - ResourceGroup string + SubscriptionId string + 
ResourceGroupName string } -func NewResourceGroupID(subscriptionId, resourceGroup string) ResourceGroupId { +// NewResourceGroupID returns a new ResourceGroupId struct +func NewResourceGroupID(subscriptionId string, resourceGroupName string) ResourceGroupId { return ResourceGroupId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, } } -func (id ResourceGroupId) String() string { - segments := []string{ - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Resource Group", segmentsStr) -} - -func (id ResourceGroupId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup) -} - -// ParseResourceGroupID parses a ResourceGroup ID into an ResourceGroupId struct +// ParseResourceGroupID parses 'input' into a ResourceGroupId func ParseResourceGroupID(input string) (*ResourceGroupId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(ResourceGroupId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err - } - - resourceId := ResourceGroupId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") - } + var ok bool + id := ResourceGroupId{} - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParseResourceGroupIDInsensitively parses an ResourceGroup ID into an ResourceGroupId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseResourceGroupID method should be used instead for validation etc. 
+// ParseResourceGroupIDInsensitively parses 'input' case-insensitively into a ResourceGroupId +// note: this method should only be used for API response data and not user input func ParseResourceGroupIDInsensitively(input string) (*ResourceGroupId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(ResourceGroupId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := ResourceGroupId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - resourceId := ResourceGroupId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + return &id, nil +} + +// ValidateResourceGroupID checks that 'input' can be parsed as a Resource Group ID +func ValidateResourceGroupID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if _, err := ParseResourceGroupID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Resource Group ID +func (id ResourceGroupId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Resource Group ID +func (id ResourceGroupId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), } +} - return &resourceId, nil +// String returns a human-readable description of this Resource Group ID +func (id ResourceGroupId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + } + return fmt.Sprintf("Resource Group (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/powerbi/sdk/2021-01-01/capacities/id_resourcegroup_test.go b/internal/services/powerbi/sdk/2021-01-01/capacities/id_resourcegroup_test.go index f83fe4665a97..988e78477809 100644 --- a/internal/services/powerbi/sdk/2021-01-01/capacities/id_resourcegroup_test.go +++ b/internal/services/powerbi/sdk/2021-01-01/capacities/id_resourcegroup_test.go @@ -6,13 +6,25 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = ResourceGroupId{} +var _ resourceids.ResourceId = ResourceGroupId{} -func TestResourceGroupIDFormatter(t *testing.T) { - actual := NewResourceGroupID("{subscriptionId}", 
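As the generated comments note, the `*Insensitively` parsers are intended for IDs returned by the API (which can use inconsistent casing for the static segments) rather than for user input. A sketch of normalising such an ID back to its canonical form (the helper name is illustrative):

package capacities

import "fmt"

// normaliseResourceGroupID re-renders an API-returned ID with canonical casing for
// the static segments; user-specified segments keep whatever casing the API returned.
func normaliseResourceGroupID(apiID string) (string, error) {
	id, err := ParseResourceGroupIDInsensitively(apiID)
	if err != nil {
		return "", fmt.Errorf("parsing %q: %+v", apiID, err)
	}
	return id.ID(), nil
}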
"{resourceGroupName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}" +func TestNewResourceGroupID(t *testing.T) { + id := NewResourceGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } +} + +func TestFormatResourceGroupID(t *testing.T) { + actual := NewResourceGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,53 +34,40 @@ func TestParseResourceGroupID(t *testing.T) { Error bool Expected *ResourceGroupId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - - { - // missing SubscriptionId - Input: "/", - Error: true, - }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -78,7 +77,7 @@ func TestParseResourceGroupID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -87,9 +86,11 @@ func TestParseResourceGroupID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + } } @@ -99,74 +100,68 @@ func TestParseResourceGroupIDInsensitively(t *testing.T) { Error bool Expected *ResourceGroupId }{ - { - // empty + // Incomplete URI 
Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", - Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", - Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -176,7 +171,7 @@ func TestParseResourceGroupIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -185,8 +180,10 @@ func TestParseResourceGroupIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName 
{ + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + } } diff --git a/internal/services/powerbi/sdk/2021-01-01/capacities/id_subscription.go b/internal/services/powerbi/sdk/2021-01-01/capacities/id_subscription.go index fd8551c377f5..c7798def1118 100644 --- a/internal/services/powerbi/sdk/2021-01-01/capacities/id_subscription.go +++ b/internal/services/powerbi/sdk/2021-01-01/capacities/id_subscription.go @@ -7,69 +7,90 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = SubscriptionId{} + +// SubscriptionId is a struct representing the Resource ID for a Subscription type SubscriptionId struct { SubscriptionId string } +// NewSubscriptionID returns a new SubscriptionId struct func NewSubscriptionID(subscriptionId string) SubscriptionId { return SubscriptionId{ SubscriptionId: subscriptionId, } } -func (id SubscriptionId) String() string { - segments := []string{} - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Subscription", segmentsStr) -} - -func (id SubscriptionId) ID() string { - fmtString := "/subscriptions/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId) -} - -// ParseSubscriptionID parses a Subscription ID into an SubscriptionId struct +// ParseSubscriptionID parses 'input' into a SubscriptionId func ParseSubscriptionID(input string) (*SubscriptionId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(SubscriptionId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := SubscriptionId{ - SubscriptionId: id.SubscriptionID, - } + var ok bool + id := SubscriptionId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err - } - - return &resourceId, nil + return &id, nil } -// ParseSubscriptionIDInsensitively parses an Subscription ID into an SubscriptionId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseSubscriptionID method should be used instead for validation etc. 
+// ParseSubscriptionIDInsensitively parses 'input' case-insensitively into a SubscriptionId +// note: this method should only be used for API response data and not user input func ParseSubscriptionIDInsensitively(input string) (*SubscriptionId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(SubscriptionId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := SubscriptionId{ - SubscriptionId: id.SubscriptionID, + var ok bool + id := SubscriptionId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + return &id, nil +} + +// ValidateSubscriptionID checks that 'input' can be parsed as a Subscription ID +func ValidateSubscriptionID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if _, err := ParseSubscriptionID(v); err != nil { + errors = append(errors, err) } - return &resourceId, nil + return +} + +// ID returns the formatted Subscription ID +func (id SubscriptionId) ID() string { + fmtString := "/subscriptions/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId) +} + +// Segments returns a slice of Resource ID Segments which comprise this Subscription ID +func (id SubscriptionId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + } +} + +// String returns a human-readable description of this Subscription ID +func (id SubscriptionId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + } + return fmt.Sprintf("Subscription (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/powerbi/sdk/2021-01-01/capacities/id_subscription_test.go b/internal/services/powerbi/sdk/2021-01-01/capacities/id_subscription_test.go index 657d640e764d..5dd88810947b 100644 --- a/internal/services/powerbi/sdk/2021-01-01/capacities/id_subscription_test.go +++ b/internal/services/powerbi/sdk/2021-01-01/capacities/id_subscription_test.go @@ -6,13 +6,21 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = SubscriptionId{} +var _ resourceids.ResourceId = SubscriptionId{} -func TestSubscriptionIDFormatter(t *testing.T) { - actual := NewSubscriptionID("{subscriptionId}").ID() - expected := "/subscriptions/{subscriptionId}" +func TestNewSubscriptionID(t *testing.T) { + id := NewSubscriptionID("12345678-1234-9876-4563-123456789012") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } +} + +func TestFormatSubscriptionID(t *testing.T) { + actual := NewSubscriptionID("12345678-1234-9876-4563-123456789012").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + 
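`String()` on these ID types is intended for human-readable output such as error messages (it joins the labelled components), while `ID()` is what should be used to build request URIs. A small sketch, with illustrative error wording:

package capacities

import "fmt"

// subscriptionNotFoundError formats a not-found error using the descriptive String()
// form of the ID rather than the raw URI form.
func subscriptionNotFoundError(id SubscriptionId) error {
	return fmt.Errorf("%s was not found", id.String())
}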
t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,40 +30,29 @@ func TestParseSubscriptionID(t *testing.T) { Error bool Expected *SubscriptionId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", - Error: true, - }, - - { - // valid - Input: "/subscriptions/{subscriptionId}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -65,7 +62,7 @@ func TestParseSubscriptionID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -74,6 +71,7 @@ func TestParseSubscriptionID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } + } } @@ -83,58 +81,46 @@ func TestParseSubscriptionIDInsensitively(t *testing.T) { Error bool Expected *SubscriptionId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", }, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}", - Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", - }, + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/extra", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}", + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}", - Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", - }, + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/extra", + Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -144,7 +130,7 @@ func TestParseSubscriptionIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -153,5 +139,6 @@ func TestParseSubscriptionIDInsensitively(t *testing.T) { if actual.SubscriptionId != 
v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } + } } diff --git a/internal/services/powerbi/sdk/2021-01-01/capacities/model_systemdata.go b/internal/services/powerbi/sdk/2021-01-01/capacities/model_systemdata.go index d8b8d1a84afc..f6c63b0abe30 100644 --- a/internal/services/powerbi/sdk/2021-01-01/capacities/model_systemdata.go +++ b/internal/services/powerbi/sdk/2021-01-01/capacities/model_systemdata.go @@ -1,11 +1,5 @@ package capacities -import ( - "time" - - "github.com/hashicorp/go-azure-helpers/lang/dates" -) - type SystemData struct { CreatedAt *string `json:"createdAt,omitempty"` CreatedBy *string `json:"createdBy,omitempty"` @@ -14,21 +8,3 @@ type SystemData struct { LastModifiedBy *string `json:"lastModifiedBy,omitempty"` LastModifiedByType *IdentityType `json:"lastModifiedByType,omitempty"` } - -func (o SystemData) GetCreatedAtAsTime() (*time.Time, error) { - return dates.ParseAsFormat(o.CreatedAt, "2006-01-02T15:04:05Z07:00") -} - -func (o SystemData) SetCreatedAtAsTime(input time.Time) { - formatted := input.Format("2006-01-02T15:04:05Z07:00") - o.CreatedAt = &formatted -} - -func (o SystemData) GetLastModifiedAtAsTime() (*time.Time, error) { - return dates.ParseAsFormat(o.LastModifiedAt, "2006-01-02T15:04:05Z07:00") -} - -func (o SystemData) SetLastModifiedAtAsTime(input time.Time) { - formatted := input.Format("2006-01-02T15:04:05Z07:00") - o.LastModifiedAt = &formatted -} diff --git a/internal/services/powerbi/sdk/2021-01-01/powerbidedicated/client.go b/internal/services/powerbi/sdk/2021-01-01/powerbidedicated/client.go new file mode 100644 index 000000000000..634a13539d50 --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/powerbidedicated/client.go @@ -0,0 +1,15 @@ +package powerbidedicated + +import "github.com/Azure/go-autorest/autorest" + +type PowerBIDedicatedClient struct { + Client autorest.Client + baseUri string +} + +func NewPowerBIDedicatedClientWithBaseURI(endpoint string) PowerBIDedicatedClient { + return PowerBIDedicatedClient{ + Client: autorest.NewClientWithUserAgent(userAgent()), + baseUri: endpoint, + } +} diff --git a/internal/services/powerbi/sdk/2021-01-01/powerbidedicated/constants.go b/internal/services/powerbi/sdk/2021-01-01/powerbidedicated/constants.go new file mode 100644 index 000000000000..fd2ea43d5eb4 --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/powerbidedicated/constants.go @@ -0,0 +1,34 @@ +package powerbidedicated + +import "strings" + +type CapacitySkuTier string + +const ( + CapacitySkuTierAutoPremiumHost CapacitySkuTier = "AutoPremiumHost" + CapacitySkuTierPBIEAzure CapacitySkuTier = "PBIE_Azure" + CapacitySkuTierPremium CapacitySkuTier = "Premium" +) + +func PossibleValuesForCapacitySkuTier() []string { + return []string{ + string(CapacitySkuTierAutoPremiumHost), + string(CapacitySkuTierPBIEAzure), + string(CapacitySkuTierPremium), + } +} + +func parseCapacitySkuTier(input string) (*CapacitySkuTier, error) { + vals := map[string]CapacitySkuTier{ + "autopremiumhost": CapacitySkuTierAutoPremiumHost, + "pbie_azure": CapacitySkuTierPBIEAzure, + "premium": CapacitySkuTierPremium, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CapacitySkuTier(input) + return &out, nil +} diff --git a/internal/services/powerbi/sdk/2021-01-01/powerbidedicated/id_subscription.go 
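A sketch of constructing the new-style `PowerBIDedicatedClient`; the endpoint and the way the authorizer is attached are illustrative, since the provider wires these up from its own client builder:

package powerbidedicated

import "github.com/Azure/go-autorest/autorest"

// newClientSketch builds a client against the public Azure Resource Manager endpoint
// and attaches an authorizer to the embedded autorest.Client.
func newClientSketch(authorizer autorest.Authorizer) PowerBIDedicatedClient {
	client := NewPowerBIDedicatedClientWithBaseURI("https://management.azure.com")
	client.Client.Authorizer = authorizer
	return client
}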
b/internal/services/powerbi/sdk/2021-01-01/powerbidedicated/id_subscription.go new file mode 100644 index 000000000000..97a9436f9a7e --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/powerbidedicated/id_subscription.go @@ -0,0 +1,96 @@ +package powerbidedicated + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = SubscriptionId{} + +// SubscriptionId is a struct representing the Resource ID for a Subscription +type SubscriptionId struct { + SubscriptionId string +} + +// NewSubscriptionID returns a new SubscriptionId struct +func NewSubscriptionID(subscriptionId string) SubscriptionId { + return SubscriptionId{ + SubscriptionId: subscriptionId, + } +} + +// ParseSubscriptionID parses 'input' into a SubscriptionId +func ParseSubscriptionID(input string) (*SubscriptionId, error) { + parser := resourceids.NewParserFromResourceIdType(SubscriptionId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := SubscriptionId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParseSubscriptionIDInsensitively parses 'input' case-insensitively into a SubscriptionId +// note: this method should only be used for API response data and not user input +func ParseSubscriptionIDInsensitively(input string) (*SubscriptionId, error) { + parser := resourceids.NewParserFromResourceIdType(SubscriptionId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := SubscriptionId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidateSubscriptionID checks that 'input' can be parsed as a Subscription ID +func ValidateSubscriptionID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseSubscriptionID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Subscription ID +func (id SubscriptionId) ID() string { + fmtString := "/subscriptions/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId) +} + +// Segments returns a slice of Resource ID Segments which comprise this Subscription ID +func (id SubscriptionId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + } +} + +// String returns a human-readable description of this Subscription ID +func (id SubscriptionId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + } + return fmt.Sprintf("Subscription (%s)", strings.Join(components, "\n")) +} diff --git a/internal/services/powerbi/sdk/2021-01-01/powerbidedicated/id_subscription_test.go b/internal/services/powerbi/sdk/2021-01-01/powerbidedicated/id_subscription_test.go new file mode 100644 index 000000000000..5cb0fafce41a --- /dev/null +++ 
b/internal/services/powerbi/sdk/2021-01-01/powerbidedicated/id_subscription_test.go @@ -0,0 +1,144 @@ +package powerbidedicated + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = SubscriptionId{} + +func TestNewSubscriptionID(t *testing.T) { + id := NewSubscriptionID("12345678-1234-9876-4563-123456789012") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } +} + +func TestFormatSubscriptionID(t *testing.T) { + actual := NewSubscriptionID("12345678-1234-9876-4563-123456789012").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) + } +} + +func TestParseSubscriptionID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *SubscriptionId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Expected: &SubscriptionId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseSubscriptionID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + } +} + +func TestParseSubscriptionIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *SubscriptionId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Expected: &SubscriptionId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Expected: &SubscriptionId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseSubscriptionIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + } +} diff --git 
a/internal/services/powerbi/sdk/2021-01-01/powerbidedicated/method_capacitieslistskus_autorest.go b/internal/services/powerbi/sdk/2021-01-01/powerbidedicated/method_capacitieslistskus_autorest.go new file mode 100644 index 000000000000..0554f907df48 --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/powerbidedicated/method_capacitieslistskus_autorest.go @@ -0,0 +1,65 @@ +package powerbidedicated + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type CapacitiesListSkusResponse struct { + HttpResponse *http.Response + Model *SkuEnumerationForNewResourceResult +} + +// CapacitiesListSkus ... +func (c PowerBIDedicatedClient) CapacitiesListSkus(ctx context.Context, id SubscriptionId) (result CapacitiesListSkusResponse, err error) { + req, err := c.preparerForCapacitiesListSkus(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "powerbidedicated.PowerBIDedicatedClient", "CapacitiesListSkus", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "powerbidedicated.PowerBIDedicatedClient", "CapacitiesListSkus", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForCapacitiesListSkus(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "powerbidedicated.PowerBIDedicatedClient", "CapacitiesListSkus", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForCapacitiesListSkus prepares the CapacitiesListSkus request. +func (c PowerBIDedicatedClient) preparerForCapacitiesListSkus(ctx context.Context, id SubscriptionId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/providers/Microsoft.PowerBIDedicated/skus", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForCapacitiesListSkus handles the response to the CapacitiesListSkus request. The method always +// closes the http.Response Body. 
+func (c PowerBIDedicatedClient) responderForCapacitiesListSkus(resp *http.Response) (result CapacitiesListSkusResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/internal/services/powerbi/sdk/2021-01-01/powerbidedicated/model_capacitysku.go b/internal/services/powerbi/sdk/2021-01-01/powerbidedicated/model_capacitysku.go new file mode 100644 index 000000000000..6995f07253f8 --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/powerbidedicated/model_capacitysku.go @@ -0,0 +1,6 @@ +package powerbidedicated + +type CapacitySku struct { + Name string `json:"name"` + Tier *CapacitySkuTier `json:"tier,omitempty"` +} diff --git a/internal/services/powerbi/sdk/2021-01-01/powerbidedicated/model_skuenumerationfornewresourceresult.go b/internal/services/powerbi/sdk/2021-01-01/powerbidedicated/model_skuenumerationfornewresourceresult.go new file mode 100644 index 000000000000..846266cd7a7b --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/powerbidedicated/model_skuenumerationfornewresourceresult.go @@ -0,0 +1,5 @@ +package powerbidedicated + +type SkuEnumerationForNewResourceResult struct { + Value *[]CapacitySku `json:"value,omitempty"` +} diff --git a/internal/services/powerbi/sdk/2021-01-01/powerbidedicated/version.go b/internal/services/powerbi/sdk/2021-01-01/powerbidedicated/version.go new file mode 100644 index 000000000000..e34e7140761d --- /dev/null +++ b/internal/services/powerbi/sdk/2021-01-01/powerbidedicated/version.go @@ -0,0 +1,9 @@ +package powerbidedicated + +import "fmt" + +const defaultApiVersion = "2021-01-01" + +func userAgent() string { + return fmt.Sprintf("pandora/powerbidedicated/%s", defaultApiVersion) +} diff --git a/internal/services/recoveryservices/backup_protected_vm_resource.go b/internal/services/recoveryservices/backup_protected_vm_resource.go index 7724e3dfd344..7520eb42a4af 100644 --- a/internal/services/recoveryservices/backup_protected_vm_resource.go +++ b/internal/services/recoveryservices/backup_protected_vm_resource.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-provider-azurerm/internal/services/recoveryservices/validate" "github.com/hashicorp/terraform-provider-azurerm/internal/tags" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" "github.com/hashicorp/terraform-provider-azurerm/utils" ) @@ -63,6 +64,26 @@ func resourceRecoveryServicesBackupProtectedVM() *pluginsdk.Resource { ValidateFunc: azure.ValidateResourceID, }, + "exclude_disk_luns": { + Type: pluginsdk.TypeSet, + ConflictsWith: []string{"include_disk_luns"}, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeInt, + ValidateFunc: validation.IntAtLeast(0), + }, + }, + + "include_disk_luns": { + Type: pluginsdk.TypeSet, + ConflictsWith: []string{"exclude_disk_luns"}, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeInt, + ValidateFunc: validation.IntAtLeast(0), + }, + }, + "tags": tags.Schema(), }, } @@ -107,12 +128,13 @@ func resourceRecoveryServicesBackupProtectedVMCreateUpdate(d *pluginsdk.Resource item := backup.ProtectedItemResource{ Tags: tags.Expand(t), Properties: &backup.AzureIaaSComputeVMProtectedItem{ - PolicyID: &policyId, - ProtectedItemType: 
backup.ProtectedItemTypeMicrosoftClassicComputevirtualMachines, - WorkloadType: backup.DataSourceTypeVM, - SourceResourceID: utils.String(vmId), - FriendlyName: utils.String(parsedVmId.Name), - VirtualMachineID: utils.String(vmId), + PolicyID: &policyId, + ProtectedItemType: backup.ProtectedItemTypeMicrosoftClassicComputevirtualMachines, + WorkloadType: backup.DataSourceTypeVM, + SourceResourceID: utils.String(vmId), + FriendlyName: utils.String(parsedVmId.Name), + ExtendedProperties: expandDiskExclusion(d), + VirtualMachineID: utils.String(vmId), }, } @@ -163,6 +185,18 @@ func resourceRecoveryServicesBackupProtectedVMRead(d *pluginsdk.ResourceData, me if v := vm.PolicyID; v != nil { d.Set("backup_policy_id", strings.Replace(*v, "Subscriptions", "subscriptions", 1)) } + + if v := vm.ExtendedProperties; v != nil && v.DiskExclusionProperties != nil { + if *v.DiskExclusionProperties.IsInclusionList { + if err := d.Set("include_disk_luns", utils.FlattenInt32Slice(v.DiskExclusionProperties.DiskLunList)); err != nil { + return fmt.Errorf("setting include_disk_luns: %+v", err) + } + } else { + if err := d.Set("exclude_disk_luns", utils.FlattenInt32Slice(v.DiskExclusionProperties.DiskLunList)); err != nil { + return fmt.Errorf("setting exclude_disk_luns: %+v", err) + } + } + } } } @@ -268,3 +302,36 @@ func resourceRecoveryServicesBackupProtectedVMRefreshFunc(ctx context.Context, c return resp, "Found", nil } } + +func expandDiskExclusion(d *pluginsdk.ResourceData) *backup.ExtendedProperties { + if v, ok := d.GetOk("include_disk_luns"); ok { + var diskLun = expandDiskLunList(v.(*pluginsdk.Set).List()) + + return &backup.ExtendedProperties{ + DiskExclusionProperties: &backup.DiskExclusionProperties{ + DiskLunList: utils.ExpandInt32Slice(diskLun), + IsInclusionList: utils.Bool(true), + }, + } + } + + if v, ok := d.GetOk("exclude_disk_luns"); ok { + var diskLun = expandDiskLunList(v.(*pluginsdk.Set).List()) + + return &backup.ExtendedProperties{ + DiskExclusionProperties: &backup.DiskExclusionProperties{ + DiskLunList: utils.ExpandInt32Slice(diskLun), + IsInclusionList: utils.Bool(false), + }, + } + } + return nil +} + +func expandDiskLunList(input []interface{}) []interface{} { + result := make([]interface{}, 0, len(input)) + for _, v := range input { + result = append(result, v.(int)) + } + return result +} diff --git a/internal/services/recoveryservices/backup_protected_vm_resource_test.go b/internal/services/recoveryservices/backup_protected_vm_resource_test.go index ae39823f4ec7..fee98e7fe172 100644 --- a/internal/services/recoveryservices/backup_protected_vm_resource_test.go +++ b/internal/services/recoveryservices/backup_protected_vm_resource_test.go @@ -119,6 +119,38 @@ func TestAccBackupProtectedVm_updateBackupPolicyId(t *testing.T) { }) } +func TestAccBackupProtectedVm_updateDiskExclusion(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_backup_protected_vm", "test") + r := BackupProtectedVmResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("resource_group_name").Exists(), + ), + }, + data.ImportStep(), + { + Config: r.updateDiskExclusion(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("resource_group_name").Exists(), + ), + }, + data.ImportStep(), + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + 
check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("resource_group_name").Exists(), + ), + }, + data.ImportStep(), + }) +} + func (t BackupProtectedVmResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.ProtectedItemID(state.ID) if err != nil { @@ -200,7 +232,7 @@ resource "azurerm_virtual_machine" "test" { name = "acctestvm" location = azurerm_resource_group.test.location resource_group_name = azurerm_resource_group.test.name - vm_size = "Standard_A0" + vm_size = "Standard_D1_v2" network_interface_ids = [azurerm_network_interface.test.id] storage_image_reference { @@ -226,6 +258,14 @@ resource "azurerm_virtual_machine" "test" { lun = 0 } + storage_data_disk { + name = "acctest-another-datadisk" + create_option = "Empty" + disk_size_gb = "1" + lun = 1 + managed_disk_type = "Standard_LRS" + } + os_profile { computer_name = "acctest" admin_username = "vmadmin" @@ -277,6 +317,23 @@ resource "azurerm_backup_protected_vm" "test" { recovery_vault_name = azurerm_recovery_services_vault.test.name source_vm_id = azurerm_virtual_machine.test.id backup_policy_id = azurerm_backup_policy_vm.test.id + + include_disk_luns = [0] +} +`, r.base(data)) +} + +func (r BackupProtectedVmResource) updateDiskExclusion(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_backup_protected_vm" "test" { + resource_group_name = azurerm_resource_group.test.name + recovery_vault_name = azurerm_recovery_services_vault.test.name + source_vm_id = azurerm_virtual_machine.test.id + backup_policy_id = azurerm_backup_policy_vm.test.id + + exclude_disk_luns = [0, 1] } `, r.base(data)) } diff --git a/internal/services/recoveryservices/site_recovery_replicated_vm_resource.go b/internal/services/recoveryservices/site_recovery_replicated_vm_resource.go index abf938c2414f..9ca151d7cb9c 100644 --- a/internal/services/recoveryservices/site_recovery_replicated_vm_resource.go +++ b/internal/services/recoveryservices/site_recovery_replicated_vm_resource.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" "github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2018-07-10/siterecovery" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" diff --git a/internal/services/relay/relay_hybrid_connection_authorization_rule_resource.go b/internal/services/relay/relay_hybrid_connection_authorization_rule_resource.go index 937e6a4eb743..d6c2935eef7a 100644 --- a/internal/services/relay/relay_hybrid_connection_authorization_rule_resource.go +++ b/internal/services/relay/relay_hybrid_connection_authorization_rule_resource.go @@ -23,7 +23,7 @@ func resourceRelayHybridConnectionAuthorizationRule() *pluginsdk.Resource { Delete: resourceRelayHybridConnectionAuthorizationRuleDelete, Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { - _, err := hybridconnections.ParseAuthorizationRuleID(id) + _, err := hybridconnections.ParseHybridConnectionAuthorizationRuleID(id) return err }), @@ -69,7 +69,7 @@ func resourceRelayHybridConnectionAuthorizationRuleCreateUpdate(d *pluginsdk.Res log.Printf("[INFO] preparing arguments for Relay HybridConnection Authorization Rule creation.") - resourceId := hybridconnections.NewAuthorizationRuleID(subscriptionId, 
d.Get("resource_group_name").(string), d.Get("namespace_name").(string), d.Get("hybrid_connection_name").(string), d.Get("name").(string)) + resourceId := hybridconnections.NewHybridConnectionAuthorizationRuleID(subscriptionId, d.Get("resource_group_name").(string), d.Get("namespace_name").(string), d.Get("hybrid_connection_name").(string), d.Get("name").(string)) if d.IsNewResource() { existing, err := client.GetAuthorizationRule(ctx, resourceId) if err != nil { @@ -83,7 +83,7 @@ func resourceRelayHybridConnectionAuthorizationRuleCreateUpdate(d *pluginsdk.Res } parameters := hybridconnections.AuthorizationRule{ - Name: utils.String(resourceId.Name), + Name: utils.String(resourceId.AuthorizationRuleName), Properties: hybridconnections.AuthorizationRuleProperties{ Rights: expandHybridConnectionAuthorizationRuleRights(d), }, @@ -103,7 +103,7 @@ func resourceRelayHybridConnectionAuthorizationRuleRead(d *pluginsdk.ResourceDat ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := hybridconnections.ParseAuthorizationRuleID(d.Id()) + id, err := hybridconnections.ParseHybridConnectionAuthorizationRuleID(d.Id()) if err != nil { return err } @@ -122,10 +122,10 @@ func resourceRelayHybridConnectionAuthorizationRuleRead(d *pluginsdk.ResourceDat return fmt.Errorf("listing keys for %s: %+v", id, err) } - d.Set("name", id.Name) + d.Set("name", id.AuthorizationRuleName) d.Set("hybrid_connection_name", id.HybridConnectionName) d.Set("namespace_name", id.NamespaceName) - d.Set("resource_group_name", id.ResourceGroup) + d.Set("resource_group_name", id.ResourceGroupName) if model := resp.Model; model != nil { listen, send, manage := flattenHybridConnectionAuthorizationRuleRights(model.Properties.Rights) @@ -147,7 +147,7 @@ func resourceRelayHybridConnectionAuthorizationRuleDelete(d *pluginsdk.ResourceD ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := hybridconnections.ParseAuthorizationRuleID(d.Id()) + id, err := hybridconnections.ParseHybridConnectionAuthorizationRuleID(d.Id()) if err != nil { return err } diff --git a/internal/services/relay/relay_hybrid_connection_authorization_rule_resource_test.go b/internal/services/relay/relay_hybrid_connection_authorization_rule_resource_test.go index 031f2e3fa2dc..0d40a1e9cf0b 100644 --- a/internal/services/relay/relay_hybrid_connection_authorization_rule_resource_test.go +++ b/internal/services/relay/relay_hybrid_connection_authorization_rule_resource_test.go @@ -52,7 +52,7 @@ func TestAccRelayHybridConnectionAuthorizationRule_requiresImport(t *testing.T) } func (t RelayHybridConnectionAuthorizationRuleResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := hybridconnections.ParseAuthorizationRuleID(state.ID) + id, err := hybridconnections.ParseHybridConnectionAuthorizationRuleID(state.ID) if err != nil { return nil, err } diff --git a/internal/services/relay/relay_hybrid_connection_resource.go b/internal/services/relay/relay_hybrid_connection_resource.go index 094421b4446c..2dd3b7e3ab73 100644 --- a/internal/services/relay/relay_hybrid_connection_resource.go +++ b/internal/services/relay/relay_hybrid_connection_resource.go @@ -99,7 +99,7 @@ func resourceArmRelayHybridConnectionCreateUpdate(d *pluginsdk.ResourceData, met } if _, err := client.CreateOrUpdate(ctx, id, parameters); err != nil { - return fmt.Errorf("creating/updating Hybrid Connection %q (Namespace %q Resource Group %q): %+v", id.Name, 
id.NamespaceName, id.ResourceGroup, err) + return fmt.Errorf("creating/updating %s: %+v", id, err) } d.SetId(id.ID()) @@ -126,9 +126,9 @@ func resourceArmRelayHybridConnectionRead(d *pluginsdk.ResourceData, meta interf return fmt.Errorf("retrieving %s: %+v", *id, err) } - d.Set("name", id.Name) + d.Set("name", id.HybridConnectionName) d.Set("relay_namespace_name", id.NamespaceName) - d.Set("resource_group_name", id.ResourceGroup) + d.Set("resource_group_name", id.ResourceGroupName) if model := resp.Model; model != nil { if props := model.Properties; props != nil { @@ -151,7 +151,7 @@ func resourceArmRelayHybridConnectionDelete(d *pluginsdk.ResourceData, meta inte } if _, err := client.Delete(ctx, *id); err != nil { - return fmt.Errorf("deleting Hybrid Connection %q (Namespace %q / Resource Group %q): %+v", id.NamespaceName, id.NamespaceName, id.ResourceGroup, err) + return fmt.Errorf("deleting %s: %+v", *id, err) } // we can't make use of the Future here due to a bug where 404 isn't tracked as Successful diff --git a/internal/services/relay/relay_namespace_authorization_rule_resource.go b/internal/services/relay/relay_namespace_authorization_rule_resource.go index 30b126abb310..d2c5f849153c 100644 --- a/internal/services/relay/relay_namespace_authorization_rule_resource.go +++ b/internal/services/relay/relay_namespace_authorization_rule_resource.go @@ -77,7 +77,7 @@ func resourceRelayNamespaceAuthorizationRuleCreateUpdate(d *pluginsdk.ResourceDa } parameters := namespaces.AuthorizationRule{ - Name: utils.String(resourceId.Name), + Name: utils.String(resourceId.AuthorizationRuleName), Properties: namespaces.AuthorizationRuleProperties{ Rights: expandAuthorizationRuleRights(d), }, @@ -116,9 +116,9 @@ func resourceRelayNamespaceAuthorizationRuleRead(d *pluginsdk.ResourceData, meta return fmt.Errorf("listing keys for %s: %+v", id, err) } - d.Set("name", id.Name) + d.Set("name", id.AuthorizationRuleName) d.Set("namespace_name", id.NamespaceName) - d.Set("resource_group_name", id.ResourceGroup) + d.Set("resource_group_name", id.ResourceGroupName) if model := resp.Model; model != nil { listen, send, manage := flattenAuthorizationRuleRights(model.Properties.Rights) diff --git a/internal/services/relay/relay_namespace_resource.go b/internal/services/relay/relay_namespace_resource.go index 4a8e38273926..af5565f0ed5f 100644 --- a/internal/services/relay/relay_namespace_resource.go +++ b/internal/services/relay/relay_namespace_resource.go @@ -151,14 +151,14 @@ func resourceRelayNamespaceRead(d *pluginsdk.ResourceData, meta interface{}) err return fmt.Errorf("retrieving %s: %+v", *id, err) } - authRuleId := namespaces.NewAuthorizationRuleID(id.SubscriptionId, id.ResourceGroup, id.Name, "RootManageSharedAccessKey") + authRuleId := namespaces.NewAuthorizationRuleID(id.SubscriptionId, id.ResourceGroupName, id.NamespaceName, "RootManageSharedAccessKey") keysResp, err := client.ListKeys(ctx, authRuleId) if err != nil { return fmt.Errorf("listing keys for %s: %+v", *id, err) } - d.Set("name", id.Name) - d.Set("resource_group_name", id.ResourceGroup) + d.Set("name", id.NamespaceName) + d.Set("resource_group_name", id.ResourceGroupName) if model := resp.Model; model != nil { d.Set("location", location.Normalize(model.Location)) @@ -201,7 +201,7 @@ func resourceRelayNamespaceDelete(d *pluginsdk.ResourceData, meta interface{}) e } // we can't make use of the Future here due to a bug where 404 isn't tracked as Successful - log.Printf("[DEBUG] Waiting for Relay Namespace %q (Resource Group %q) to be deleted", 
id.Name, id.ResourceGroup) + log.Printf("[DEBUG] Waiting for %s to be deleted", *id) stateConf := &pluginsdk.StateChangeConf{ Pending: []string{"Pending"}, Target: []string{"Deleted"}, diff --git a/internal/services/relay/sdk/2017-04-01/hybridconnections/constants.go b/internal/services/relay/sdk/2017-04-01/hybridconnections/constants.go index 7dd1d036c77c..36221384643e 100644 --- a/internal/services/relay/sdk/2017-04-01/hybridconnections/constants.go +++ b/internal/services/relay/sdk/2017-04-01/hybridconnections/constants.go @@ -1,5 +1,7 @@ package hybridconnections +import "strings" + type AccessRights string const ( @@ -8,9 +10,53 @@ const ( AccessRightsSend AccessRights = "Send" ) +func PossibleValuesForAccessRights() []string { + return []string{ + string(AccessRightsListen), + string(AccessRightsManage), + string(AccessRightsSend), + } +} + +func parseAccessRights(input string) (*AccessRights, error) { + vals := map[string]AccessRights{ + "listen": AccessRightsListen, + "manage": AccessRightsManage, + "send": AccessRightsSend, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AccessRights(input) + return &out, nil +} + type KeyType string const ( KeyTypePrimaryKey KeyType = "PrimaryKey" KeyTypeSecondaryKey KeyType = "SecondaryKey" ) + +func PossibleValuesForKeyType() []string { + return []string{ + string(KeyTypePrimaryKey), + string(KeyTypeSecondaryKey), + } +} + +func parseKeyType(input string) (*KeyType, error) { + vals := map[string]KeyType{ + "primarykey": KeyTypePrimaryKey, + "secondarykey": KeyTypeSecondaryKey, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := KeyType(input) + return &out, nil +} diff --git a/internal/services/relay/sdk/2017-04-01/hybridconnections/id_authorizationrule.go b/internal/services/relay/sdk/2017-04-01/hybridconnections/id_authorizationrule.go deleted file mode 100644 index 1e742d98c18c..000000000000 --- a/internal/services/relay/sdk/2017-04-01/hybridconnections/id_authorizationrule.go +++ /dev/null @@ -1,144 +0,0 @@ -package hybridconnections - -import ( - "fmt" - "strings" - - "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" -) - -type AuthorizationRuleId struct { - SubscriptionId string - ResourceGroup string - NamespaceName string - HybridConnectionName string - Name string -} - -func NewAuthorizationRuleID(subscriptionId, resourceGroup, namespaceName, hybridConnectionName, name string) AuthorizationRuleId { - return AuthorizationRuleId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, - NamespaceName: namespaceName, - HybridConnectionName: hybridConnectionName, - Name: name, - } -} - -func (id AuthorizationRuleId) String() string { - segments := []string{ - fmt.Sprintf("Name %q", id.Name), - fmt.Sprintf("Hybrid Connection Name %q", id.HybridConnectionName), - fmt.Sprintf("Namespace Name %q", id.NamespaceName), - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Authorization Rule", segmentsStr) -} - -func (id AuthorizationRuleId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Relay/namespaces/%s/hybridConnections/%s/authorizationRules/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.NamespaceName, id.HybridConnectionName, id.Name) -} - -// 
ParseAuthorizationRuleID parses a AuthorizationRule ID into an AuthorizationRuleId struct -func ParseAuthorizationRuleID(input string) (*AuthorizationRuleId, error) { - id, err := resourceids.ParseAzureResourceID(input) - if err != nil { - return nil, err - } - - resourceId := AuthorizationRuleId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, - } - - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") - } - - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") - } - - if resourceId.NamespaceName, err = id.PopSegment("namespaces"); err != nil { - return nil, err - } - if resourceId.HybridConnectionName, err = id.PopSegment("hybridConnections"); err != nil { - return nil, err - } - if resourceId.Name, err = id.PopSegment("authorizationRules"); err != nil { - return nil, err - } - - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err - } - - return &resourceId, nil -} - -// ParseAuthorizationRuleIDInsensitively parses an AuthorizationRule ID into an AuthorizationRuleId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseAuthorizationRuleID method should be used instead for validation etc. -func ParseAuthorizationRuleIDInsensitively(input string) (*AuthorizationRuleId, error) { - id, err := resourceids.ParseAzureResourceID(input) - if err != nil { - return nil, err - } - - resourceId := AuthorizationRuleId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, - } - - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") - } - - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") - } - - // find the correct casing for the 'namespaces' segment - namespacesKey := "namespaces" - for key := range id.Path { - if strings.EqualFold(key, namespacesKey) { - namespacesKey = key - break - } - } - if resourceId.NamespaceName, err = id.PopSegment(namespacesKey); err != nil { - return nil, err - } - - // find the correct casing for the 'hybridConnections' segment - hybridConnectionsKey := "hybridConnections" - for key := range id.Path { - if strings.EqualFold(key, hybridConnectionsKey) { - hybridConnectionsKey = key - break - } - } - if resourceId.HybridConnectionName, err = id.PopSegment(hybridConnectionsKey); err != nil { - return nil, err - } - - // find the correct casing for the 'authorizationRules' segment - authorizationRulesKey := "authorizationRules" - for key := range id.Path { - if strings.EqualFold(key, authorizationRulesKey) { - authorizationRulesKey = key - break - } - } - if resourceId.Name, err = id.PopSegment(authorizationRulesKey); err != nil { - return nil, err - } - - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err - } - - return &resourceId, nil -} diff --git a/internal/services/relay/sdk/2017-04-01/hybridconnections/id_authorizationrule_test.go b/internal/services/relay/sdk/2017-04-01/hybridconnections/id_authorizationrule_test.go deleted file mode 100644 index 75aed7d91032..000000000000 --- a/internal/services/relay/sdk/2017-04-01/hybridconnections/id_authorizationrule_test.go +++ /dev/null @@ -1,297 +0,0 @@ -package hybridconnections - -import ( - "testing" - - "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" -) - -var _ resourceids.Id = AuthorizationRuleId{} - -func 
TestAuthorizationRuleIDFormatter(t *testing.T) { - actual := NewAuthorizationRuleID("{subscriptionId}", "{resourceGroupName}", "{namespaceName}", "{hybridConnectionName}", "{authorizationRuleName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/hybridConnections/{hybridConnectionName}/authorizationRules/{authorizationRuleName}" - if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) - } -} - -func TestParseAuthorizationRuleID(t *testing.T) { - testData := []struct { - Input string - Error bool - Expected *AuthorizationRuleId - }{ - - { - // empty - Input: "", - Error: true, - }, - - { - // missing SubscriptionId - Input: "/", - Error: true, - }, - - { - // missing value for SubscriptionId - Input: "/subscriptions/", - Error: true, - }, - - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", - Error: true, - }, - - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", - Error: true, - }, - - { - // missing NamespaceName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/", - Error: true, - }, - - { - // missing value for NamespaceName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/", - Error: true, - }, - - { - // missing HybridConnectionName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/", - Error: true, - }, - - { - // missing value for HybridConnectionName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/hybridConnections/", - Error: true, - }, - - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/hybridConnections/{hybridConnectionName}/", - Error: true, - }, - - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/hybridConnections/{hybridConnectionName}/authorizationRules/", - Error: true, - }, - - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/hybridConnections/{hybridConnectionName}/authorizationRules/{authorizationRuleName}", - Expected: &AuthorizationRuleId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - NamespaceName: "{namespaceName}", - HybridConnectionName: "{hybridConnectionName}", - Name: "{authorizationRuleName}", - }, - }, - - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.RELAY/NAMESPACES/{NAMESPACENAME}/HYBRIDCONNECTIONS/{HYBRIDCONNECTIONNAME}/AUTHORIZATIONRULES/{AUTHORIZATIONRULENAME}", - Error: true, - }, - } - - for _, v := range testData { - t.Logf("[DEBUG] Testing %q", v.Input) - - actual, err := ParseAuthorizationRuleID(v.Input) - if err != nil { - if v.Error { - continue - } - - t.Fatalf("Expect a value but got an error: %s", err) - } - if v.Error { - t.Fatal("Expect an error but didn't get one") - } - - if actual.SubscriptionId != v.Expected.SubscriptionId { - t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) - } - if actual.ResourceGroup != 
v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) - } - if actual.NamespaceName != v.Expected.NamespaceName { - t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) - } - if actual.HybridConnectionName != v.Expected.HybridConnectionName { - t.Fatalf("Expected %q but got %q for HybridConnectionName", v.Expected.HybridConnectionName, actual.HybridConnectionName) - } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) - } - } -} - -func TestParseAuthorizationRuleIDInsensitively(t *testing.T) { - testData := []struct { - Input string - Error bool - Expected *AuthorizationRuleId - }{ - - { - // empty - Input: "", - Error: true, - }, - - { - // missing SubscriptionId - Input: "/", - Error: true, - }, - - { - // missing value for SubscriptionId - Input: "/subscriptions/", - Error: true, - }, - - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", - Error: true, - }, - - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", - Error: true, - }, - - { - // missing NamespaceName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/", - Error: true, - }, - - { - // missing value for NamespaceName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/", - Error: true, - }, - - { - // missing HybridConnectionName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/", - Error: true, - }, - - { - // missing value for HybridConnectionName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/hybridConnections/", - Error: true, - }, - - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/hybridConnections/{hybridConnectionName}/", - Error: true, - }, - - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/hybridConnections/{hybridConnectionName}/authorizationRules/", - Error: true, - }, - - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/hybridConnections/{hybridConnectionName}/authorizationRules/{authorizationRuleName}", - Expected: &AuthorizationRuleId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - NamespaceName: "{namespaceName}", - HybridConnectionName: "{hybridConnectionName}", - Name: "{authorizationRuleName}", - }, - }, - - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/hybridconnections/{hybridConnectionName}/authorizationrules/{authorizationRuleName}", - Expected: &AuthorizationRuleId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - NamespaceName: "{namespaceName}", - HybridConnectionName: "{hybridConnectionName}", - Name: "{authorizationRuleName}", - }, - }, - - { - // upper-cased segment names - Input: 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/NAMESPACES/{namespaceName}/HYBRIDCONNECTIONS/{hybridConnectionName}/AUTHORIZATIONRULES/{authorizationRuleName}", - Expected: &AuthorizationRuleId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - NamespaceName: "{namespaceName}", - HybridConnectionName: "{hybridConnectionName}", - Name: "{authorizationRuleName}", - }, - }, - - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/NaMeSpAcEs/{namespaceName}/HyBrIdCoNnEcTiOnS/{hybridConnectionName}/AuThOrIzAtIoNrUlEs/{authorizationRuleName}", - Expected: &AuthorizationRuleId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - NamespaceName: "{namespaceName}", - HybridConnectionName: "{hybridConnectionName}", - Name: "{authorizationRuleName}", - }, - }, - } - - for _, v := range testData { - t.Logf("[DEBUG] Testing %q", v.Input) - - actual, err := ParseAuthorizationRuleIDInsensitively(v.Input) - if err != nil { - if v.Error { - continue - } - - t.Fatalf("Expect a value but got an error: %s", err) - } - if v.Error { - t.Fatal("Expect an error but didn't get one") - } - - if actual.SubscriptionId != v.Expected.SubscriptionId { - t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) - } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) - } - if actual.NamespaceName != v.Expected.NamespaceName { - t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) - } - if actual.HybridConnectionName != v.Expected.HybridConnectionName { - t.Fatalf("Expected %q but got %q for HybridConnectionName", v.Expected.HybridConnectionName, actual.HybridConnectionName) - } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) - } - } -} diff --git a/internal/services/relay/sdk/2017-04-01/hybridconnections/id_hybridconnection.go b/internal/services/relay/sdk/2017-04-01/hybridconnections/id_hybridconnection.go index fa8a4ad74988..38233f8f7b70 100644 --- a/internal/services/relay/sdk/2017-04-01/hybridconnections/id_hybridconnection.go +++ b/internal/services/relay/sdk/2017-04-01/hybridconnections/id_hybridconnection.go @@ -7,120 +7,131 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = HybridConnectionId{} + +// HybridConnectionId is a struct representing the Resource ID for a Hybrid Connection type HybridConnectionId struct { - SubscriptionId string - ResourceGroup string - NamespaceName string - Name string + SubscriptionId string + ResourceGroupName string + NamespaceName string + HybridConnectionName string } -func NewHybridConnectionID(subscriptionId, resourceGroup, namespaceName, name string) HybridConnectionId { +// NewHybridConnectionID returns a new HybridConnectionId struct +func NewHybridConnectionID(subscriptionId string, resourceGroupName string, namespaceName string, hybridConnectionName string) HybridConnectionId { return HybridConnectionId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, - NamespaceName: namespaceName, - Name: name, + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NamespaceName: namespaceName, + HybridConnectionName: hybridConnectionName, } } 
-func (id HybridConnectionId) String() string { - segments := []string{ - fmt.Sprintf("Name %q", id.Name), - fmt.Sprintf("Namespace Name %q", id.NamespaceName), - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Hybrid Connection", segmentsStr) -} - -func (id HybridConnectionId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Relay/namespaces/%s/hybridConnections/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.NamespaceName, id.Name) -} - -// ParseHybridConnectionID parses a HybridConnection ID into an HybridConnectionId struct +// ParseHybridConnectionID parses 'input' into a HybridConnectionId func ParseHybridConnectionID(input string) (*HybridConnectionId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(HybridConnectionId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := HybridConnectionId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, - } + var ok bool + id := HybridConnectionId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.NamespaceName, err = id.PopSegment("namespaces"); err != nil { - return nil, err - } - if resourceId.Name, err = id.PopSegment("hybridConnections"); err != nil { - return nil, err + if id.NamespaceName, ok = parsed.Parsed["namespaceName"]; !ok { + return nil, fmt.Errorf("the segment 'namespaceName' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.HybridConnectionName, ok = parsed.Parsed["hybridConnectionName"]; !ok { + return nil, fmt.Errorf("the segment 'hybridConnectionName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParseHybridConnectionIDInsensitively parses an HybridConnection ID into an HybridConnectionId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseHybridConnectionID method should be used instead for validation etc. 
+// ParseHybridConnectionIDInsensitively parses 'input' case-insensitively into a HybridConnectionId +// note: this method should only be used for API response data and not user input func ParseHybridConnectionIDInsensitively(input string) (*HybridConnectionId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(HybridConnectionId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := HybridConnectionId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, - } + var ok bool + id := HybridConnectionId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - // find the correct casing for the 'namespaces' segment - namespacesKey := "namespaces" - for key := range id.Path { - if strings.EqualFold(key, namespacesKey) { - namespacesKey = key - break - } + if id.NamespaceName, ok = parsed.Parsed["namespaceName"]; !ok { + return nil, fmt.Errorf("the segment 'namespaceName' was not found in the resource id %q", input) } - if resourceId.NamespaceName, err = id.PopSegment(namespacesKey); err != nil { - return nil, err + + if id.HybridConnectionName, ok = parsed.Parsed["hybridConnectionName"]; !ok { + return nil, fmt.Errorf("the segment 'hybridConnectionName' was not found in the resource id %q", input) } - // find the correct casing for the 'hybridConnections' segment - hybridConnectionsKey := "hybridConnections" - for key := range id.Path { - if strings.EqualFold(key, hybridConnectionsKey) { - hybridConnectionsKey = key - break - } + return &id, nil +} + +// ValidateHybridConnectionID checks that 'input' can be parsed as a Hybrid Connection ID +func ValidateHybridConnectionID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.Name, err = id.PopSegment(hybridConnectionsKey); err != nil { - return nil, err + + if _, err := ParseHybridConnectionID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Hybrid Connection ID +func (id HybridConnectionId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Relay/namespaces/%s/hybridConnections/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NamespaceName, id.HybridConnectionName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Hybrid Connection ID +func (id HybridConnectionId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + 
resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("providers", "providers", "providers"), + resourceids.ResourceProviderSegment("microsoftRelay", "Microsoft.Relay", "Microsoft.Relay"), + resourceids.StaticSegment("namespaces", "namespaces", "namespaces"), + resourceids.UserSpecifiedSegment("namespaceName", "namespaceValue"), + resourceids.StaticSegment("hybridConnections", "hybridConnections", "hybridConnections"), + resourceids.UserSpecifiedSegment("hybridConnectionName", "hybridConnectionValue"), } +} - return &resourceId, nil +// String returns a human-readable description of this Hybrid Connection ID +func (id HybridConnectionId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Namespace Name: %q", id.NamespaceName), + fmt.Sprintf("Hybrid Connection Name: %q", id.HybridConnectionName), + } + return fmt.Sprintf("Hybrid Connection (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/relay/sdk/2017-04-01/hybridconnections/id_hybridconnection_test.go b/internal/services/relay/sdk/2017-04-01/hybridconnections/id_hybridconnection_test.go index 1b28659d975a..855497366dde 100644 --- a/internal/services/relay/sdk/2017-04-01/hybridconnections/id_hybridconnection_test.go +++ b/internal/services/relay/sdk/2017-04-01/hybridconnections/id_hybridconnection_test.go @@ -6,13 +6,33 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = HybridConnectionId{} +var _ resourceids.ResourceId = HybridConnectionId{} -func TestHybridConnectionIDFormatter(t *testing.T) { - actual := NewHybridConnectionID("{subscriptionId}", "{resourceGroupName}", "{namespaceName}", "{hybridConnectionName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/hybridConnections/{hybridConnectionName}" +func TestNewHybridConnectionID(t *testing.T) { + id := NewHybridConnectionID("12345678-1234-9876-4563-123456789012", "example-resource-group", "namespaceValue", "hybridConnectionValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NamespaceName != "namespaceValue" { + t.Fatalf("Expected %q but got %q for Segment 'NamespaceName'", id.NamespaceName, "namespaceValue") + } + + if id.HybridConnectionName != "hybridConnectionValue" { + t.Fatalf("Expected %q but got %q for Segment 'HybridConnectionName'", id.HybridConnectionName, "hybridConnectionValue") + } +} + +func TestFormatHybridConnectionID(t *testing.T) { + actual := NewHybridConnectionID("12345678-1234-9876-4563-123456789012", "example-resource-group", "namespaceValue", "hybridConnectionValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/hybridConnections/hybridConnectionValue" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,79 +42,72 @@ func TestParseHybridConnectionID(t 
*testing.T) { Error bool Expected *HybridConnectionId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing NamespaceName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", Error: true, }, - { - // missing value for NamespaceName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/hybridConnections/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/hybridConnections", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/hybridConnections/{hybridConnectionName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/hybridConnections/hybridConnectionValue", Expected: &HybridConnectionId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - NamespaceName: "{namespaceName}", - Name: "{hybridConnectionName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NamespaceName: "namespaceValue", + HybridConnectionName: "hybridConnectionValue", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.RELAY/NAMESPACES/{NAMESPACENAME}/HYBRIDCONNECTIONS/{HYBRIDCONNECTIONNAME}", + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/hybridConnections/hybridConnectionValue/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -104,7 +117,7 @@ func TestParseHybridConnectionID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -113,15 +126,19 @@ func TestParseHybridConnectionID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + if actual.NamespaceName != v.Expected.NamespaceName { t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.HybridConnectionName != v.Expected.HybridConnectionName { + t.Fatalf("Expected %q but got %q for HybridConnectionName", v.Expected.HybridConnectionName, actual.HybridConnectionName) } + } } @@ -131,106 +148,132 @@ func TestParseHybridConnectionIDInsensitively(t *testing.T) { Error bool Expected *HybridConnectionId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing NamespaceName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for NamespaceName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/hybridConnections/", + // Incomplete URI (mIxEd CaSe since this 
is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/hybridConnections/{hybridConnectionName}", - Expected: &HybridConnectionId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - NamespaceName: "{namespaceName}", - Name: "{hybridConnectionName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/hybridconnections/{hybridConnectionName}", - Expected: &HybridConnectionId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - NamespaceName: "{namespaceName}", - Name: "{hybridConnectionName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/NAMESPACES/{namespaceName}/HYBRIDCONNECTIONS/{hybridConnectionName}", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/hybridConnections", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE/hYbRiDcOnNeCtIoNs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/hybridConnections/hybridConnectionValue", Expected: &HybridConnectionId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - NamespaceName: "{namespaceName}", - Name: "{hybridConnectionName}", + SubscriptionId: 
"12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NamespaceName: "namespaceValue", + HybridConnectionName: "hybridConnectionValue", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/NaMeSpAcEs/{namespaceName}/HyBrIdCoNnEcTiOnS/{hybridConnectionName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/hybridConnections/hybridConnectionValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE/hYbRiDcOnNeCtIoNs/hYbRiDcOnNeCtIoNvAlUe", Expected: &HybridConnectionId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - NamespaceName: "{namespaceName}", - Name: "{hybridConnectionName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NamespaceName: "nAmEsPaCeVaLuE", + HybridConnectionName: "hYbRiDcOnNeCtIoNvAlUe", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE/hYbRiDcOnNeCtIoNs/hYbRiDcOnNeCtIoNvAlUe/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -240,7 +283,7 @@ func TestParseHybridConnectionIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -249,14 +292,18 @@ func TestParseHybridConnectionIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + if actual.NamespaceName != v.Expected.NamespaceName { t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.HybridConnectionName != v.Expected.HybridConnectionName { + t.Fatalf("Expected %q but got %q for HybridConnectionName", v.Expected.HybridConnectionName, actual.HybridConnectionName) } + } } diff --git a/internal/services/relay/sdk/2017-04-01/hybridconnections/id_hybridconnectionauthorizationrule.go b/internal/services/relay/sdk/2017-04-01/hybridconnections/id_hybridconnectionauthorizationrule.go new file mode 100644 index 000000000000..857c0c60078f --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/hybridconnections/id_hybridconnectionauthorizationrule.go @@ -0,0 +1,150 @@ +package hybridconnections + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = 
HybridConnectionAuthorizationRuleId{} + +// HybridConnectionAuthorizationRuleId is a struct representing the Resource ID for a Hybrid Connection Authorization Rule +type HybridConnectionAuthorizationRuleId struct { + SubscriptionId string + ResourceGroupName string + NamespaceName string + HybridConnectionName string + AuthorizationRuleName string +} + +// NewHybridConnectionAuthorizationRuleID returns a new HybridConnectionAuthorizationRuleId struct +func NewHybridConnectionAuthorizationRuleID(subscriptionId string, resourceGroupName string, namespaceName string, hybridConnectionName string, authorizationRuleName string) HybridConnectionAuthorizationRuleId { + return HybridConnectionAuthorizationRuleId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NamespaceName: namespaceName, + HybridConnectionName: hybridConnectionName, + AuthorizationRuleName: authorizationRuleName, + } +} + +// ParseHybridConnectionAuthorizationRuleID parses 'input' into a HybridConnectionAuthorizationRuleId +func ParseHybridConnectionAuthorizationRuleID(input string) (*HybridConnectionAuthorizationRuleId, error) { + parser := resourceids.NewParserFromResourceIdType(HybridConnectionAuthorizationRuleId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := HybridConnectionAuthorizationRuleId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.NamespaceName, ok = parsed.Parsed["namespaceName"]; !ok { + return nil, fmt.Errorf("the segment 'namespaceName' was not found in the resource id %q", input) + } + + if id.HybridConnectionName, ok = parsed.Parsed["hybridConnectionName"]; !ok { + return nil, fmt.Errorf("the segment 'hybridConnectionName' was not found in the resource id %q", input) + } + + if id.AuthorizationRuleName, ok = parsed.Parsed["authorizationRuleName"]; !ok { + return nil, fmt.Errorf("the segment 'authorizationRuleName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParseHybridConnectionAuthorizationRuleIDInsensitively parses 'input' case-insensitively into a HybridConnectionAuthorizationRuleId +// note: this method should only be used for API response data and not user input +func ParseHybridConnectionAuthorizationRuleIDInsensitively(input string) (*HybridConnectionAuthorizationRuleId, error) { + parser := resourceids.NewParserFromResourceIdType(HybridConnectionAuthorizationRuleId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := HybridConnectionAuthorizationRuleId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.NamespaceName, ok = parsed.Parsed["namespaceName"]; !ok { + return nil, fmt.Errorf("the segment 'namespaceName' was not found in the resource id %q", input) + } + + if id.HybridConnectionName, ok = 
parsed.Parsed["hybridConnectionName"]; !ok { + return nil, fmt.Errorf("the segment 'hybridConnectionName' was not found in the resource id %q", input) + } + + if id.AuthorizationRuleName, ok = parsed.Parsed["authorizationRuleName"]; !ok { + return nil, fmt.Errorf("the segment 'authorizationRuleName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidateHybridConnectionAuthorizationRuleID checks that 'input' can be parsed as a Hybrid Connection Authorization Rule ID +func ValidateHybridConnectionAuthorizationRuleID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseHybridConnectionAuthorizationRuleID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Hybrid Connection Authorization Rule ID +func (id HybridConnectionAuthorizationRuleId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Relay/namespaces/%s/hybridConnections/%s/authorizationRules/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NamespaceName, id.HybridConnectionName, id.AuthorizationRuleName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Hybrid Connection Authorization Rule ID +func (id HybridConnectionAuthorizationRuleId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("providers", "providers", "providers"), + resourceids.ResourceProviderSegment("microsoftRelay", "Microsoft.Relay", "Microsoft.Relay"), + resourceids.StaticSegment("namespaces", "namespaces", "namespaces"), + resourceids.UserSpecifiedSegment("namespaceName", "namespaceValue"), + resourceids.StaticSegment("hybridConnections", "hybridConnections", "hybridConnections"), + resourceids.UserSpecifiedSegment("hybridConnectionName", "hybridConnectionValue"), + resourceids.StaticSegment("authorizationRules", "authorizationRules", "authorizationRules"), + resourceids.UserSpecifiedSegment("authorizationRuleName", "authorizationRuleValue"), + } +} + +// String returns a human-readable description of this Hybrid Connection Authorization Rule ID +func (id HybridConnectionAuthorizationRuleId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Namespace Name: %q", id.NamespaceName), + fmt.Sprintf("Hybrid Connection Name: %q", id.HybridConnectionName), + fmt.Sprintf("Authorization Rule Name: %q", id.AuthorizationRuleName), + } + return fmt.Sprintf("Hybrid Connection Authorization Rule (%s)", strings.Join(components, "\n")) +} diff --git a/internal/services/relay/sdk/2017-04-01/hybridconnections/id_hybridconnectionauthorizationrule_test.go b/internal/services/relay/sdk/2017-04-01/hybridconnections/id_hybridconnectionauthorizationrule_test.go new file mode 100644 index 000000000000..d97c0c972d22 --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/hybridconnections/id_hybridconnectionauthorizationrule_test.go @@ -0,0 +1,354 @@ +package 
hybridconnections + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = HybridConnectionAuthorizationRuleId{} + +func TestNewHybridConnectionAuthorizationRuleID(t *testing.T) { + id := NewHybridConnectionAuthorizationRuleID("12345678-1234-9876-4563-123456789012", "example-resource-group", "namespaceValue", "hybridConnectionValue", "authorizationRuleValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NamespaceName != "namespaceValue" { + t.Fatalf("Expected %q but got %q for Segment 'NamespaceName'", id.NamespaceName, "namespaceValue") + } + + if id.HybridConnectionName != "hybridConnectionValue" { + t.Fatalf("Expected %q but got %q for Segment 'HybridConnectionName'", id.HybridConnectionName, "hybridConnectionValue") + } + + if id.AuthorizationRuleName != "authorizationRuleValue" { + t.Fatalf("Expected %q but got %q for Segment 'AuthorizationRuleName'", id.AuthorizationRuleName, "authorizationRuleValue") + } +} + +func TestFormatHybridConnectionAuthorizationRuleID(t *testing.T) { + actual := NewHybridConnectionAuthorizationRuleID("12345678-1234-9876-4563-123456789012", "example-resource-group", "namespaceValue", "hybridConnectionValue", "authorizationRuleValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/hybridConnections/hybridConnectionValue/authorizationRules/authorizationRuleValue" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) + } +} + +func TestParseHybridConnectionAuthorizationRuleID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *HybridConnectionAuthorizationRuleId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/hybridConnections", + Error: 
true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/hybridConnections/hybridConnectionValue", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/hybridConnections/hybridConnectionValue/authorizationRules", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/hybridConnections/hybridConnectionValue/authorizationRules/authorizationRuleValue", + Expected: &HybridConnectionAuthorizationRuleId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NamespaceName: "namespaceValue", + HybridConnectionName: "hybridConnectionValue", + AuthorizationRuleName: "authorizationRuleValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/hybridConnections/hybridConnectionValue/authorizationRules/authorizationRuleValue/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseHybridConnectionAuthorizationRuleID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) + } + + if actual.HybridConnectionName != v.Expected.HybridConnectionName { + t.Fatalf("Expected %q but got %q for HybridConnectionName", v.Expected.HybridConnectionName, actual.HybridConnectionName) + } + + if actual.AuthorizationRuleName != v.Expected.AuthorizationRuleName { + t.Fatalf("Expected %q but got %q for AuthorizationRuleName", v.Expected.AuthorizationRuleName, actual.AuthorizationRuleName) + } + + } +} + +func TestParseHybridConnectionAuthorizationRuleIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *HybridConnectionAuthorizationRuleId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/hybridConnections", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE/hYbRiDcOnNeCtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/hybridConnections/hybridConnectionValue", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE/hYbRiDcOnNeCtIoNs/hYbRiDcOnNeCtIoNvAlUe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/hybridConnections/hybridConnectionValue/authorizationRules", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE/hYbRiDcOnNeCtIoNs/hYbRiDcOnNeCtIoNvAlUe/aUtHoRiZaTiOnRuLeS", + Error: true, + }, + { + // Valid URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/hybridConnections/hybridConnectionValue/authorizationRules/authorizationRuleValue", + Expected: &HybridConnectionAuthorizationRuleId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NamespaceName: "namespaceValue", + HybridConnectionName: "hybridConnectionValue", + AuthorizationRuleName: "authorizationRuleValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/hybridConnections/hybridConnectionValue/authorizationRules/authorizationRuleValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE/hYbRiDcOnNeCtIoNs/hYbRiDcOnNeCtIoNvAlUe/aUtHoRiZaTiOnRuLeS/aUtHoRiZaTiOnRuLeVaLuE", + Expected: &HybridConnectionAuthorizationRuleId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NamespaceName: "nAmEsPaCeVaLuE", + HybridConnectionName: "hYbRiDcOnNeCtIoNvAlUe", + AuthorizationRuleName: "aUtHoRiZaTiOnRuLeVaLuE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE/hYbRiDcOnNeCtIoNs/hYbRiDcOnNeCtIoNvAlUe/aUtHoRiZaTiOnRuLeS/aUtHoRiZaTiOnRuLeVaLuE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseHybridConnectionAuthorizationRuleIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) + } + + if actual.HybridConnectionName != v.Expected.HybridConnectionName { + t.Fatalf("Expected %q but got %q for HybridConnectionName", v.Expected.HybridConnectionName, actual.HybridConnectionName) + } + + if actual.AuthorizationRuleName != v.Expected.AuthorizationRuleName { + t.Fatalf("Expected %q but got %q for AuthorizationRuleName", v.Expected.AuthorizationRuleName, actual.AuthorizationRuleName) + } + + } +} diff --git a/internal/services/relay/sdk/2017-04-01/hybridconnections/id_namespace.go b/internal/services/relay/sdk/2017-04-01/hybridconnections/id_namespace.go index c32038198126..4c008d1056da 100644 --- a/internal/services/relay/sdk/2017-04-01/hybridconnections/id_namespace.go +++ b/internal/services/relay/sdk/2017-04-01/hybridconnections/id_namespace.go @@ -7,102 +7,118 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = 
NamespaceId{} + +// NamespaceId is a struct representing the Resource ID for a Namespace type NamespaceId struct { - SubscriptionId string - ResourceGroup string - Name string + SubscriptionId string + ResourceGroupName string + NamespaceName string } -func NewNamespaceID(subscriptionId, resourceGroup, name string) NamespaceId { +// NewNamespaceID returns a new NamespaceId struct +func NewNamespaceID(subscriptionId string, resourceGroupName string, namespaceName string) NamespaceId { return NamespaceId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, - Name: name, + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NamespaceName: namespaceName, } } -func (id NamespaceId) String() string { - segments := []string{ - fmt.Sprintf("Name %q", id.Name), - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Namespace", segmentsStr) -} - -func (id NamespaceId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Relay/namespaces/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.Name) -} - -// ParseNamespaceID parses a Namespace ID into an NamespaceId struct +// ParseNamespaceID parses 'input' into a NamespaceId func ParseNamespaceID(input string) (*NamespaceId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(NamespaceId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err - } - - resourceId := NamespaceId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") - } + var ok bool + id := NamespaceId{} - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.Name, err = id.PopSegment("namespaces"); err != nil { - return nil, err + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.NamespaceName, ok = parsed.Parsed["namespaceName"]; !ok { + return nil, fmt.Errorf("the segment 'namespaceName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParseNamespaceIDInsensitively parses an Namespace ID into an NamespaceId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseNamespaceID method should be used instead for validation etc. 
+// ParseNamespaceIDInsensitively parses 'input' case-insensitively into a NamespaceId +// note: this method should only be used for API response data and not user input func ParseNamespaceIDInsensitively(input string) (*NamespaceId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(NamespaceId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := NamespaceId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + var ok bool + id := NamespaceId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.NamespaceName, ok = parsed.Parsed["namespaceName"]; !ok { + return nil, fmt.Errorf("the segment 'namespaceName' was not found in the resource id %q", input) } - // find the correct casing for the 'namespaces' segment - namespacesKey := "namespaces" - for key := range id.Path { - if strings.EqualFold(key, namespacesKey) { - namespacesKey = key - break - } + return &id, nil +} + +// ValidateNamespaceID checks that 'input' can be parsed as a Namespace ID +func ValidateNamespaceID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.Name, err = id.PopSegment(namespacesKey); err != nil { - return nil, err + + if _, err := ParseNamespaceID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Namespace ID +func (id NamespaceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Relay/namespaces/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NamespaceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Namespace ID +func (id NamespaceId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("providers", "providers", "providers"), + resourceids.ResourceProviderSegment("microsoftRelay", "Microsoft.Relay", "Microsoft.Relay"), + resourceids.StaticSegment("namespaces", "namespaces", "namespaces"), + resourceids.UserSpecifiedSegment("namespaceName", "namespaceValue"), } +} - return &resourceId, nil +// String returns a human-readable description of this Namespace ID +func (id NamespaceId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Namespace 
Name: %q", id.NamespaceName), + } + return fmt.Sprintf("Namespace (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/relay/sdk/2017-04-01/hybridconnections/id_namespace_test.go b/internal/services/relay/sdk/2017-04-01/hybridconnections/id_namespace_test.go index ddee085f3756..be321bb45b9e 100644 --- a/internal/services/relay/sdk/2017-04-01/hybridconnections/id_namespace_test.go +++ b/internal/services/relay/sdk/2017-04-01/hybridconnections/id_namespace_test.go @@ -6,13 +6,29 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = NamespaceId{} +var _ resourceids.ResourceId = NamespaceId{} -func TestNamespaceIDFormatter(t *testing.T) { - actual := NewNamespaceID("{subscriptionId}", "{resourceGroupName}", "{namespaceName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}" +func TestNewNamespaceID(t *testing.T) { + id := NewNamespaceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "namespaceValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NamespaceName != "namespaceValue" { + t.Fatalf("Expected %q but got %q for Segment 'NamespaceName'", id.NamespaceName, "namespaceValue") + } +} + +func TestFormatNamespaceID(t *testing.T) { + actual := NewNamespaceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "namespaceValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,66 +38,61 @@ func TestParseNamespaceID(t *testing.T) { Error bool Expected *NamespaceId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay", Error: 
true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue", Expected: &NamespaceId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{namespaceName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NamespaceName: "namespaceValue", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.RELAY/NAMESPACES/{NAMESPACENAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -91,7 +102,7 @@ func TestParseNamespaceID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -100,12 +111,15 @@ func TestParseNamespaceID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) } + } } @@ -115,90 +129,110 @@ func TestParseNamespaceIDInsensitively(t *testing.T) { Error bool Expected *NamespaceId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for Name - Input: 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}", - Expected: &NamespaceId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{namespaceName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}", - Expected: &NamespaceId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{namespaceName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/NAMESPACES/{namespaceName}", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue", Expected: &NamespaceId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{namespaceName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NamespaceName: "namespaceValue", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/NaMeSpAcEs/{namespaceName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE", Expected: &NamespaceId{ - 
SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{namespaceName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NamespaceName: "nAmEsPaCeVaLuE", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -208,7 +242,7 @@ func TestParseNamespaceIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -217,11 +251,14 @@ func TestParseNamespaceIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) } + } } diff --git a/internal/services/relay/sdk/2017-04-01/hybridconnections/method_createorupdateauthorizationrule_autorest.go b/internal/services/relay/sdk/2017-04-01/hybridconnections/method_createorupdateauthorizationrule_autorest.go index fc209e2aa4b1..156edac9b490 100644 --- a/internal/services/relay/sdk/2017-04-01/hybridconnections/method_createorupdateauthorizationrule_autorest.go +++ b/internal/services/relay/sdk/2017-04-01/hybridconnections/method_createorupdateauthorizationrule_autorest.go @@ -14,7 +14,7 @@ type CreateOrUpdateAuthorizationRuleResponse struct { } // CreateOrUpdateAuthorizationRule ... -func (c HybridConnectionsClient) CreateOrUpdateAuthorizationRule(ctx context.Context, id AuthorizationRuleId, input AuthorizationRule) (result CreateOrUpdateAuthorizationRuleResponse, err error) { +func (c HybridConnectionsClient) CreateOrUpdateAuthorizationRule(ctx context.Context, id HybridConnectionAuthorizationRuleId, input AuthorizationRule) (result CreateOrUpdateAuthorizationRuleResponse, err error) { req, err := c.preparerForCreateOrUpdateAuthorizationRule(ctx, id, input) if err != nil { err = autorest.NewErrorWithError(err, "hybridconnections.HybridConnectionsClient", "CreateOrUpdateAuthorizationRule", nil, "Failure preparing request") @@ -37,7 +37,7 @@ func (c HybridConnectionsClient) CreateOrUpdateAuthorizationRule(ctx context.Con } // preparerForCreateOrUpdateAuthorizationRule prepares the CreateOrUpdateAuthorizationRule request. 
-func (c HybridConnectionsClient) preparerForCreateOrUpdateAuthorizationRule(ctx context.Context, id AuthorizationRuleId, input AuthorizationRule) (*http.Request, error) { +func (c HybridConnectionsClient) preparerForCreateOrUpdateAuthorizationRule(ctx context.Context, id HybridConnectionAuthorizationRuleId, input AuthorizationRule) (*http.Request, error) { queryParameters := map[string]interface{}{ "api-version": defaultApiVersion, } diff --git a/internal/services/relay/sdk/2017-04-01/hybridconnections/method_deleteauthorizationrule_autorest.go b/internal/services/relay/sdk/2017-04-01/hybridconnections/method_deleteauthorizationrule_autorest.go index 67cebd939c79..6e09bf0926c4 100644 --- a/internal/services/relay/sdk/2017-04-01/hybridconnections/method_deleteauthorizationrule_autorest.go +++ b/internal/services/relay/sdk/2017-04-01/hybridconnections/method_deleteauthorizationrule_autorest.go @@ -13,7 +13,7 @@ type DeleteAuthorizationRuleResponse struct { } // DeleteAuthorizationRule ... -func (c HybridConnectionsClient) DeleteAuthorizationRule(ctx context.Context, id AuthorizationRuleId) (result DeleteAuthorizationRuleResponse, err error) { +func (c HybridConnectionsClient) DeleteAuthorizationRule(ctx context.Context, id HybridConnectionAuthorizationRuleId) (result DeleteAuthorizationRuleResponse, err error) { req, err := c.preparerForDeleteAuthorizationRule(ctx, id) if err != nil { err = autorest.NewErrorWithError(err, "hybridconnections.HybridConnectionsClient", "DeleteAuthorizationRule", nil, "Failure preparing request") @@ -36,7 +36,7 @@ func (c HybridConnectionsClient) DeleteAuthorizationRule(ctx context.Context, id } // preparerForDeleteAuthorizationRule prepares the DeleteAuthorizationRule request. -func (c HybridConnectionsClient) preparerForDeleteAuthorizationRule(ctx context.Context, id AuthorizationRuleId) (*http.Request, error) { +func (c HybridConnectionsClient) preparerForDeleteAuthorizationRule(ctx context.Context, id HybridConnectionAuthorizationRuleId) (*http.Request, error) { queryParameters := map[string]interface{}{ "api-version": defaultApiVersion, } diff --git a/internal/services/relay/sdk/2017-04-01/hybridconnections/method_getauthorizationrule_autorest.go b/internal/services/relay/sdk/2017-04-01/hybridconnections/method_getauthorizationrule_autorest.go index d25392bbd586..e82d8178c7f7 100644 --- a/internal/services/relay/sdk/2017-04-01/hybridconnections/method_getauthorizationrule_autorest.go +++ b/internal/services/relay/sdk/2017-04-01/hybridconnections/method_getauthorizationrule_autorest.go @@ -14,7 +14,7 @@ type GetAuthorizationRuleResponse struct { } // GetAuthorizationRule ... -func (c HybridConnectionsClient) GetAuthorizationRule(ctx context.Context, id AuthorizationRuleId) (result GetAuthorizationRuleResponse, err error) { +func (c HybridConnectionsClient) GetAuthorizationRule(ctx context.Context, id HybridConnectionAuthorizationRuleId) (result GetAuthorizationRuleResponse, err error) { req, err := c.preparerForGetAuthorizationRule(ctx, id) if err != nil { err = autorest.NewErrorWithError(err, "hybridconnections.HybridConnectionsClient", "GetAuthorizationRule", nil, "Failure preparing request") @@ -37,7 +37,7 @@ func (c HybridConnectionsClient) GetAuthorizationRule(ctx context.Context, id Au } // preparerForGetAuthorizationRule prepares the GetAuthorizationRule request. 
-func (c HybridConnectionsClient) preparerForGetAuthorizationRule(ctx context.Context, id AuthorizationRuleId) (*http.Request, error) { +func (c HybridConnectionsClient) preparerForGetAuthorizationRule(ctx context.Context, id HybridConnectionAuthorizationRuleId) (*http.Request, error) { queryParameters := map[string]interface{}{ "api-version": defaultApiVersion, } diff --git a/internal/services/relay/sdk/2017-04-01/hybridconnections/method_listkeys_autorest.go b/internal/services/relay/sdk/2017-04-01/hybridconnections/method_listkeys_autorest.go index 679f7c9f720e..67a48ff9a90e 100644 --- a/internal/services/relay/sdk/2017-04-01/hybridconnections/method_listkeys_autorest.go +++ b/internal/services/relay/sdk/2017-04-01/hybridconnections/method_listkeys_autorest.go @@ -15,7 +15,7 @@ type ListKeysResponse struct { } // ListKeys ... -func (c HybridConnectionsClient) ListKeys(ctx context.Context, id AuthorizationRuleId) (result ListKeysResponse, err error) { +func (c HybridConnectionsClient) ListKeys(ctx context.Context, id HybridConnectionAuthorizationRuleId) (result ListKeysResponse, err error) { req, err := c.preparerForListKeys(ctx, id) if err != nil { err = autorest.NewErrorWithError(err, "hybridconnections.HybridConnectionsClient", "ListKeys", nil, "Failure preparing request") @@ -38,7 +38,7 @@ func (c HybridConnectionsClient) ListKeys(ctx context.Context, id AuthorizationR } // preparerForListKeys prepares the ListKeys request. -func (c HybridConnectionsClient) preparerForListKeys(ctx context.Context, id AuthorizationRuleId) (*http.Request, error) { +func (c HybridConnectionsClient) preparerForListKeys(ctx context.Context, id HybridConnectionAuthorizationRuleId) (*http.Request, error) { queryParameters := map[string]interface{}{ "api-version": defaultApiVersion, } diff --git a/internal/services/relay/sdk/2017-04-01/hybridconnections/method_regeneratekeys_autorest.go b/internal/services/relay/sdk/2017-04-01/hybridconnections/method_regeneratekeys_autorest.go index 3103812a0c82..b4c1cbc34a90 100644 --- a/internal/services/relay/sdk/2017-04-01/hybridconnections/method_regeneratekeys_autorest.go +++ b/internal/services/relay/sdk/2017-04-01/hybridconnections/method_regeneratekeys_autorest.go @@ -15,7 +15,7 @@ type RegenerateKeysResponse struct { } // RegenerateKeys ... -func (c HybridConnectionsClient) RegenerateKeys(ctx context.Context, id AuthorizationRuleId, input RegenerateAccessKeyParameters) (result RegenerateKeysResponse, err error) { +func (c HybridConnectionsClient) RegenerateKeys(ctx context.Context, id HybridConnectionAuthorizationRuleId, input RegenerateAccessKeyParameters) (result RegenerateKeysResponse, err error) { req, err := c.preparerForRegenerateKeys(ctx, id, input) if err != nil { err = autorest.NewErrorWithError(err, "hybridconnections.HybridConnectionsClient", "RegenerateKeys", nil, "Failure preparing request") @@ -38,7 +38,7 @@ func (c HybridConnectionsClient) RegenerateKeys(ctx context.Context, id Authoriz } // preparerForRegenerateKeys prepares the RegenerateKeys request. 
-func (c HybridConnectionsClient) preparerForRegenerateKeys(ctx context.Context, id AuthorizationRuleId, input RegenerateAccessKeyParameters) (*http.Request, error) { +func (c HybridConnectionsClient) preparerForRegenerateKeys(ctx context.Context, id HybridConnectionAuthorizationRuleId, input RegenerateAccessKeyParameters) (*http.Request, error) { queryParameters := map[string]interface{}{ "api-version": defaultApiVersion, } diff --git a/internal/services/relay/sdk/2017-04-01/hybridconnections/model_hybridconnectionproperties.go b/internal/services/relay/sdk/2017-04-01/hybridconnections/model_hybridconnectionproperties.go index 810a60464d66..55933ee4b62a 100644 --- a/internal/services/relay/sdk/2017-04-01/hybridconnections/model_hybridconnectionproperties.go +++ b/internal/services/relay/sdk/2017-04-01/hybridconnections/model_hybridconnectionproperties.go @@ -1,11 +1,5 @@ package hybridconnections -import ( - "time" - - "github.com/hashicorp/go-azure-helpers/lang/dates" -) - type HybridConnectionProperties struct { CreatedAt *string `json:"createdAt,omitempty"` ListenerCount *int64 `json:"listenerCount,omitempty"` @@ -13,21 +7,3 @@ type HybridConnectionProperties struct { UpdatedAt *string `json:"updatedAt,omitempty"` UserMetadata *string `json:"userMetadata,omitempty"` } - -func (o HybridConnectionProperties) GetCreatedAtAsTime() (*time.Time, error) { - return dates.ParseAsFormat(o.CreatedAt, "2006-01-02T15:04:05Z07:00") -} - -func (o HybridConnectionProperties) SetCreatedAtAsTime(input time.Time) { - formatted := input.Format("2006-01-02T15:04:05Z07:00") - o.CreatedAt = &formatted -} - -func (o HybridConnectionProperties) GetUpdatedAtAsTime() (*time.Time, error) { - return dates.ParseAsFormat(o.UpdatedAt, "2006-01-02T15:04:05Z07:00") -} - -func (o HybridConnectionProperties) SetUpdatedAtAsTime(input time.Time) { - formatted := input.Format("2006-01-02T15:04:05Z07:00") - o.UpdatedAt = &formatted -} diff --git a/internal/services/relay/sdk/2017-04-01/hybridconnections/predicates.go b/internal/services/relay/sdk/2017-04-01/hybridconnections/predicates.go index d7b295a37258..6f8e25be714e 100644 --- a/internal/services/relay/sdk/2017-04-01/hybridconnections/predicates.go +++ b/internal/services/relay/sdk/2017-04-01/hybridconnections/predicates.go @@ -7,6 +7,7 @@ type AuthorizationRulePredicate struct { } func (p AuthorizationRulePredicate) Matches(input AuthorizationRule) bool { + if p.Id != nil && (input.Id == nil && *p.Id != *input.Id) { return false } @@ -29,6 +30,7 @@ type HybridConnectionPredicate struct { } func (p HybridConnectionPredicate) Matches(input HybridConnection) bool { + if p.Id != nil && (input.Id == nil && *p.Id != *input.Id) { return false } diff --git a/internal/services/relay/sdk/2017-04-01/namespaces/constants.go b/internal/services/relay/sdk/2017-04-01/namespaces/constants.go index 4d8eb92bb26c..c3b3ef880d7e 100644 --- a/internal/services/relay/sdk/2017-04-01/namespaces/constants.go +++ b/internal/services/relay/sdk/2017-04-01/namespaces/constants.go @@ -1,5 +1,7 @@ package namespaces +import "strings" + type AccessRights string const ( @@ -8,6 +10,29 @@ const ( AccessRightsSend AccessRights = "Send" ) +func PossibleValuesForAccessRights() []string { + return []string{ + string(AccessRightsListen), + string(AccessRightsManage), + string(AccessRightsSend), + } +} + +func parseAccessRights(input string) (*AccessRights, error) { + vals := map[string]AccessRights{ + "listen": AccessRightsListen, + "manage": AccessRightsManage, + "send": AccessRightsSend, + } + if v, ok := 
vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AccessRights(input) + return &out, nil +} + type KeyType string const ( @@ -15,6 +40,27 @@ const ( KeyTypeSecondaryKey KeyType = "SecondaryKey" ) +func PossibleValuesForKeyType() []string { + return []string{ + string(KeyTypePrimaryKey), + string(KeyTypeSecondaryKey), + } +} + +func parseKeyType(input string) (*KeyType, error) { + vals := map[string]KeyType{ + "primarykey": KeyTypePrimaryKey, + "secondarykey": KeyTypeSecondaryKey, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := KeyType(input) + return &out, nil +} + type ProvisioningStateEnum string const ( @@ -26,18 +72,85 @@ const ( ProvisioningStateEnumUpdating ProvisioningStateEnum = "Updating" ) +func PossibleValuesForProvisioningStateEnum() []string { + return []string{ + string(ProvisioningStateEnumCreated), + string(ProvisioningStateEnumDeleted), + string(ProvisioningStateEnumFailed), + string(ProvisioningStateEnumSucceeded), + string(ProvisioningStateEnumUnknown), + string(ProvisioningStateEnumUpdating), + } +} + +func parseProvisioningStateEnum(input string) (*ProvisioningStateEnum, error) { + vals := map[string]ProvisioningStateEnum{ + "created": ProvisioningStateEnumCreated, + "deleted": ProvisioningStateEnumDeleted, + "failed": ProvisioningStateEnumFailed, + "succeeded": ProvisioningStateEnumSucceeded, + "unknown": ProvisioningStateEnumUnknown, + "updating": ProvisioningStateEnumUpdating, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProvisioningStateEnum(input) + return &out, nil +} + type SkuName string const ( SkuNameStandard SkuName = "Standard" ) +func PossibleValuesForSkuName() []string { + return []string{ + string(SkuNameStandard), + } +} + +func parseSkuName(input string) (*SkuName, error) { + vals := map[string]SkuName{ + "standard": SkuNameStandard, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SkuName(input) + return &out, nil +} + type SkuTier string const ( SkuTierStandard SkuTier = "Standard" ) +func PossibleValuesForSkuTier() []string { + return []string{ + string(SkuTierStandard), + } +} + +func parseSkuTier(input string) (*SkuTier, error) { + vals := map[string]SkuTier{ + "standard": SkuTierStandard, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SkuTier(input) + return &out, nil +} + type UnavailableReason string const ( @@ -48,3 +161,32 @@ const ( UnavailableReasonSubscriptionIsDisabled UnavailableReason = "SubscriptionIsDisabled" UnavailableReasonTooManyNamespaceInCurrentSubscription UnavailableReason = "TooManyNamespaceInCurrentSubscription" ) + +func PossibleValuesForUnavailableReason() []string { + return []string{ + string(UnavailableReasonInvalidName), + string(UnavailableReasonNameInLockdown), + string(UnavailableReasonNameInUse), + string(UnavailableReasonNone), + string(UnavailableReasonSubscriptionIsDisabled), + string(UnavailableReasonTooManyNamespaceInCurrentSubscription), + } +} + +func parseUnavailableReason(input string) (*UnavailableReason, error) { + vals := map[string]UnavailableReason{ + "invalidname": UnavailableReasonInvalidName, + 
"nameinlockdown": UnavailableReasonNameInLockdown, + "nameinuse": UnavailableReasonNameInUse, + "none": UnavailableReasonNone, + "subscriptionisdisabled": UnavailableReasonSubscriptionIsDisabled, + "toomanynamespaceincurrentsubscription": UnavailableReasonTooManyNamespaceInCurrentSubscription, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := UnavailableReason(input) + return &out, nil +} diff --git a/internal/services/relay/sdk/2017-04-01/namespaces/id_authorizationrule.go b/internal/services/relay/sdk/2017-04-01/namespaces/id_authorizationrule.go index fb47d638b71f..51a0830fec19 100644 --- a/internal/services/relay/sdk/2017-04-01/namespaces/id_authorizationrule.go +++ b/internal/services/relay/sdk/2017-04-01/namespaces/id_authorizationrule.go @@ -7,120 +7,131 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = AuthorizationRuleId{} + +// AuthorizationRuleId is a struct representing the Resource ID for a Authorization Rule type AuthorizationRuleId struct { - SubscriptionId string - ResourceGroup string - NamespaceName string - Name string + SubscriptionId string + ResourceGroupName string + NamespaceName string + AuthorizationRuleName string } -func NewAuthorizationRuleID(subscriptionId, resourceGroup, namespaceName, name string) AuthorizationRuleId { +// NewAuthorizationRuleID returns a new AuthorizationRuleId struct +func NewAuthorizationRuleID(subscriptionId string, resourceGroupName string, namespaceName string, authorizationRuleName string) AuthorizationRuleId { return AuthorizationRuleId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, - NamespaceName: namespaceName, - Name: name, + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NamespaceName: namespaceName, + AuthorizationRuleName: authorizationRuleName, } } -func (id AuthorizationRuleId) String() string { - segments := []string{ - fmt.Sprintf("Name %q", id.Name), - fmt.Sprintf("Namespace Name %q", id.NamespaceName), - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Authorization Rule", segmentsStr) -} - -func (id AuthorizationRuleId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Relay/namespaces/%s/authorizationRules/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.NamespaceName, id.Name) -} - -// ParseAuthorizationRuleID parses a AuthorizationRule ID into an AuthorizationRuleId struct +// ParseAuthorizationRuleID parses 'input' into a AuthorizationRuleId func ParseAuthorizationRuleID(input string) (*AuthorizationRuleId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(AuthorizationRuleId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := AuthorizationRuleId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, - } + var ok bool + id := AuthorizationRuleId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, 
fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.NamespaceName, err = id.PopSegment("namespaces"); err != nil { - return nil, err - } - if resourceId.Name, err = id.PopSegment("authorizationRules"); err != nil { - return nil, err + if id.NamespaceName, ok = parsed.Parsed["namespaceName"]; !ok { + return nil, fmt.Errorf("the segment 'namespaceName' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.AuthorizationRuleName, ok = parsed.Parsed["authorizationRuleName"]; !ok { + return nil, fmt.Errorf("the segment 'authorizationRuleName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParseAuthorizationRuleIDInsensitively parses an AuthorizationRule ID into an AuthorizationRuleId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseAuthorizationRuleID method should be used instead for validation etc. +// ParseAuthorizationRuleIDInsensitively parses 'input' case-insensitively into a AuthorizationRuleId +// note: this method should only be used for API response data and not user input func ParseAuthorizationRuleIDInsensitively(input string) (*AuthorizationRuleId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(AuthorizationRuleId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := AuthorizationRuleId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, - } + var ok bool + id := AuthorizationRuleId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - // find the correct casing for the 'namespaces' segment - namespacesKey := "namespaces" - for key := range id.Path { - if strings.EqualFold(key, namespacesKey) { - namespacesKey = key - break - } + if id.NamespaceName, ok = parsed.Parsed["namespaceName"]; !ok { + return nil, fmt.Errorf("the segment 'namespaceName' was not found in the resource id %q", input) } - if resourceId.NamespaceName, err = id.PopSegment(namespacesKey); err != nil { - return nil, err + + if id.AuthorizationRuleName, ok = parsed.Parsed["authorizationRuleName"]; !ok { + return nil, fmt.Errorf("the segment 'authorizationRuleName' was not found in the resource id %q", input) } - // find the correct casing for the 'authorizationRules' segment - authorizationRulesKey := "authorizationRules" - for key := range id.Path { - if strings.EqualFold(key, authorizationRulesKey) { - authorizationRulesKey = key - break - } + return &id, nil +} + +// ValidateAuthorizationRuleID checks that 'input' can be parsed as a Authorization Rule ID +func ValidateAuthorizationRuleID(input interface{}, key string) (warnings 
[]string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.Name, err = id.PopSegment(authorizationRulesKey); err != nil { - return nil, err + + if _, err := ParseAuthorizationRuleID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Authorization Rule ID +func (id AuthorizationRuleId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Relay/namespaces/%s/authorizationRules/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NamespaceName, id.AuthorizationRuleName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Authorization Rule ID +func (id AuthorizationRuleId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("providers", "providers", "providers"), + resourceids.ResourceProviderSegment("microsoftRelay", "Microsoft.Relay", "Microsoft.Relay"), + resourceids.StaticSegment("namespaces", "namespaces", "namespaces"), + resourceids.UserSpecifiedSegment("namespaceName", "namespaceValue"), + resourceids.StaticSegment("authorizationRules", "authorizationRules", "authorizationRules"), + resourceids.UserSpecifiedSegment("authorizationRuleName", "authorizationRuleValue"), } +} - return &resourceId, nil +// String returns a human-readable description of this Authorization Rule ID +func (id AuthorizationRuleId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Namespace Name: %q", id.NamespaceName), + fmt.Sprintf("Authorization Rule Name: %q", id.AuthorizationRuleName), + } + return fmt.Sprintf("Authorization Rule (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/relay/sdk/2017-04-01/namespaces/id_authorizationrule_test.go b/internal/services/relay/sdk/2017-04-01/namespaces/id_authorizationrule_test.go index 03768adf5a72..97c1fa5e575b 100644 --- a/internal/services/relay/sdk/2017-04-01/namespaces/id_authorizationrule_test.go +++ b/internal/services/relay/sdk/2017-04-01/namespaces/id_authorizationrule_test.go @@ -6,13 +6,33 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = AuthorizationRuleId{} +var _ resourceids.ResourceId = AuthorizationRuleId{} -func TestAuthorizationRuleIDFormatter(t *testing.T) { - actual := NewAuthorizationRuleID("{subscriptionId}", "{resourceGroupName}", "{namespaceName}", "{authorizationRuleName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/authorizationRules/{authorizationRuleName}" +func TestNewAuthorizationRuleID(t *testing.T) { + id := NewAuthorizationRuleID("12345678-1234-9876-4563-123456789012", "example-resource-group", "namespaceValue", "authorizationRuleValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 
'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NamespaceName != "namespaceValue" { + t.Fatalf("Expected %q but got %q for Segment 'NamespaceName'", id.NamespaceName, "namespaceValue") + } + + if id.AuthorizationRuleName != "authorizationRuleValue" { + t.Fatalf("Expected %q but got %q for Segment 'AuthorizationRuleName'", id.AuthorizationRuleName, "authorizationRuleValue") + } +} + +func TestFormatAuthorizationRuleID(t *testing.T) { + actual := NewAuthorizationRuleID("12345678-1234-9876-4563-123456789012", "example-resource-group", "namespaceValue", "authorizationRuleValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/authorizationRules/authorizationRuleValue" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,79 +42,72 @@ func TestParseAuthorizationRuleID(t *testing.T) { Error bool Expected *AuthorizationRuleId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing NamespaceName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", Error: true, }, - { - // missing value for NamespaceName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/authorizationRules/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/authorizationRules", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/authorizationRules/{authorizationRuleName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/authorizationRules/authorizationRuleValue", Expected: &AuthorizationRuleId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - NamespaceName: "{namespaceName}", - Name: "{authorizationRuleName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NamespaceName: "namespaceValue", + AuthorizationRuleName: "authorizationRuleValue", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.RELAY/NAMESPACES/{NAMESPACENAME}/AUTHORIZATIONRULES/{AUTHORIZATIONRULENAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/authorizationRules/authorizationRuleValue/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -104,7 +117,7 @@ func TestParseAuthorizationRuleID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -113,15 +126,19 @@ func TestParseAuthorizationRuleID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + if actual.NamespaceName != v.Expected.NamespaceName { t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.AuthorizationRuleName != v.Expected.AuthorizationRuleName { + t.Fatalf("Expected %q but got %q for AuthorizationRuleName", v.Expected.AuthorizationRuleName, actual.AuthorizationRuleName) } + } } @@ -131,106 +148,132 @@ func TestParseAuthorizationRuleIDInsensitively(t *testing.T) { Error bool Expected *AuthorizationRuleId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // 
Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing NamespaceName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for NamespaceName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/authorizationRules/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/authorizationRules/{authorizationRuleName}", - Expected: &AuthorizationRuleId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - NamespaceName: "{namespaceName}", - Name: "{authorizationRuleName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/authorizationrules/{authorizationRuleName}", - Expected: &AuthorizationRuleId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - NamespaceName: "{namespaceName}", - Name: "{authorizationRuleName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/NAMESPACES/{namespaceName}/AUTHORIZATIONRULES/{authorizationRuleName}", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/authorizationRules", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE/aUtHoRiZaTiOnRuLeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/authorizationRules/authorizationRuleValue", Expected: &AuthorizationRuleId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - NamespaceName: "{namespaceName}", - Name: "{authorizationRuleName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NamespaceName: "namespaceValue", + AuthorizationRuleName: "authorizationRuleValue", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/NaMeSpAcEs/{namespaceName}/AuThOrIzAtIoNrUlEs/{authorizationRuleName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/authorizationRules/authorizationRuleValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE/aUtHoRiZaTiOnRuLeS/aUtHoRiZaTiOnRuLeVaLuE", Expected: &AuthorizationRuleId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - NamespaceName: "{namespaceName}", - Name: "{authorizationRuleName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NamespaceName: "nAmEsPaCeVaLuE", + AuthorizationRuleName: "aUtHoRiZaTiOnRuLeVaLuE", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE/aUtHoRiZaTiOnRuLeS/aUtHoRiZaTiOnRuLeVaLuE/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -240,7 +283,7 @@ func TestParseAuthorizationRuleIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -249,14 +292,18 @@ func TestParseAuthorizationRuleIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but 
got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + if actual.NamespaceName != v.Expected.NamespaceName { t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.AuthorizationRuleName != v.Expected.AuthorizationRuleName { + t.Fatalf("Expected %q but got %q for AuthorizationRuleName", v.Expected.AuthorizationRuleName, actual.AuthorizationRuleName) } + } } diff --git a/internal/services/relay/sdk/2017-04-01/namespaces/id_namespace.go b/internal/services/relay/sdk/2017-04-01/namespaces/id_namespace.go index ca9526d69ee0..9d4640bc054b 100644 --- a/internal/services/relay/sdk/2017-04-01/namespaces/id_namespace.go +++ b/internal/services/relay/sdk/2017-04-01/namespaces/id_namespace.go @@ -7,102 +7,118 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = NamespaceId{} + +// NamespaceId is a struct representing the Resource ID for a Namespace type NamespaceId struct { - SubscriptionId string - ResourceGroup string - Name string + SubscriptionId string + ResourceGroupName string + NamespaceName string } -func NewNamespaceID(subscriptionId, resourceGroup, name string) NamespaceId { +// NewNamespaceID returns a new NamespaceId struct +func NewNamespaceID(subscriptionId string, resourceGroupName string, namespaceName string) NamespaceId { return NamespaceId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, - Name: name, + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NamespaceName: namespaceName, } } -func (id NamespaceId) String() string { - segments := []string{ - fmt.Sprintf("Name %q", id.Name), - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Namespace", segmentsStr) -} - -func (id NamespaceId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Relay/namespaces/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.Name) -} - -// ParseNamespaceID parses a Namespace ID into an NamespaceId struct +// ParseNamespaceID parses 'input' into a NamespaceId func ParseNamespaceID(input string) (*NamespaceId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(NamespaceId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err - } - - resourceId := NamespaceId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") - } + var ok bool + id := NamespaceId{} - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.Name, err = id.PopSegment("namespaces"); err != nil { - return nil, err + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the 
segment 'resourceGroupName' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.NamespaceName, ok = parsed.Parsed["namespaceName"]; !ok { + return nil, fmt.Errorf("the segment 'namespaceName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParseNamespaceIDInsensitively parses an Namespace ID into an NamespaceId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseNamespaceID method should be used instead for validation etc. +// ParseNamespaceIDInsensitively parses 'input' case-insensitively into a NamespaceId +// note: this method should only be used for API response data and not user input func ParseNamespaceIDInsensitively(input string) (*NamespaceId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(NamespaceId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := NamespaceId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + var ok bool + id := NamespaceId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.NamespaceName, ok = parsed.Parsed["namespaceName"]; !ok { + return nil, fmt.Errorf("the segment 'namespaceName' was not found in the resource id %q", input) } - // find the correct casing for the 'namespaces' segment - namespacesKey := "namespaces" - for key := range id.Path { - if strings.EqualFold(key, namespacesKey) { - namespacesKey = key - break - } + return &id, nil +} + +// ValidateNamespaceID checks that 'input' can be parsed as a Namespace ID +func ValidateNamespaceID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.Name, err = id.PopSegment(namespacesKey); err != nil { - return nil, err + + if _, err := ParseNamespaceID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Namespace ID +func (id NamespaceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Relay/namespaces/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NamespaceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Namespace ID +func (id NamespaceId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", 
"example-resource-group"), + resourceids.StaticSegment("providers", "providers", "providers"), + resourceids.ResourceProviderSegment("microsoftRelay", "Microsoft.Relay", "Microsoft.Relay"), + resourceids.StaticSegment("namespaces", "namespaces", "namespaces"), + resourceids.UserSpecifiedSegment("namespaceName", "namespaceValue"), } +} - return &resourceId, nil +// String returns a human-readable description of this Namespace ID +func (id NamespaceId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Namespace Name: %q", id.NamespaceName), + } + return fmt.Sprintf("Namespace (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/relay/sdk/2017-04-01/namespaces/id_namespace_test.go b/internal/services/relay/sdk/2017-04-01/namespaces/id_namespace_test.go index 3bd99abd0c86..6506924c62ef 100644 --- a/internal/services/relay/sdk/2017-04-01/namespaces/id_namespace_test.go +++ b/internal/services/relay/sdk/2017-04-01/namespaces/id_namespace_test.go @@ -6,13 +6,29 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = NamespaceId{} +var _ resourceids.ResourceId = NamespaceId{} -func TestNamespaceIDFormatter(t *testing.T) { - actual := NewNamespaceID("{subscriptionId}", "{resourceGroupName}", "{namespaceName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}" +func TestNewNamespaceID(t *testing.T) { + id := NewNamespaceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "namespaceValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NamespaceName != "namespaceValue" { + t.Fatalf("Expected %q but got %q for Segment 'NamespaceName'", id.NamespaceName, "namespaceValue") + } +} + +func TestFormatNamespaceID(t *testing.T) { + actual := NewNamespaceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "namespaceValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,66 +38,61 @@ func TestParseNamespaceID(t *testing.T) { Error bool Expected *NamespaceId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue", Expected: &NamespaceId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{namespaceName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NamespaceName: "namespaceValue", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.RELAY/NAMESPACES/{NAMESPACENAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -91,7 +102,7 @@ func TestParseNamespaceID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -100,12 +111,15 @@ func TestParseNamespaceID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) } + } } @@ -115,90 +129,110 @@ func TestParseNamespaceIDInsensitively(t *testing.T) { Error bool Expected *NamespaceId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // 
Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}", - Expected: &NamespaceId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{namespaceName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}", - Expected: &NamespaceId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{namespaceName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/NAMESPACES/{namespaceName}", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue", Expected: &NamespaceId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{namespaceName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NamespaceName: "namespaceValue", }, }, - { - // 
mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/NaMeSpAcEs/{namespaceName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE", Expected: &NamespaceId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{namespaceName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NamespaceName: "nAmEsPaCeVaLuE", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -208,7 +242,7 @@ func TestParseNamespaceIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -217,11 +251,14 @@ func TestParseNamespaceIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) } + } } diff --git a/internal/services/relay/sdk/2017-04-01/namespaces/id_resourcegroup.go b/internal/services/relay/sdk/2017-04-01/namespaces/id_resourcegroup.go index 2af65df03159..7df426d5c9fa 100644 --- a/internal/services/relay/sdk/2017-04-01/namespaces/id_resourcegroup.go +++ b/internal/services/relay/sdk/2017-04-01/namespaces/id_resourcegroup.go @@ -7,83 +7,103 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = ResourceGroupId{} + +// ResourceGroupId is a struct representing the Resource ID for a Resource Group type ResourceGroupId struct { - SubscriptionId string - ResourceGroup string + SubscriptionId string + ResourceGroupName string } -func NewResourceGroupID(subscriptionId, resourceGroup string) ResourceGroupId { +// NewResourceGroupID returns a new ResourceGroupId struct +func NewResourceGroupID(subscriptionId string, resourceGroupName string) ResourceGroupId { return ResourceGroupId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, } } -func (id ResourceGroupId) String() string { - segments := []string{ - fmt.Sprintf("Resource Group %q", 
id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Resource Group", segmentsStr) -} - -func (id ResourceGroupId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup) -} - -// ParseResourceGroupID parses a ResourceGroup ID into an ResourceGroupId struct +// ParseResourceGroupID parses 'input' into a ResourceGroupId func ParseResourceGroupID(input string) (*ResourceGroupId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(ResourceGroupId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err - } - - resourceId := ResourceGroupId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") - } + var ok bool + id := ResourceGroupId{} - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParseResourceGroupIDInsensitively parses an ResourceGroup ID into an ResourceGroupId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseResourceGroupID method should be used instead for validation etc. 
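As context for the rewritten ResourceGroupId above, here is a minimal, illustrative round-trip of the new ID type. It uses only the NewResourceGroupID, ID and ParseResourceGroupID signatures visible in this diff and assumes it lives inside the provider module (the namespaces package is internal).

package example

import (
	"fmt"

	"github.com/hashicorp/terraform-provider-azurerm/internal/services/relay/sdk/2017-04-01/namespaces"
)

// roundTripResourceGroupID sketches the construct/format/parse flow using only
// signatures shown in this diff.
func roundTripResourceGroupID() error {
	id := namespaces.NewResourceGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group")

	// ID() formats the struct back into an Azure resource ID string.
	raw := id.ID()

	// ParseResourceGroupID validates the string form and returns the typed ID.
	parsed, err := namespaces.ParseResourceGroupID(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}

	if parsed.ResourceGroupName != id.ResourceGroupName {
		return fmt.Errorf("expected %q but got %q for ResourceGroupName", id.ResourceGroupName, parsed.ResourceGroupName)
	}
	return nil
}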
+// ParseResourceGroupIDInsensitively parses 'input' case-insensitively into a ResourceGroupId +// note: this method should only be used for API response data and not user input func ParseResourceGroupIDInsensitively(input string) (*ResourceGroupId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(ResourceGroupId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := ResourceGroupId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - resourceId := ResourceGroupId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + return &id, nil +} + +// ValidateResourceGroupID checks that 'input' can be parsed as a Resource Group ID +func ValidateResourceGroupID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if _, err := ParseResourceGroupID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Resource Group ID +func (id ResourceGroupId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Resource Group ID +func (id ResourceGroupId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), } +} - return &resourceId, nil +// String returns a human-readable description of this Resource Group ID +func (id ResourceGroupId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + } + return fmt.Sprintf("Resource Group (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/relay/sdk/2017-04-01/namespaces/id_resourcegroup_test.go b/internal/services/relay/sdk/2017-04-01/namespaces/id_resourcegroup_test.go index 1ebfa7a6fb07..8bd078b3b0b8 100644 --- a/internal/services/relay/sdk/2017-04-01/namespaces/id_resourcegroup_test.go +++ b/internal/services/relay/sdk/2017-04-01/namespaces/id_resourcegroup_test.go @@ -6,13 +6,25 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = ResourceGroupId{} +var _ resourceids.ResourceId = ResourceGroupId{} -func TestResourceGroupIDFormatter(t *testing.T) { - actual := NewResourceGroupID("{subscriptionId}", "{resourceGroupName}").ID() - 
expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}" +func TestNewResourceGroupID(t *testing.T) { + id := NewResourceGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } +} + +func TestFormatResourceGroupID(t *testing.T) { + actual := NewResourceGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,53 +34,40 @@ func TestParseResourceGroupID(t *testing.T) { Error bool Expected *ResourceGroupId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - - { - // missing SubscriptionId - Input: "/", - Error: true, - }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -78,7 +77,7 @@ func TestParseResourceGroupID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -87,9 +86,11 @@ func TestParseResourceGroupID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + } } @@ -99,74 +100,68 @@ func TestParseResourceGroupIDInsensitively(t *testing.T) { Error bool Expected *ResourceGroupId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { 
- // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", - Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", - Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -176,7 +171,7 @@ func TestParseResourceGroupIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -185,8 +180,10 @@ func TestParseResourceGroupIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but 
got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + } } diff --git a/internal/services/relay/sdk/2017-04-01/namespaces/id_subscription.go b/internal/services/relay/sdk/2017-04-01/namespaces/id_subscription.go index 6b6c89cbb5e9..e110fef67162 100644 --- a/internal/services/relay/sdk/2017-04-01/namespaces/id_subscription.go +++ b/internal/services/relay/sdk/2017-04-01/namespaces/id_subscription.go @@ -7,69 +7,90 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = SubscriptionId{} + +// SubscriptionId is a struct representing the Resource ID for a Subscription type SubscriptionId struct { SubscriptionId string } +// NewSubscriptionID returns a new SubscriptionId struct func NewSubscriptionID(subscriptionId string) SubscriptionId { return SubscriptionId{ SubscriptionId: subscriptionId, } } -func (id SubscriptionId) String() string { - segments := []string{} - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Subscription", segmentsStr) -} - -func (id SubscriptionId) ID() string { - fmtString := "/subscriptions/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId) -} - -// ParseSubscriptionID parses a Subscription ID into an SubscriptionId struct +// ParseSubscriptionID parses 'input' into a SubscriptionId func ParseSubscriptionID(input string) (*SubscriptionId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(SubscriptionId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := SubscriptionId{ - SubscriptionId: id.SubscriptionID, - } + var ok bool + id := SubscriptionId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err - } - - return &resourceId, nil + return &id, nil } -// ParseSubscriptionIDInsensitively parses an Subscription ID into an SubscriptionId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseSubscriptionID method should be used instead for validation etc. 
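// ---------------------------------------------------------------------------
// Editor's note: a second hedged sketch, not part of this changeset, contrasting the
// strict and case-insensitive Subscription ID parsers in this file. Same assumed import
// path as the sketch above; the mixed-case value stands in for an ID as it might come
// back from the Azure API.
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/terraform-provider-azurerm/internal/services/relay/sdk/2017-04-01/namespaces"
)

func main() {
	apiValue := "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012"

	// The strict parser is meant for user input and is case-sensitive, so an
	// upper-cased segment name is expected to fail here.
	if _, err := namespaces.ParseSubscriptionID(apiValue); err != nil {
		fmt.Printf("strict parse rejected the ID: %+v\n", err)
	}

	// The insensitive parser is meant for API response data and accepts it,
	// normalising the static segments when the ID is re-formatted.
	id, err := namespaces.ParseSubscriptionIDInsensitively(apiValue)
	if err != nil {
		log.Fatalf("insensitive parse failed: %+v", err)
	}
	fmt.Println(id.ID()) // /subscriptions/12345678-1234-9876-4563-123456789012
}
// ---------------------------------------------------------------------------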
+// ParseSubscriptionIDInsensitively parses 'input' case-insensitively into a SubscriptionId +// note: this method should only be used for API response data and not user input func ParseSubscriptionIDInsensitively(input string) (*SubscriptionId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(SubscriptionId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := SubscriptionId{ - SubscriptionId: id.SubscriptionID, + var ok bool + id := SubscriptionId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + return &id, nil +} + +// ValidateSubscriptionID checks that 'input' can be parsed as a Subscription ID +func ValidateSubscriptionID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if _, err := ParseSubscriptionID(v); err != nil { + errors = append(errors, err) } - return &resourceId, nil + return +} + +// ID returns the formatted Subscription ID +func (id SubscriptionId) ID() string { + fmtString := "/subscriptions/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId) +} + +// Segments returns a slice of Resource ID Segments which comprise this Subscription ID +func (id SubscriptionId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + } +} + +// String returns a human-readable description of this Subscription ID +func (id SubscriptionId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + } + return fmt.Sprintf("Subscription (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/relay/sdk/2017-04-01/namespaces/id_subscription_test.go b/internal/services/relay/sdk/2017-04-01/namespaces/id_subscription_test.go index ded9d0f7b7d9..d762b8e20b6b 100644 --- a/internal/services/relay/sdk/2017-04-01/namespaces/id_subscription_test.go +++ b/internal/services/relay/sdk/2017-04-01/namespaces/id_subscription_test.go @@ -6,13 +6,21 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = SubscriptionId{} +var _ resourceids.ResourceId = SubscriptionId{} -func TestSubscriptionIDFormatter(t *testing.T) { - actual := NewSubscriptionID("{subscriptionId}").ID() - expected := "/subscriptions/{subscriptionId}" +func TestNewSubscriptionID(t *testing.T) { + id := NewSubscriptionID("12345678-1234-9876-4563-123456789012") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } +} + +func TestFormatSubscriptionID(t *testing.T) { + actual := NewSubscriptionID("12345678-1234-9876-4563-123456789012").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected 
the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,40 +30,29 @@ func TestParseSubscriptionID(t *testing.T) { Error bool Expected *SubscriptionId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", - Error: true, - }, - - { - // valid - Input: "/subscriptions/{subscriptionId}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -65,7 +62,7 @@ func TestParseSubscriptionID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -74,6 +71,7 @@ func TestParseSubscriptionID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } + } } @@ -83,58 +81,46 @@ func TestParseSubscriptionIDInsensitively(t *testing.T) { Error bool Expected *SubscriptionId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", }, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}", - Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", - }, + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/extra", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}", + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}", - Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", - }, + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/extra", + Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -144,7 +130,7 @@ func TestParseSubscriptionIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -153,5 +139,6 @@ func TestParseSubscriptionIDInsensitively(t *testing.T) { if actual.SubscriptionId != 
v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } + } } diff --git a/internal/services/relay/sdk/2017-04-01/namespaces/model_relaynamespaceproperties.go b/internal/services/relay/sdk/2017-04-01/namespaces/model_relaynamespaceproperties.go index 345205e805e9..2c82afd54a44 100644 --- a/internal/services/relay/sdk/2017-04-01/namespaces/model_relaynamespaceproperties.go +++ b/internal/services/relay/sdk/2017-04-01/namespaces/model_relaynamespaceproperties.go @@ -1,11 +1,5 @@ package namespaces -import ( - "time" - - "github.com/hashicorp/go-azure-helpers/lang/dates" -) - type RelayNamespaceProperties struct { CreatedAt *string `json:"createdAt,omitempty"` MetricId *string `json:"metricId,omitempty"` @@ -13,21 +7,3 @@ type RelayNamespaceProperties struct { ServiceBusEndpoint *string `json:"serviceBusEndpoint,omitempty"` UpdatedAt *string `json:"updatedAt,omitempty"` } - -func (o RelayNamespaceProperties) GetCreatedAtAsTime() (*time.Time, error) { - return dates.ParseAsFormat(o.CreatedAt, "2006-01-02T15:04:05Z07:00") -} - -func (o RelayNamespaceProperties) SetCreatedAtAsTime(input time.Time) { - formatted := input.Format("2006-01-02T15:04:05Z07:00") - o.CreatedAt = &formatted -} - -func (o RelayNamespaceProperties) GetUpdatedAtAsTime() (*time.Time, error) { - return dates.ParseAsFormat(o.UpdatedAt, "2006-01-02T15:04:05Z07:00") -} - -func (o RelayNamespaceProperties) SetUpdatedAtAsTime(input time.Time) { - formatted := input.Format("2006-01-02T15:04:05Z07:00") - o.UpdatedAt = &formatted -} diff --git a/internal/services/relay/sdk/2017-04-01/namespaces/predicates.go b/internal/services/relay/sdk/2017-04-01/namespaces/predicates.go index fe6cc72b6e33..1cc0ca9847b7 100644 --- a/internal/services/relay/sdk/2017-04-01/namespaces/predicates.go +++ b/internal/services/relay/sdk/2017-04-01/namespaces/predicates.go @@ -7,6 +7,7 @@ type AuthorizationRulePredicate struct { } func (p AuthorizationRulePredicate) Matches(input AuthorizationRule) bool { + if p.Id != nil && (input.Id == nil && *p.Id != *input.Id) { return false } @@ -30,6 +31,7 @@ type RelayNamespacePredicate struct { } func (p RelayNamespacePredicate) Matches(input RelayNamespace) bool { + if p.Id != nil && (input.Id == nil && *p.Id != *input.Id) { return false } diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/client.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/client.go new file mode 100644 index 000000000000..531e56c3737d --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/wcfrelays/client.go @@ -0,0 +1,15 @@ +package wcfrelays + +import "github.com/Azure/go-autorest/autorest" + +type WCFRelaysClient struct { + Client autorest.Client + baseUri string +} + +func NewWCFRelaysClientWithBaseURI(endpoint string) WCFRelaysClient { + return WCFRelaysClient{ + Client: autorest.NewClientWithUserAgent(userAgent()), + baseUri: endpoint, + } +} diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/constants.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/constants.go new file mode 100644 index 000000000000..845860d1c419 --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/wcfrelays/constants.go @@ -0,0 +1,90 @@ +package wcfrelays + +import "strings" + +type AccessRights string + +const ( + AccessRightsListen AccessRights = "Listen" + AccessRightsManage AccessRights = "Manage" + AccessRightsSend AccessRights = "Send" +) + +func PossibleValuesForAccessRights() []string { + return []string{ + string(AccessRightsListen), + 
string(AccessRightsManage), + string(AccessRightsSend), + } +} + +func parseAccessRights(input string) (*AccessRights, error) { + vals := map[string]AccessRights{ + "listen": AccessRightsListen, + "manage": AccessRightsManage, + "send": AccessRightsSend, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AccessRights(input) + return &out, nil +} + +type KeyType string + +const ( + KeyTypePrimaryKey KeyType = "PrimaryKey" + KeyTypeSecondaryKey KeyType = "SecondaryKey" +) + +func PossibleValuesForKeyType() []string { + return []string{ + string(KeyTypePrimaryKey), + string(KeyTypeSecondaryKey), + } +} + +func parseKeyType(input string) (*KeyType, error) { + vals := map[string]KeyType{ + "primarykey": KeyTypePrimaryKey, + "secondarykey": KeyTypeSecondaryKey, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := KeyType(input) + return &out, nil +} + +type Relaytype string + +const ( + RelaytypeHttp Relaytype = "Http" + RelaytypeNetTcp Relaytype = "NetTcp" +) + +func PossibleValuesForRelaytype() []string { + return []string{ + string(RelaytypeHttp), + string(RelaytypeNetTcp), + } +} + +func parseRelaytype(input string) (*Relaytype, error) { + vals := map[string]Relaytype{ + "http": RelaytypeHttp, + "nettcp": RelaytypeNetTcp, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := Relaytype(input) + return &out, nil +} diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/id_namespace.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/id_namespace.go new file mode 100644 index 000000000000..3a14c549d435 --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/wcfrelays/id_namespace.go @@ -0,0 +1,124 @@ +package wcfrelays + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = NamespaceId{} + +// NamespaceId is a struct representing the Resource ID for a Namespace +type NamespaceId struct { + SubscriptionId string + ResourceGroupName string + NamespaceName string +} + +// NewNamespaceID returns a new NamespaceId struct +func NewNamespaceID(subscriptionId string, resourceGroupName string, namespaceName string) NamespaceId { + return NamespaceId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NamespaceName: namespaceName, + } +} + +// ParseNamespaceID parses 'input' into a NamespaceId +func ParseNamespaceID(input string) (*NamespaceId, error) { + parser := resourceids.NewParserFromResourceIdType(NamespaceId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := NamespaceId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.NamespaceName, ok = parsed.Parsed["namespaceName"]; !ok { + return nil, fmt.Errorf("the segment 'namespaceName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParseNamespaceIDInsensitively parses 'input' case-insensitively into a 
NamespaceId +// note: this method should only be used for API response data and not user input +func ParseNamespaceIDInsensitively(input string) (*NamespaceId, error) { + parser := resourceids.NewParserFromResourceIdType(NamespaceId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := NamespaceId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.NamespaceName, ok = parsed.Parsed["namespaceName"]; !ok { + return nil, fmt.Errorf("the segment 'namespaceName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidateNamespaceID checks that 'input' can be parsed as a Namespace ID +func ValidateNamespaceID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseNamespaceID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Namespace ID +func (id NamespaceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Relay/namespaces/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NamespaceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Namespace ID +func (id NamespaceId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("providers", "providers", "providers"), + resourceids.ResourceProviderSegment("microsoftRelay", "Microsoft.Relay", "Microsoft.Relay"), + resourceids.StaticSegment("namespaces", "namespaces", "namespaces"), + resourceids.UserSpecifiedSegment("namespaceName", "namespaceValue"), + } +} + +// String returns a human-readable description of this Namespace ID +func (id NamespaceId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Namespace Name: %q", id.NamespaceName), + } + return fmt.Sprintf("Namespace (%s)", strings.Join(components, "\n")) +} diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/id_namespace_test.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/id_namespace_test.go new file mode 100644 index 000000000000..9ea5afdac397 --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/wcfrelays/id_namespace_test.go @@ -0,0 +1,264 @@ +package wcfrelays + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = NamespaceId{} + +func TestNewNamespaceID(t *testing.T) { + id := NewNamespaceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "namespaceValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected 
%q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NamespaceName != "namespaceValue" { + t.Fatalf("Expected %q but got %q for Segment 'NamespaceName'", id.NamespaceName, "namespaceValue") + } +} + +func TestFormatNamespaceID(t *testing.T) { + actual := NewNamespaceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "namespaceValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) + } +} + +func TestParseNamespaceID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *NamespaceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue", + Expected: &NamespaceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NamespaceName: "namespaceValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseNamespaceID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) + } + + } +} + +func TestParseNamespaceIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *NamespaceId + 
}{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue", + Expected: &NamespaceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NamespaceName: "namespaceValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE", + Expected: &NamespaceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NamespaceName: "nAmEsPaCeVaLuE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", 
v.Input) + + actual, err := ParseNamespaceIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) + } + + } +} diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/id_wcfrelay.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/id_wcfrelay.go new file mode 100644 index 000000000000..687b2c9f1f1c --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/wcfrelays/id_wcfrelay.go @@ -0,0 +1,137 @@ +package wcfrelays + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = WcfRelayId{} + +// WcfRelayId is a struct representing the Resource ID for a Wcf Relay +type WcfRelayId struct { + SubscriptionId string + ResourceGroupName string + NamespaceName string + RelayName string +} + +// NewWcfRelayID returns a new WcfRelayId struct +func NewWcfRelayID(subscriptionId string, resourceGroupName string, namespaceName string, relayName string) WcfRelayId { + return WcfRelayId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NamespaceName: namespaceName, + RelayName: relayName, + } +} + +// ParseWcfRelayID parses 'input' into a WcfRelayId +func ParseWcfRelayID(input string) (*WcfRelayId, error) { + parser := resourceids.NewParserFromResourceIdType(WcfRelayId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := WcfRelayId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.NamespaceName, ok = parsed.Parsed["namespaceName"]; !ok { + return nil, fmt.Errorf("the segment 'namespaceName' was not found in the resource id %q", input) + } + + if id.RelayName, ok = parsed.Parsed["relayName"]; !ok { + return nil, fmt.Errorf("the segment 'relayName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParseWcfRelayIDInsensitively parses 'input' case-insensitively into a WcfRelayId +// note: this method should only be used for API response data and not user input +func ParseWcfRelayIDInsensitively(input string) (*WcfRelayId, error) { + parser := resourceids.NewParserFromResourceIdType(WcfRelayId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := WcfRelayId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, 
fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.NamespaceName, ok = parsed.Parsed["namespaceName"]; !ok { + return nil, fmt.Errorf("the segment 'namespaceName' was not found in the resource id %q", input) + } + + if id.RelayName, ok = parsed.Parsed["relayName"]; !ok { + return nil, fmt.Errorf("the segment 'relayName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidateWcfRelayID checks that 'input' can be parsed as a Wcf Relay ID +func ValidateWcfRelayID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseWcfRelayID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Wcf Relay ID +func (id WcfRelayId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Relay/namespaces/%s/wcfRelays/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NamespaceName, id.RelayName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Wcf Relay ID +func (id WcfRelayId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("providers", "providers", "providers"), + resourceids.ResourceProviderSegment("microsoftRelay", "Microsoft.Relay", "Microsoft.Relay"), + resourceids.StaticSegment("namespaces", "namespaces", "namespaces"), + resourceids.UserSpecifiedSegment("namespaceName", "namespaceValue"), + resourceids.StaticSegment("wcfRelays", "wcfRelays", "wcfRelays"), + resourceids.UserSpecifiedSegment("relayName", "relayValue"), + } +} + +// String returns a human-readable description of this Wcf Relay ID +func (id WcfRelayId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Namespace Name: %q", id.NamespaceName), + fmt.Sprintf("Relay Name: %q", id.RelayName), + } + return fmt.Sprintf("Wcf Relay (%s)", strings.Join(components, "\n")) +} diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/id_wcfrelay_test.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/id_wcfrelay_test.go new file mode 100644 index 000000000000..e04aabc1af82 --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/wcfrelays/id_wcfrelay_test.go @@ -0,0 +1,309 @@ +package wcfrelays + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = WcfRelayId{} + +func TestNewWcfRelayID(t *testing.T) { + id := NewWcfRelayID("12345678-1234-9876-4563-123456789012", "example-resource-group", "namespaceValue", "relayValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if 
id.NamespaceName != "namespaceValue" { + t.Fatalf("Expected %q but got %q for Segment 'NamespaceName'", id.NamespaceName, "namespaceValue") + } + + if id.RelayName != "relayValue" { + t.Fatalf("Expected %q but got %q for Segment 'RelayName'", id.RelayName, "relayValue") + } +} + +func TestFormatWcfRelayID(t *testing.T) { + actual := NewWcfRelayID("12345678-1234-9876-4563-123456789012", "example-resource-group", "namespaceValue", "relayValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/wcfRelays/relayValue" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) + } +} + +func TestParseWcfRelayID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *WcfRelayId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/wcfRelays", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/wcfRelays/relayValue", + Expected: &WcfRelayId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NamespaceName: "namespaceValue", + RelayName: "relayValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/wcfRelays/relayValue/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseWcfRelayID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, 
actual.ResourceGroupName) + } + + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) + } + + if actual.RelayName != v.Expected.RelayName { + t.Fatalf("Expected %q but got %q for RelayName", v.Expected.RelayName, actual.RelayName) + } + + } +} + +func TestParseWcfRelayIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *WcfRelayId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/wcfRelays", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE/wCfReLaYs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/wcfRelays/relayValue", + Expected: &WcfRelayId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NamespaceName: "namespaceValue", + RelayName: "relayValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/wcfRelays/relayValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE/wCfReLaYs/rElAyVaLuE", + Expected: &WcfRelayId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NamespaceName: "nAmEsPaCeVaLuE", + RelayName: "rElAyVaLuE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE/wCfReLaYs/rElAyVaLuE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseWcfRelayIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) + } + + if actual.RelayName != v.Expected.RelayName { + t.Fatalf("Expected %q but got %q for RelayName", v.Expected.RelayName, actual.RelayName) + } + + } +} diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/id_wcfrelayauthorizationrule.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/id_wcfrelayauthorizationrule.go new file mode 100644 index 000000000000..968ebf0326de --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/wcfrelays/id_wcfrelayauthorizationrule.go @@ -0,0 +1,150 @@ +package wcfrelays + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = WcfRelayAuthorizationRuleId{} + +// WcfRelayAuthorizationRuleId is a struct representing the Resource ID for a Wcf Relay Authorization Rule +type WcfRelayAuthorizationRuleId struct { + SubscriptionId string + ResourceGroupName string + NamespaceName string + RelayName string + AuthorizationRuleName string +} + +// NewWcfRelayAuthorizationRuleID returns a new WcfRelayAuthorizationRuleId struct +func NewWcfRelayAuthorizationRuleID(subscriptionId string, resourceGroupName string, namespaceName string, relayName 
string, authorizationRuleName string) WcfRelayAuthorizationRuleId { + return WcfRelayAuthorizationRuleId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NamespaceName: namespaceName, + RelayName: relayName, + AuthorizationRuleName: authorizationRuleName, + } +} + +// ParseWcfRelayAuthorizationRuleID parses 'input' into a WcfRelayAuthorizationRuleId +func ParseWcfRelayAuthorizationRuleID(input string) (*WcfRelayAuthorizationRuleId, error) { + parser := resourceids.NewParserFromResourceIdType(WcfRelayAuthorizationRuleId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := WcfRelayAuthorizationRuleId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.NamespaceName, ok = parsed.Parsed["namespaceName"]; !ok { + return nil, fmt.Errorf("the segment 'namespaceName' was not found in the resource id %q", input) + } + + if id.RelayName, ok = parsed.Parsed["relayName"]; !ok { + return nil, fmt.Errorf("the segment 'relayName' was not found in the resource id %q", input) + } + + if id.AuthorizationRuleName, ok = parsed.Parsed["authorizationRuleName"]; !ok { + return nil, fmt.Errorf("the segment 'authorizationRuleName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParseWcfRelayAuthorizationRuleIDInsensitively parses 'input' case-insensitively into a WcfRelayAuthorizationRuleId +// note: this method should only be used for API response data and not user input +func ParseWcfRelayAuthorizationRuleIDInsensitively(input string) (*WcfRelayAuthorizationRuleId, error) { + parser := resourceids.NewParserFromResourceIdType(WcfRelayAuthorizationRuleId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := WcfRelayAuthorizationRuleId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.NamespaceName, ok = parsed.Parsed["namespaceName"]; !ok { + return nil, fmt.Errorf("the segment 'namespaceName' was not found in the resource id %q", input) + } + + if id.RelayName, ok = parsed.Parsed["relayName"]; !ok { + return nil, fmt.Errorf("the segment 'relayName' was not found in the resource id %q", input) + } + + if id.AuthorizationRuleName, ok = parsed.Parsed["authorizationRuleName"]; !ok { + return nil, fmt.Errorf("the segment 'authorizationRuleName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidateWcfRelayAuthorizationRuleID checks that 'input' can be parsed as a Wcf Relay Authorization Rule ID +func ValidateWcfRelayAuthorizationRuleID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseWcfRelayAuthorizationRuleID(v); err != nil { + errors = append(errors, err) 
+ } + + return +} + +// ID returns the formatted Wcf Relay Authorization Rule ID +func (id WcfRelayAuthorizationRuleId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Relay/namespaces/%s/wcfRelays/%s/authorizationRules/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NamespaceName, id.RelayName, id.AuthorizationRuleName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Wcf Relay Authorization Rule ID +func (id WcfRelayAuthorizationRuleId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("providers", "providers", "providers"), + resourceids.ResourceProviderSegment("microsoftRelay", "Microsoft.Relay", "Microsoft.Relay"), + resourceids.StaticSegment("namespaces", "namespaces", "namespaces"), + resourceids.UserSpecifiedSegment("namespaceName", "namespaceValue"), + resourceids.StaticSegment("wcfRelays", "wcfRelays", "wcfRelays"), + resourceids.UserSpecifiedSegment("relayName", "relayValue"), + resourceids.StaticSegment("authorizationRules", "authorizationRules", "authorizationRules"), + resourceids.UserSpecifiedSegment("authorizationRuleName", "authorizationRuleValue"), + } +} + +// String returns a human-readable description of this Wcf Relay Authorization Rule ID +func (id WcfRelayAuthorizationRuleId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Namespace Name: %q", id.NamespaceName), + fmt.Sprintf("Relay Name: %q", id.RelayName), + fmt.Sprintf("Authorization Rule Name: %q", id.AuthorizationRuleName), + } + return fmt.Sprintf("Wcf Relay Authorization Rule (%s)", strings.Join(components, "\n")) +} diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/id_wcfrelayauthorizationrule_test.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/id_wcfrelayauthorizationrule_test.go new file mode 100644 index 000000000000..7f5e2359c0f2 --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/wcfrelays/id_wcfrelayauthorizationrule_test.go @@ -0,0 +1,354 @@ +package wcfrelays + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = WcfRelayAuthorizationRuleId{} + +func TestNewWcfRelayAuthorizationRuleID(t *testing.T) { + id := NewWcfRelayAuthorizationRuleID("12345678-1234-9876-4563-123456789012", "example-resource-group", "namespaceValue", "relayValue", "authorizationRuleValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NamespaceName != "namespaceValue" { + t.Fatalf("Expected %q but got %q for Segment 'NamespaceName'", id.NamespaceName, "namespaceValue") + } + + if id.RelayName != "relayValue" { + t.Fatalf("Expected %q but got %q for Segment 'RelayName'", id.RelayName, 
"relayValue") + } + + if id.AuthorizationRuleName != "authorizationRuleValue" { + t.Fatalf("Expected %q but got %q for Segment 'AuthorizationRuleName'", id.AuthorizationRuleName, "authorizationRuleValue") + } +} + +func TestFormatWcfRelayAuthorizationRuleID(t *testing.T) { + actual := NewWcfRelayAuthorizationRuleID("12345678-1234-9876-4563-123456789012", "example-resource-group", "namespaceValue", "relayValue", "authorizationRuleValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/wcfRelays/relayValue/authorizationRules/authorizationRuleValue" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) + } +} + +func TestParseWcfRelayAuthorizationRuleID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *WcfRelayAuthorizationRuleId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/wcfRelays", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/wcfRelays/relayValue", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/wcfRelays/relayValue/authorizationRules", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/wcfRelays/relayValue/authorizationRules/authorizationRuleValue", + Expected: &WcfRelayAuthorizationRuleId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NamespaceName: "namespaceValue", + RelayName: "relayValue", + AuthorizationRuleName: "authorizationRuleValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/wcfRelays/relayValue/authorizationRules/authorizationRuleValue/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseWcfRelayAuthorizationRuleID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) + } + + if actual.RelayName != v.Expected.RelayName { + t.Fatalf("Expected %q but got %q for RelayName", v.Expected.RelayName, actual.RelayName) + } + + if actual.AuthorizationRuleName != v.Expected.AuthorizationRuleName { + t.Fatalf("Expected %q but got %q for AuthorizationRuleName", v.Expected.AuthorizationRuleName, actual.AuthorizationRuleName) + } + + } +} + +func TestParseWcfRelayAuthorizationRuleIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *WcfRelayAuthorizationRuleId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/wcfRelays", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE/wCfReLaYs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/wcfRelays/relayValue", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE/wCfReLaYs/rElAyVaLuE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/wcfRelays/relayValue/authorizationRules", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE/wCfReLaYs/rElAyVaLuE/aUtHoRiZaTiOnRuLeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/wcfRelays/relayValue/authorizationRules/authorizationRuleValue", + Expected: &WcfRelayAuthorizationRuleId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NamespaceName: "namespaceValue", + RelayName: "relayValue", + AuthorizationRuleName: "authorizationRuleValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Relay/namespaces/namespaceValue/wcfRelays/relayValue/authorizationRules/authorizationRuleValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE/wCfReLaYs/rElAyVaLuE/aUtHoRiZaTiOnRuLeS/aUtHoRiZaTiOnRuLeVaLuE", + Expected: &WcfRelayAuthorizationRuleId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NamespaceName: "nAmEsPaCeVaLuE", + RelayName: "rElAyVaLuE", + 
AuthorizationRuleName: "aUtHoRiZaTiOnRuLeVaLuE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.rElAy/nAmEsPaCeS/nAmEsPaCeVaLuE/wCfReLaYs/rElAyVaLuE/aUtHoRiZaTiOnRuLeS/aUtHoRiZaTiOnRuLeVaLuE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseWcfRelayAuthorizationRuleIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) + } + + if actual.RelayName != v.Expected.RelayName { + t.Fatalf("Expected %q but got %q for RelayName", v.Expected.RelayName, actual.RelayName) + } + + if actual.AuthorizationRuleName != v.Expected.AuthorizationRuleName { + t.Fatalf("Expected %q but got %q for AuthorizationRuleName", v.Expected.AuthorizationRuleName, actual.AuthorizationRuleName) + } + + } +} diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/method_createorupdate_autorest.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/method_createorupdate_autorest.go new file mode 100644 index 000000000000..9355bfbdb9b3 --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/wcfrelays/method_createorupdate_autorest.go @@ -0,0 +1,65 @@ +package wcfrelays + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type CreateOrUpdateResponse struct { + HttpResponse *http.Response + Model *WcfRelay +} + +// CreateOrUpdate ... +func (c WCFRelaysClient) CreateOrUpdate(ctx context.Context, id WcfRelayId, input WcfRelay) (result CreateOrUpdateResponse, err error) { + req, err := c.preparerForCreateOrUpdate(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "CreateOrUpdate", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForCreateOrUpdate(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "CreateOrUpdate", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForCreateOrUpdate prepares the CreateOrUpdate request. 
+func (c WCFRelaysClient) preparerForCreateOrUpdate(ctx context.Context, id WcfRelayId, input WcfRelay) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForCreateOrUpdate handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (c WCFRelaysClient) responderForCreateOrUpdate(resp *http.Response) (result CreateOrUpdateResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/method_createorupdateauthorizationrule_autorest.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/method_createorupdateauthorizationrule_autorest.go new file mode 100644 index 000000000000..c8e1436bc99b --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/wcfrelays/method_createorupdateauthorizationrule_autorest.go @@ -0,0 +1,65 @@ +package wcfrelays + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type CreateOrUpdateAuthorizationRuleResponse struct { + HttpResponse *http.Response + Model *AuthorizationRule +} + +// CreateOrUpdateAuthorizationRule ... +func (c WCFRelaysClient) CreateOrUpdateAuthorizationRule(ctx context.Context, id WcfRelayAuthorizationRuleId, input AuthorizationRule) (result CreateOrUpdateAuthorizationRuleResponse, err error) { + req, err := c.preparerForCreateOrUpdateAuthorizationRule(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "CreateOrUpdateAuthorizationRule", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "CreateOrUpdateAuthorizationRule", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForCreateOrUpdateAuthorizationRule(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "CreateOrUpdateAuthorizationRule", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForCreateOrUpdateAuthorizationRule prepares the CreateOrUpdateAuthorizationRule request. +func (c WCFRelaysClient) preparerForCreateOrUpdateAuthorizationRule(ctx context.Context, id WcfRelayAuthorizationRuleId, input AuthorizationRule) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForCreateOrUpdateAuthorizationRule handles the response to the CreateOrUpdateAuthorizationRule request. 
The method always +// closes the http.Response Body. +func (c WCFRelaysClient) responderForCreateOrUpdateAuthorizationRule(resp *http.Response) (result CreateOrUpdateAuthorizationRuleResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/method_delete_autorest.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/method_delete_autorest.go new file mode 100644 index 000000000000..6e1be16156f8 --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/wcfrelays/method_delete_autorest.go @@ -0,0 +1,61 @@ +package wcfrelays + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type DeleteResponse struct { + HttpResponse *http.Response +} + +// Delete ... +func (c WCFRelaysClient) Delete(ctx context.Context, id WcfRelayId) (result DeleteResponse, err error) { + req, err := c.preparerForDelete(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "Delete", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "Delete", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForDelete(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "Delete", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForDelete prepares the Delete request. +func (c WCFRelaysClient) preparerForDelete(ctx context.Context, id WcfRelayId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForDelete handles the response to the Delete request. The method always +// closes the http.Response Body. +func (c WCFRelaysClient) responderForDelete(resp *http.Response) (result DeleteResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/method_deleteauthorizationrule_autorest.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/method_deleteauthorizationrule_autorest.go new file mode 100644 index 000000000000..5f24a8df73dc --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/wcfrelays/method_deleteauthorizationrule_autorest.go @@ -0,0 +1,61 @@ +package wcfrelays + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type DeleteAuthorizationRuleResponse struct { + HttpResponse *http.Response +} + +// DeleteAuthorizationRule ... 
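// Illustrative only (not part of this change): a minimal sketch of how the
// generated client methods above are expected to be called from resource code.
// It assumes the caller already has a configured WCFRelaysClient, a context,
// and imports for "context" and "fmt"; the ID values are placeholders.
func exampleDeleteWcfRelayAuthorizationRule(ctx context.Context, client WCFRelaysClient) error {
	// build the fully-qualified Resource ID rather than formatting the path by hand
	id := NewWcfRelayAuthorizationRuleID("12345678-1234-9876-4563-123456789012", "example-resource-group", "namespaceValue", "relayValue", "authorizationRuleValue")

	// confirm the rule exists before attempting the delete
	if _, err := client.GetAuthorizationRule(ctx, id); err != nil {
		return fmt.Errorf("retrieving %s: %+v", id, err)
	}

	if _, err := client.DeleteAuthorizationRule(ctx, id); err != nil {
		return fmt.Errorf("deleting %s: %+v", id, err)
	}
	return nil
}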
+func (c WCFRelaysClient) DeleteAuthorizationRule(ctx context.Context, id WcfRelayAuthorizationRuleId) (result DeleteAuthorizationRuleResponse, err error) { + req, err := c.preparerForDeleteAuthorizationRule(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "DeleteAuthorizationRule", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "DeleteAuthorizationRule", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForDeleteAuthorizationRule(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "DeleteAuthorizationRule", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForDeleteAuthorizationRule prepares the DeleteAuthorizationRule request. +func (c WCFRelaysClient) preparerForDeleteAuthorizationRule(ctx context.Context, id WcfRelayAuthorizationRuleId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForDeleteAuthorizationRule handles the response to the DeleteAuthorizationRule request. The method always +// closes the http.Response Body. +func (c WCFRelaysClient) responderForDeleteAuthorizationRule(resp *http.Response) (result DeleteAuthorizationRuleResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/method_get_autorest.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/method_get_autorest.go new file mode 100644 index 000000000000..e49673ffadd2 --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/wcfrelays/method_get_autorest.go @@ -0,0 +1,64 @@ +package wcfrelays + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type GetResponse struct { + HttpResponse *http.Response + Model *WcfRelay +} + +// Get ... +func (c WCFRelaysClient) Get(ctx context.Context, id WcfRelayId) (result GetResponse, err error) { + req, err := c.preparerForGet(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "Get", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "Get", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForGet(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "Get", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForGet prepares the Get request. 
+func (c WCFRelaysClient) preparerForGet(ctx context.Context, id WcfRelayId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForGet handles the response to the Get request. The method always +// closes the http.Response Body. +func (c WCFRelaysClient) responderForGet(resp *http.Response) (result GetResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/method_getauthorizationrule_autorest.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/method_getauthorizationrule_autorest.go new file mode 100644 index 000000000000..f8a2a6a562e5 --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/wcfrelays/method_getauthorizationrule_autorest.go @@ -0,0 +1,64 @@ +package wcfrelays + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type GetAuthorizationRuleResponse struct { + HttpResponse *http.Response + Model *AuthorizationRule +} + +// GetAuthorizationRule ... +func (c WCFRelaysClient) GetAuthorizationRule(ctx context.Context, id WcfRelayAuthorizationRuleId) (result GetAuthorizationRuleResponse, err error) { + req, err := c.preparerForGetAuthorizationRule(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "GetAuthorizationRule", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "GetAuthorizationRule", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForGetAuthorizationRule(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "GetAuthorizationRule", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForGetAuthorizationRule prepares the GetAuthorizationRule request. +func (c WCFRelaysClient) preparerForGetAuthorizationRule(ctx context.Context, id WcfRelayAuthorizationRuleId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForGetAuthorizationRule handles the response to the GetAuthorizationRule request. The method always +// closes the http.Response Body. 
+func (c WCFRelaysClient) responderForGetAuthorizationRule(resp *http.Response) (result GetAuthorizationRuleResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/method_listauthorizationrules_autorest.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/method_listauthorizationrules_autorest.go new file mode 100644 index 000000000000..598b44813a95 --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/wcfrelays/method_listauthorizationrules_autorest.go @@ -0,0 +1,183 @@ +package wcfrelays + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type ListAuthorizationRulesResponse struct { + HttpResponse *http.Response + Model *[]AuthorizationRule + + nextLink *string + nextPageFunc func(ctx context.Context, nextLink string) (ListAuthorizationRulesResponse, error) +} + +type ListAuthorizationRulesCompleteResult struct { + Items []AuthorizationRule +} + +func (r ListAuthorizationRulesResponse) HasMore() bool { + return r.nextLink != nil +} + +func (r ListAuthorizationRulesResponse) LoadMore(ctx context.Context) (resp ListAuthorizationRulesResponse, err error) { + if !r.HasMore() { + err = fmt.Errorf("no more pages returned") + return + } + return r.nextPageFunc(ctx, *r.nextLink) +} + +// ListAuthorizationRules ... +func (c WCFRelaysClient) ListAuthorizationRules(ctx context.Context, id WcfRelayId) (resp ListAuthorizationRulesResponse, err error) { + req, err := c.preparerForListAuthorizationRules(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "ListAuthorizationRules", nil, "Failure preparing request") + return + } + + resp.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "ListAuthorizationRules", resp.HttpResponse, "Failure sending request") + return + } + + resp, err = c.responderForListAuthorizationRules(resp.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "ListAuthorizationRules", resp.HttpResponse, "Failure responding to request") + return + } + return +} + +// ListAuthorizationRulesComplete retrieves all of the results into a single object +func (c WCFRelaysClient) ListAuthorizationRulesComplete(ctx context.Context, id WcfRelayId) (ListAuthorizationRulesCompleteResult, error) { + return c.ListAuthorizationRulesCompleteMatchingPredicate(ctx, id, AuthorizationRulePredicate{}) +} + +// ListAuthorizationRulesCompleteMatchingPredicate retrieves all of the results and then applied the predicate +func (c WCFRelaysClient) ListAuthorizationRulesCompleteMatchingPredicate(ctx context.Context, id WcfRelayId, predicate AuthorizationRulePredicate) (resp ListAuthorizationRulesCompleteResult, err error) { + items := make([]AuthorizationRule, 0) + + page, err := c.ListAuthorizationRules(ctx, id) + if err != nil { + err = fmt.Errorf("loading the initial page: %+v", err) + return + } + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + for page.HasMore() { + page, err = page.LoadMore(ctx) + if err != nil { + err = fmt.Errorf("loading the next page: %+v", err) + return + } + + if page.Model 
!= nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + } + + out := ListAuthorizationRulesCompleteResult{ + Items: items, + } + return out, nil +} + +// preparerForListAuthorizationRules prepares the ListAuthorizationRules request. +func (c WCFRelaysClient) preparerForListAuthorizationRules(ctx context.Context, id WcfRelayId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/authorizationRules", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// preparerForListAuthorizationRulesWithNextLink prepares the ListAuthorizationRules request with the given nextLink token. +func (c WCFRelaysClient) preparerForListAuthorizationRulesWithNextLink(ctx context.Context, nextLink string) (*http.Request, error) { + uri, err := url.Parse(nextLink) + if err != nil { + return nil, fmt.Errorf("parsing nextLink %q: %+v", nextLink, err) + } + queryParameters := map[string]interface{}{} + for k, v := range uri.Query() { + if len(v) == 0 { + continue + } + val := v[0] + val = autorest.Encode("query", val) + queryParameters[k] = val + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(uri.Path), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForListAuthorizationRules handles the response to the ListAuthorizationRules request. The method always +// closes the http.Response Body. 
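// Illustrative only (not part of this change): a minimal sketch of consuming the
// pagination helpers above. ListAuthorizationRulesComplete already walks every
// page internally; the explicit HasMore/LoadMore loop below is the equivalent
// page-by-page form. The client, context and "fmt" import are assumed to exist.
func examplePageThroughAuthorizationRules(ctx context.Context, client WCFRelaysClient, id WcfRelayId) ([]AuthorizationRule, error) {
	rules := make([]AuthorizationRule, 0)

	page, err := client.ListAuthorizationRules(ctx, id)
	if err != nil {
		return nil, fmt.Errorf("listing authorization rules for %s: %+v", id.ID(), err)
	}

	for {
		if page.Model != nil {
			rules = append(rules, *page.Model...)
		}
		if !page.HasMore() {
			break
		}
		// the nextLink is carried inside the response, so LoadMore fetches the next page
		if page, err = page.LoadMore(ctx); err != nil {
			return nil, fmt.Errorf("loading the next page for %s: %+v", id.ID(), err)
		}
	}
	return rules, nil
}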
+func (c WCFRelaysClient) responderForListAuthorizationRules(resp *http.Response) (result ListAuthorizationRulesResponse, err error) { + type page struct { + Values []AuthorizationRule `json:"value"` + NextLink *string `json:"nextLink"` + } + var respObj page + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&respObj), + autorest.ByClosing()) + result.HttpResponse = resp + result.Model = &respObj.Values + result.nextLink = respObj.NextLink + if respObj.NextLink != nil { + result.nextPageFunc = func(ctx context.Context, nextLink string) (result ListAuthorizationRulesResponse, err error) { + req, err := c.preparerForListAuthorizationRulesWithNextLink(ctx, nextLink) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "ListAuthorizationRules", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "ListAuthorizationRules", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForListAuthorizationRules(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "ListAuthorizationRules", result.HttpResponse, "Failure responding to request") + return + } + + return + } + } + return +} diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/method_listbynamespace_autorest.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/method_listbynamespace_autorest.go new file mode 100644 index 000000000000..4689e12f94b4 --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/wcfrelays/method_listbynamespace_autorest.go @@ -0,0 +1,183 @@ +package wcfrelays + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type ListByNamespaceResponse struct { + HttpResponse *http.Response + Model *[]WcfRelay + + nextLink *string + nextPageFunc func(ctx context.Context, nextLink string) (ListByNamespaceResponse, error) +} + +type ListByNamespaceCompleteResult struct { + Items []WcfRelay +} + +func (r ListByNamespaceResponse) HasMore() bool { + return r.nextLink != nil +} + +func (r ListByNamespaceResponse) LoadMore(ctx context.Context) (resp ListByNamespaceResponse, err error) { + if !r.HasMore() { + err = fmt.Errorf("no more pages returned") + return + } + return r.nextPageFunc(ctx, *r.nextLink) +} + +// ListByNamespace ... 
+func (c WCFRelaysClient) ListByNamespace(ctx context.Context, id NamespaceId) (resp ListByNamespaceResponse, err error) { + req, err := c.preparerForListByNamespace(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "ListByNamespace", nil, "Failure preparing request") + return + } + + resp.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "ListByNamespace", resp.HttpResponse, "Failure sending request") + return + } + + resp, err = c.responderForListByNamespace(resp.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "ListByNamespace", resp.HttpResponse, "Failure responding to request") + return + } + return +} + +// ListByNamespaceComplete retrieves all of the results into a single object +func (c WCFRelaysClient) ListByNamespaceComplete(ctx context.Context, id NamespaceId) (ListByNamespaceCompleteResult, error) { + return c.ListByNamespaceCompleteMatchingPredicate(ctx, id, WcfRelayPredicate{}) +} + +// ListByNamespaceCompleteMatchingPredicate retrieves all of the results and then applied the predicate +func (c WCFRelaysClient) ListByNamespaceCompleteMatchingPredicate(ctx context.Context, id NamespaceId, predicate WcfRelayPredicate) (resp ListByNamespaceCompleteResult, err error) { + items := make([]WcfRelay, 0) + + page, err := c.ListByNamespace(ctx, id) + if err != nil { + err = fmt.Errorf("loading the initial page: %+v", err) + return + } + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + for page.HasMore() { + page, err = page.LoadMore(ctx) + if err != nil { + err = fmt.Errorf("loading the next page: %+v", err) + return + } + + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + } + + out := ListByNamespaceCompleteResult{ + Items: items, + } + return out, nil +} + +// preparerForListByNamespace prepares the ListByNamespace request. +func (c WCFRelaysClient) preparerForListByNamespace(ctx context.Context, id NamespaceId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/wcfRelays", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// preparerForListByNamespaceWithNextLink prepares the ListByNamespace request with the given nextLink token. 
+func (c WCFRelaysClient) preparerForListByNamespaceWithNextLink(ctx context.Context, nextLink string) (*http.Request, error) { + uri, err := url.Parse(nextLink) + if err != nil { + return nil, fmt.Errorf("parsing nextLink %q: %+v", nextLink, err) + } + queryParameters := map[string]interface{}{} + for k, v := range uri.Query() { + if len(v) == 0 { + continue + } + val := v[0] + val = autorest.Encode("query", val) + queryParameters[k] = val + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(uri.Path), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForListByNamespace handles the response to the ListByNamespace request. The method always +// closes the http.Response Body. +func (c WCFRelaysClient) responderForListByNamespace(resp *http.Response) (result ListByNamespaceResponse, err error) { + type page struct { + Values []WcfRelay `json:"value"` + NextLink *string `json:"nextLink"` + } + var respObj page + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&respObj), + autorest.ByClosing()) + result.HttpResponse = resp + result.Model = &respObj.Values + result.nextLink = respObj.NextLink + if respObj.NextLink != nil { + result.nextPageFunc = func(ctx context.Context, nextLink string) (result ListByNamespaceResponse, err error) { + req, err := c.preparerForListByNamespaceWithNextLink(ctx, nextLink) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "ListByNamespace", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "ListByNamespace", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForListByNamespace(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "ListByNamespace", result.HttpResponse, "Failure responding to request") + return + } + + return + } + } + return +} diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/method_listkeys_autorest.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/method_listkeys_autorest.go new file mode 100644 index 000000000000..02ccae927696 --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/wcfrelays/method_listkeys_autorest.go @@ -0,0 +1,65 @@ +package wcfrelays + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type ListKeysResponse struct { + HttpResponse *http.Response + Model *AccessKeys +} + +// ListKeys ... 
+func (c WCFRelaysClient) ListKeys(ctx context.Context, id WcfRelayAuthorizationRuleId) (result ListKeysResponse, err error) { + req, err := c.preparerForListKeys(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "ListKeys", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "ListKeys", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForListKeys(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "ListKeys", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForListKeys prepares the ListKeys request. +func (c WCFRelaysClient) preparerForListKeys(ctx context.Context, id WcfRelayAuthorizationRuleId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/listKeys", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForListKeys handles the response to the ListKeys request. The method always +// closes the http.Response Body. +func (c WCFRelaysClient) responderForListKeys(resp *http.Response) (result ListKeysResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/method_regeneratekeys_autorest.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/method_regeneratekeys_autorest.go new file mode 100644 index 000000000000..496228169180 --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/wcfrelays/method_regeneratekeys_autorest.go @@ -0,0 +1,66 @@ +package wcfrelays + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type RegenerateKeysResponse struct { + HttpResponse *http.Response + Model *AccessKeys +} + +// RegenerateKeys ... +func (c WCFRelaysClient) RegenerateKeys(ctx context.Context, id WcfRelayAuthorizationRuleId, input RegenerateAccessKeyParameters) (result RegenerateKeysResponse, err error) { + req, err := c.preparerForRegenerateKeys(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "RegenerateKeys", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "RegenerateKeys", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForRegenerateKeys(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "wcfrelays.WCFRelaysClient", "RegenerateKeys", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForRegenerateKeys prepares the RegenerateKeys request. 
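// Illustrative only (not part of this change): a minimal sketch of the key
// rotation flow that ListKeys and RegenerateKeys above enable - regenerate a
// key on the authorization rule, then read the refreshed connection strings
// back. The "PrimaryKey" literal and the "context"/"fmt" imports are
// assumptions; real code should use the generated KeyType constant from this
// package's constants.go.
func exampleRotatePrimaryKey(ctx context.Context, client WCFRelaysClient, id WcfRelayAuthorizationRuleId) (*AccessKeys, error) {
	params := RegenerateAccessKeyParameters{
		KeyType: KeyType("PrimaryKey"),
	}
	if _, err := client.RegenerateKeys(ctx, id, params); err != nil {
		return nil, fmt.Errorf("regenerating keys for %s: %+v", id, err)
	}

	keys, err := client.ListKeys(ctx, id)
	if err != nil {
		return nil, fmt.Errorf("listing keys for %s: %+v", id, err)
	}
	return keys.Model, nil
}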
+func (c WCFRelaysClient) preparerForRegenerateKeys(ctx context.Context, id WcfRelayAuthorizationRuleId, input RegenerateAccessKeyParameters) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/regenerateKeys", id.ID())), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForRegenerateKeys handles the response to the RegenerateKeys request. The method always +// closes the http.Response Body. +func (c WCFRelaysClient) responderForRegenerateKeys(resp *http.Response) (result RegenerateKeysResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/model_accesskeys.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/model_accesskeys.go new file mode 100644 index 000000000000..135f668e9b89 --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/wcfrelays/model_accesskeys.go @@ -0,0 +1,9 @@ +package wcfrelays + +type AccessKeys struct { + KeyName *string `json:"keyName,omitempty"` + PrimaryConnectionString *string `json:"primaryConnectionString,omitempty"` + PrimaryKey *string `json:"primaryKey,omitempty"` + SecondaryConnectionString *string `json:"secondaryConnectionString,omitempty"` + SecondaryKey *string `json:"secondaryKey,omitempty"` +} diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/model_authorizationrule.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/model_authorizationrule.go new file mode 100644 index 000000000000..e81cea5dce5e --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/wcfrelays/model_authorizationrule.go @@ -0,0 +1,8 @@ +package wcfrelays + +type AuthorizationRule struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties AuthorizationRuleProperties `json:"properties"` + Type *string `json:"type,omitempty"` +} diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/model_authorizationruleproperties.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/model_authorizationruleproperties.go new file mode 100644 index 000000000000..ad6c7c8ff6ce --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/wcfrelays/model_authorizationruleproperties.go @@ -0,0 +1,5 @@ +package wcfrelays + +type AuthorizationRuleProperties struct { + Rights []AccessRights `json:"rights"` +} diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/model_regenerateaccesskeyparameters.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/model_regenerateaccesskeyparameters.go new file mode 100644 index 000000000000..fbe0eea2f2fb --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/wcfrelays/model_regenerateaccesskeyparameters.go @@ -0,0 +1,6 @@ +package wcfrelays + +type RegenerateAccessKeyParameters struct { + Key *string `json:"key,omitempty"` + KeyType KeyType `json:"keyType"` +} diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/model_wcfrelay.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/model_wcfrelay.go new file mode 100644 index 000000000000..ddb93c1c7cf8 --- /dev/null +++ 
b/internal/services/relay/sdk/2017-04-01/wcfrelays/model_wcfrelay.go @@ -0,0 +1,8 @@ +package wcfrelays + +type WcfRelay struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *WcfRelayProperties `json:"properties,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/model_wcfrelayproperties.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/model_wcfrelayproperties.go new file mode 100644 index 000000000000..6e961283cdf8 --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/wcfrelays/model_wcfrelayproperties.go @@ -0,0 +1,12 @@ +package wcfrelays + +type WcfRelayProperties struct { + CreatedAt *string `json:"createdAt,omitempty"` + IsDynamic *bool `json:"isDynamic,omitempty"` + ListenerCount *int64 `json:"listenerCount,omitempty"` + RelayType *Relaytype `json:"relayType,omitempty"` + RequiresClientAuthorization *bool `json:"requiresClientAuthorization,omitempty"` + RequiresTransportSecurity *bool `json:"requiresTransportSecurity,omitempty"` + UpdatedAt *string `json:"updatedAt,omitempty"` + UserMetadata *string `json:"userMetadata,omitempty"` +} diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/predicates.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/predicates.go new file mode 100644 index 000000000000..e8be9910626a --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/wcfrelays/predicates.go @@ -0,0 +1,47 @@ +package wcfrelays + +type AuthorizationRulePredicate struct { + Id *string + Name *string + Type *string +} + +func (p AuthorizationRulePredicate) Matches(input AuthorizationRule) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} + +type WcfRelayPredicate struct { + Id *string + Name *string + Type *string +} + +func (p WcfRelayPredicate) Matches(input WcfRelay) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/internal/services/relay/sdk/2017-04-01/wcfrelays/version.go b/internal/services/relay/sdk/2017-04-01/wcfrelays/version.go new file mode 100644 index 000000000000..35ed6d53043a --- /dev/null +++ b/internal/services/relay/sdk/2017-04-01/wcfrelays/version.go @@ -0,0 +1,9 @@ +package wcfrelays + +import "fmt" + +const defaultApiVersion = "2017-04-01" + +func userAgent() string { + return fmt.Sprintf("pandora/wcfrelays/%s", defaultApiVersion) +} diff --git a/internal/services/servicefabric/service_fabric_cluster_resource_test.go b/internal/services/servicefabric/service_fabric_cluster_resource_test.go index f23231f5e112..89e866a49951 100644 --- a/internal/services/servicefabric/service_fabric_cluster_resource_test.go +++ b/internal/services/servicefabric/service_fabric_cluster_resource_test.go @@ -1370,13 +1370,22 @@ resource "azurerm_resource_group" "test" { data "azurerm_client_config" "current" { } +data "azuread_domains" "test" { +} + resource "azuread_application" "cluster_explorer" { - name = "${azurerm_resource_group.test.name}-explorer-AAD" - homepage = "https://example:19080/Explorer/index.html" - identifier_uris =
["https://example%d:19080/Explorer/index.html"] - reply_urls = ["https://example:19080/Explorer/index.html"] - available_to_other_tenants = false - oauth2_allow_implicit_flow = true + display_name = "${azurerm_resource_group.test.name}-explorer-AAD" + identifier_uris = ["https://test-%s.${data.azuread_domains.test.domains[0].domain_name}:19080/Explorer/index.html"] + web { + homepage_url = "https://example:19080/Explorer/index.html" + redirect_uris = ["https://example:19080/Explorer/index.html"] + + implicit_grant { + access_token_issuance_enabled = true + } + } + sign_in_audience = "AzureADMyOrg" + # https://blogs.msdn.microsoft.com/aaddevsup/2018/06/06/guid-table-for-windows-azure-active-directory-permissions/ # https://shawntabrizi.com/aad/common-microsoft-resources-azure-active-directory/ @@ -1395,11 +1404,15 @@ resource "azuread_service_principal" "cluster_explorer" { } resource "azuread_application" "cluster_console" { - name = "${azurerm_resource_group.test.name}-console-AAD" - type = "native" - reply_urls = ["urn:ietf:wg:oauth:2.0:oob"] - available_to_other_tenants = false - oauth2_allow_implicit_flow = true + display_name = "${azurerm_resource_group.test.name}-console-AAD" + sign_in_audience = "AzureADMyOrg" + web { + redirect_uris = ["urn:ietf:wg:oauth:2.0:oob"] + + implicit_grant { + access_token_issuance_enabled = true + } + } # https://blogs.msdn.microsoft.com/aaddevsup/2018/06/06/guid-table-for-windows-azure-active-directory-permissions/ # https://shawntabrizi.com/aad/common-microsoft-resources-azure-active-directory/ @@ -1462,7 +1475,7 @@ resource "azurerm_service_fabric_cluster" "test" { http_endpoint_port = 19080 } } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) +`, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomInteger) } func (r ServiceFabricClusterResource) azureActiveDirectoryDelete(data acceptance.TestData) string { diff --git a/internal/services/signalr/migration/network_acl_v0_to_v1.go b/internal/services/signalr/migration/network_acl_v0_to_v1.go new file mode 100644 index 000000000000..eb7191a3ae89 --- /dev/null +++ b/internal/services/signalr/migration/network_acl_v0_to_v1.go @@ -0,0 +1,111 @@ +package migration + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-provider-azurerm/internal/services/signalr/sdk/2020-05-01/signalr" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" +) + +var _ pluginsdk.StateUpgrade = NetworkAclV0ToV1{} + +type NetworkAclV0ToV1 struct { +} + +func (n NetworkAclV0ToV1) Schema() map[string]*pluginsdk.Schema { + return map[string]*pluginsdk.Schema{ + "signalr_service_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + }, + + "default_action": { + Type: pluginsdk.TypeString, + Required: true, + }, + + "public_network": { + Type: pluginsdk.TypeList, + Required: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "allowed_request_types": { + Type: pluginsdk.TypeSet, + Optional: true, + ConflictsWith: []string{"public_network.0.denied_request_types"}, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + "denied_request_types": { + Type: pluginsdk.TypeSet, + Optional: true, + ConflictsWith: []string{"public_network.0.allowed_request_types"}, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + }, + }, + }, + + "private_endpoint": { + Type: pluginsdk.TypeSet, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: 
map[string]*pluginsdk.Schema{ + "id": { + Type: pluginsdk.TypeString, + Required: true, + }, + + "allowed_request_types": { + Type: pluginsdk.TypeSet, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + "denied_request_types": { + Type: pluginsdk.TypeSet, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + }, + }, + }, + } +} + +func (n NetworkAclV0ToV1) UpgradeFunc() pluginsdk.StateUpgraderFunc { + return func(ctx context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + log.Println("[DEBUG] Migrating SignalR Network ACL from v0 to v1 format") + + // the old segment is `SignalR` but should be `signalR` + oldId := rawState["id"].(string) + parsed, err := signalr.ParseSignalRIDInsensitively(oldId) + if err != nil { + return rawState, fmt.Errorf("parsing Old Resource ID %q: %+v", oldId, err) + } + + newId := parsed.ID() + log.Printf("[DEBUG] Updating ID from %q to %q", oldId, newId) + rawState["id"] = newId + + // SignalR Service ID is the same as ID, but let's call the values out specifically + oldServiceId := rawState["signalr_service_id"].(string) + newServiceId := parsed.ID() + log.Printf("[DEBUG] Updating SignalR Service ID from %q to %q", oldServiceId, newServiceId) + rawState["signalr_service_id"] = newId + + return rawState, nil + } +} diff --git a/internal/services/signalr/migration/service_v0_to_v1.go b/internal/services/signalr/migration/service_v0_to_v1.go new file mode 100644 index 000000000000..35b433177478 --- /dev/null +++ b/internal/services/signalr/migration/service_v0_to_v1.go @@ -0,0 +1,200 @@ +package migration + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-provider-azurerm/internal/services/signalr/sdk/2020-05-01/signalr" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" +) + +var _ pluginsdk.StateUpgrade = ServiceV0ToV1{} + +type ServiceV0ToV1 struct { +} + +func (s ServiceV0ToV1) Schema() map[string]*pluginsdk.Schema { + return map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + }, + + "location": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + }, + + "resource_group_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + }, + + "sku": { + Type: pluginsdk.TypeList, + Required: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + }, + + "capacity": { + Type: pluginsdk.TypeInt, + Required: true, + }, + }, + }, + }, + + "features": { + Type: pluginsdk.TypeSet, + Optional: true, + Computed: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "flag": { + Type: pluginsdk.TypeString, + Required: true, + }, + + "value": { + Type: pluginsdk.TypeString, + Required: true, + }, + }, + }, + }, + + "upstream_endpoint": { + Type: pluginsdk.TypeSet, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "category_pattern": { + Type: pluginsdk.TypeList, + Required: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + "event_pattern": { + Type: pluginsdk.TypeList, + Required: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + "hub_pattern": { + Type: pluginsdk.TypeList, + Required: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + "url_template": { + Type: pluginsdk.TypeString, + 
Required: true, + }, + }, + }, + }, + + "cors": { + Type: pluginsdk.TypeList, + Optional: true, + Computed: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "allowed_origins": { + Type: pluginsdk.TypeSet, + Required: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + }, + }, + }, + + "hostname": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "ip_address": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "public_port": { + Type: pluginsdk.TypeInt, + Computed: true, + }, + + "server_port": { + Type: pluginsdk.TypeInt, + Computed: true, + }, + + "primary_access_key": { + Type: pluginsdk.TypeString, + Computed: true, + Sensitive: true, + }, + + "primary_connection_string": { + Type: pluginsdk.TypeString, + Computed: true, + Sensitive: true, + }, + + "secondary_access_key": { + Type: pluginsdk.TypeString, + Computed: true, + Sensitive: true, + }, + + "secondary_connection_string": { + Type: pluginsdk.TypeString, + Computed: true, + Sensitive: true, + }, + + "tags": { + Type: pluginsdk.TypeMap, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + } +} + +func (s ServiceV0ToV1) UpgradeFunc() pluginsdk.StateUpgraderFunc { + return func(ctx context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + log.Println("[DEBUG] Migrating SignalR from v0 to v1 format") + + // the old segment is `SignalR` but should be `signalR` + oldId := rawState["id"].(string) + parsed, err := signalr.ParseSignalRIDInsensitively(oldId) + if err != nil { + return rawState, fmt.Errorf("parsing Old Resource ID %q: %+v", oldId, err) + } + + newId := parsed.ID() + log.Printf("[DEBUG] Updating ID from %q to %q", oldId, newId) + rawState["id"] = newId + + return rawState, nil + } +} diff --git a/internal/services/signalr/sdk/2020-05-01/signalr/constants.go b/internal/services/signalr/sdk/2020-05-01/signalr/constants.go index 1f982d19ce45..f6a3fdc741d6 100644 --- a/internal/services/signalr/sdk/2020-05-01/signalr/constants.go +++ b/internal/services/signalr/sdk/2020-05-01/signalr/constants.go @@ -1,5 +1,7 @@ package signalr +import "strings" + type ACLAction string const ( @@ -7,6 +9,27 @@ const ( ACLActionDeny ACLAction = "Deny" ) +func PossibleValuesForACLAction() []string { + return []string{ + string(ACLActionAllow), + string(ACLActionDeny), + } +} + +func parseACLAction(input string) (*ACLAction, error) { + vals := map[string]ACLAction{ + "allow": ACLActionAllow, + "deny": ACLActionDeny, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ACLAction(input) + return &out, nil +} + type FeatureFlags string const ( @@ -15,6 +38,29 @@ const ( FeatureFlagsServiceMode FeatureFlags = "ServiceMode" ) +func PossibleValuesForFeatureFlags() []string { + return []string{ + string(FeatureFlagsEnableConnectivityLogs), + string(FeatureFlagsEnableMessagingLogs), + string(FeatureFlagsServiceMode), + } +} + +func parseFeatureFlags(input string) (*FeatureFlags, error) { + vals := map[string]FeatureFlags{ + "enableconnectivitylogs": FeatureFlagsEnableConnectivityLogs, + "enablemessaginglogs": FeatureFlagsEnableMessagingLogs, + "servicemode": FeatureFlagsServiceMode, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := FeatureFlags(input) + return &out, nil +} + type KeyType string const ( @@ -22,6 
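Both state upgraders above boil down to one normalisation step: parse the previously-stored ID case-insensitively and write the canonical form back into state, so IDs written by older provider versions (which used the `SignalR` segment) converge on `signalR`. Below is a minimal sketch of that step, reusing the parser added in this change; the helper name is illustrative only, and the wiring into the resource's state upgraders is not shown.

package migration

import (
	"fmt"

	"github.com/hashicorp/terraform-provider-azurerm/internal/services/signalr/sdk/2020-05-01/signalr"
)

// normaliseSignalRID mirrors what NetworkAclV0ToV1 and ServiceV0ToV1 do to a
// single state value: parse insensitively, then re-emit the canonical ID.
func normaliseSignalRID(oldId string) (string, error) {
	parsed, err := signalr.ParseSignalRIDInsensitively(oldId)
	if err != nil {
		return "", fmt.Errorf("parsing Old Resource ID %q: %+v", oldId, err)
	}

	// e.g. ".../providers/Microsoft.SignalRService/SignalR/service1" becomes
	// ".../providers/Microsoft.SignalRService/signalR/service1"
	return parsed.ID(), nil
}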
+68,27 @@ const ( KeyTypeSecondary KeyType = "Secondary" ) +func PossibleValuesForKeyType() []string { + return []string{ + string(KeyTypePrimary), + string(KeyTypeSecondary), + } +} + +func parseKeyType(input string) (*KeyType, error) { + vals := map[string]KeyType{ + "primary": KeyTypePrimary, + "secondary": KeyTypeSecondary, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := KeyType(input) + return &out, nil +} + type PrivateLinkServiceConnectionStatus string const ( @@ -31,6 +98,31 @@ const ( PrivateLinkServiceConnectionStatusRejected PrivateLinkServiceConnectionStatus = "Rejected" ) +func PossibleValuesForPrivateLinkServiceConnectionStatus() []string { + return []string{ + string(PrivateLinkServiceConnectionStatusApproved), + string(PrivateLinkServiceConnectionStatusDisconnected), + string(PrivateLinkServiceConnectionStatusPending), + string(PrivateLinkServiceConnectionStatusRejected), + } +} + +func parsePrivateLinkServiceConnectionStatus(input string) (*PrivateLinkServiceConnectionStatus, error) { + vals := map[string]PrivateLinkServiceConnectionStatus{ + "approved": PrivateLinkServiceConnectionStatusApproved, + "disconnected": PrivateLinkServiceConnectionStatusDisconnected, + "pending": PrivateLinkServiceConnectionStatusPending, + "rejected": PrivateLinkServiceConnectionStatusRejected, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := PrivateLinkServiceConnectionStatus(input) + return &out, nil +} + type ProvisioningState string const ( @@ -45,6 +137,41 @@ const ( ProvisioningStateUpdating ProvisioningState = "Updating" ) +func PossibleValuesForProvisioningState() []string { + return []string{ + string(ProvisioningStateCanceled), + string(ProvisioningStateCreating), + string(ProvisioningStateDeleting), + string(ProvisioningStateFailed), + string(ProvisioningStateMoving), + string(ProvisioningStateRunning), + string(ProvisioningStateSucceeded), + string(ProvisioningStateUnknown), + string(ProvisioningStateUpdating), + } +} + +func parseProvisioningState(input string) (*ProvisioningState, error) { + vals := map[string]ProvisioningState{ + "canceled": ProvisioningStateCanceled, + "creating": ProvisioningStateCreating, + "deleting": ProvisioningStateDeleting, + "failed": ProvisioningStateFailed, + "moving": ProvisioningStateMoving, + "running": ProvisioningStateRunning, + "succeeded": ProvisioningStateSucceeded, + "unknown": ProvisioningStateUnknown, + "updating": ProvisioningStateUpdating, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProvisioningState(input) + return &out, nil +} + type ServiceKind string const ( @@ -52,6 +179,27 @@ const ( ServiceKindSignalR ServiceKind = "SignalR" ) +func PossibleValuesForServiceKind() []string { + return []string{ + string(ServiceKindRawWebSockets), + string(ServiceKindSignalR), + } +} + +func parseServiceKind(input string) (*ServiceKind, error) { + vals := map[string]ServiceKind{ + "rawwebsockets": ServiceKindRawWebSockets, + "signalr": ServiceKindSignalR, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ServiceKind(input) + return &out, nil +} + type SignalRRequestType string const ( @@ -61,6 +209,31 @@ const ( SignalRRequestTypeTrace 
SignalRRequestType = "Trace" ) +func PossibleValuesForSignalRRequestType() []string { + return []string{ + string(SignalRRequestTypeClientConnection), + string(SignalRRequestTypeRESTAPI), + string(SignalRRequestTypeServerConnection), + string(SignalRRequestTypeTrace), + } +} + +func parseSignalRRequestType(input string) (*SignalRRequestType, error) { + vals := map[string]SignalRRequestType{ + "clientconnection": SignalRRequestTypeClientConnection, + "restapi": SignalRRequestTypeRESTAPI, + "serverconnection": SignalRRequestTypeServerConnection, + "trace": SignalRRequestTypeTrace, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SignalRRequestType(input) + return &out, nil +} + type SignalRSkuTier string const ( @@ -69,3 +242,28 @@ const ( SignalRSkuTierPremium SignalRSkuTier = "Premium" SignalRSkuTierStandard SignalRSkuTier = "Standard" ) + +func PossibleValuesForSignalRSkuTier() []string { + return []string{ + string(SignalRSkuTierBasic), + string(SignalRSkuTierFree), + string(SignalRSkuTierPremium), + string(SignalRSkuTierStandard), + } +} + +func parseSignalRSkuTier(input string) (*SignalRSkuTier, error) { + vals := map[string]SignalRSkuTier{ + "basic": SignalRSkuTierBasic, + "free": SignalRSkuTierFree, + "premium": SignalRSkuTierPremium, + "standard": SignalRSkuTierStandard, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SignalRSkuTier(input) + return &out, nil +} diff --git a/internal/services/signalr/sdk/2020-05-01/signalr/id_location.go b/internal/services/signalr/sdk/2020-05-01/signalr/id_location.go index 05db42532df1..d26267c38243 100644 --- a/internal/services/signalr/sdk/2020-05-01/signalr/id_location.go +++ b/internal/services/signalr/sdk/2020-05-01/signalr/id_location.go @@ -7,89 +7,105 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = LocationId{} + +// LocationId is a struct representing the Resource ID for a Location type LocationId struct { SubscriptionId string - Name string + Location string } -func NewLocationID(subscriptionId, name string) LocationId { +// NewLocationID returns a new LocationId struct +func NewLocationID(subscriptionId string, location string) LocationId { return LocationId{ SubscriptionId: subscriptionId, - Name: name, + Location: location, } } -func (id LocationId) String() string { - segments := []string{ - fmt.Sprintf("Name %q", id.Name), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Location", segmentsStr) -} - -func (id LocationId) ID() string { - fmtString := "/subscriptions/%s/providers/Microsoft.SignalRService/locations/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.Name) -} - -// ParseLocationID parses a Location ID into an LocationId struct +// ParseLocationID parses 'input' into a LocationId func ParseLocationID(input string) (*LocationId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(LocationId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err - } - - resourceId := LocationId{ - SubscriptionId: id.SubscriptionID, + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") - } + var ok bool + id := LocationId{} - if 
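Each enum in constants.go now gains the same pair of helpers: an exported PossibleValuesFor* list and an unexported parse* that matches case-insensitively and, for anything it does not recognise, passes the raw value through rather than failing. A small sketch of that behaviour follows; it would have to sit in the same package since the parse helpers are unexported, the function name is made up, "AuditOnly" is a hypothetical value used only to show the pass-through path, and "fmt" is assumed to be imported.

// sketch, same package as constants.go
func exampleACLActionParsing() {
	// a known value in the wrong case still maps onto the typed constant
	allow, _ := parseACLAction("ALLOW")
	fmt.Println(*allow) // Allow

	// an unknown value is passed through best-effort instead of erroring,
	// so values added to the API later don't cause hard failures here
	other, _ := parseACLAction("AuditOnly") // hypothetical value
	fmt.Println(*other) // AuditOnly
}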
resourceId.Name, err = id.PopSegment("locations"); err != nil { - return nil, err + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.Location, ok = parsed.Parsed["location"]; !ok { + return nil, fmt.Errorf("the segment 'location' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParseLocationIDInsensitively parses an Location ID into an LocationId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseLocationID method should be used instead for validation etc. +// ParseLocationIDInsensitively parses 'input' case-insensitively into a LocationId +// note: this method should only be used for API response data and not user input func ParseLocationIDInsensitively(input string) (*LocationId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(LocationId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := LocationId{ - SubscriptionId: id.SubscriptionID, + var ok bool + id := LocationId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.Location, ok = parsed.Parsed["location"]; !ok { + return nil, fmt.Errorf("the segment 'location' was not found in the resource id %q", input) } - // find the correct casing for the 'locations' segment - locationsKey := "locations" - for key := range id.Path { - if strings.EqualFold(key, locationsKey) { - locationsKey = key - break - } + return &id, nil +} + +// ValidateLocationID checks that 'input' can be parsed as a Location ID +func ValidateLocationID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.Name, err = id.PopSegment(locationsKey); err != nil { - return nil, err + + if _, err := ParseLocationID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Location ID +func (id LocationId) ID() string { + fmtString := "/subscriptions/%s/providers/Microsoft.SignalRService/locations/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.Location) +} + +// Segments returns a slice of Resource ID Segments which comprise this Location ID +func (id LocationId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("providers", "providers", "providers"), + resourceids.ResourceProviderSegment("microsoftSignalRService", "Microsoft.SignalRService", "Microsoft.SignalRService"), + resourceids.StaticSegment("locations", "locations", "locations"), + resourceids.UserSpecifiedSegment("location", "locationValue"), } +} - return &resourceId, nil +// String returns a human-readable description 
of this Location ID +func (id LocationId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Location: %q", id.Location), + } + return fmt.Sprintf("Location (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/signalr/sdk/2020-05-01/signalr/id_location_test.go b/internal/services/signalr/sdk/2020-05-01/signalr/id_location_test.go index 288a1876d917..83d653586f0a 100644 --- a/internal/services/signalr/sdk/2020-05-01/signalr/id_location_test.go +++ b/internal/services/signalr/sdk/2020-05-01/signalr/id_location_test.go @@ -6,13 +6,25 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = LocationId{} +var _ resourceids.ResourceId = LocationId{} -func TestLocationIDFormatter(t *testing.T) { - actual := NewLocationID("{subscriptionId}", "{location}").ID() - expected := "/subscriptions/{subscriptionId}/providers/Microsoft.SignalRService/locations/{location}" +func TestNewLocationID(t *testing.T) { + id := NewLocationID("12345678-1234-9876-4563-123456789012", "locationValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.Location != "locationValue" { + t.Fatalf("Expected %q but got %q for Segment 'Location'", id.Location, "locationValue") + } +} + +func TestFormatLocationID(t *testing.T) { + actual := NewLocationID("12345678-1234-9876-4563-123456789012", "locationValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.SignalRService/locations/locationValue" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,53 +34,50 @@ func TestParseLocationID(t *testing.T) { Error bool Expected *LocationId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.SignalRService/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.SignalRService/locations/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.SignalRService", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.SignalRService/locations", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.SignalRService/locations/{location}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.SignalRService/locations/locationValue", Expected: &LocationId{ - SubscriptionId: "{subscriptionId}", - Name: "{location}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + Location: "locationValue", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/PROVIDERS/MICROSOFT.SIGNALRSERVICE/LOCATIONS/{LOCATION}", + // Invalid (Valid Uri with Extra segment) + Input: 
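The rewritten String() builds a multi-line, human-readable description of the ID, which is what gets interpolated when the ID value is used with %s in an error message. A minimal sketch, assuming "fmt" and the signalr package are imported; the surrounding error wording is only an example, not taken from this change.

// illustrative only: how the String() output surfaces in an error message
func exampleLocationIDInError() {
	id := signalr.NewLocationID("12345678-1234-9876-4563-123456789012", "locationValue")
	err := fmt.Errorf("retrieving %s: some underlying error", id)
	fmt.Println(err)
	// retrieving Location (Subscription: "12345678-1234-9876-4563-123456789012"
	// Location: "locationValue"): some underlying error
}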
"/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.SignalRService/locations/locationValue/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -78,7 +87,7 @@ func TestParseLocationID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -87,9 +96,11 @@ func TestParseLocationID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.Location != v.Expected.Location { + t.Fatalf("Expected %q but got %q for Location", v.Expected.Location, actual.Location) } + } } @@ -99,74 +110,88 @@ func TestParseLocationIDInsensitively(t *testing.T) { Error bool Expected *LocationId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.SignalRService/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.SignalRService/locations/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.SignalRService/locations/{location}", - Expected: &LocationId{ - SubscriptionId: "{subscriptionId}", - Name: "{location}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.SignalRService/locations/{location}", - Expected: &LocationId{ - SubscriptionId: "{subscriptionId}", - Name: "{location}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.SignalRService", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.sIgNaLrSeRvIcE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.SignalRService/locations", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.SignalRService/LOCATIONS/{location}", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.sIgNaLrSeRvIcE/lOcAtIoNs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.SignalRService/locations/locationValue", Expected: &LocationId{ - SubscriptionId: "{subscriptionId}", - Name: 
"{location}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + Location: "locationValue", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.SignalRService/LoCaTiOnS/{location}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.SignalRService/locations/locationValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.sIgNaLrSeRvIcE/lOcAtIoNs/lOcAtIoNvAlUe", Expected: &LocationId{ - SubscriptionId: "{subscriptionId}", - Name: "{location}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + Location: "lOcAtIoNvAlUe", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.sIgNaLrSeRvIcE/lOcAtIoNs/lOcAtIoNvAlUe/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -176,7 +201,7 @@ func TestParseLocationIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -185,8 +210,10 @@ func TestParseLocationIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.Location != v.Expected.Location { + t.Fatalf("Expected %q but got %q for Location", v.Expected.Location, actual.Location) } + } } diff --git a/internal/services/signalr/sdk/2020-05-01/signalr/id_privateendpointconnection.go b/internal/services/signalr/sdk/2020-05-01/signalr/id_privateendpointconnection.go index b15c87804edc..1deb956dce58 100644 --- a/internal/services/signalr/sdk/2020-05-01/signalr/id_privateendpointconnection.go +++ b/internal/services/signalr/sdk/2020-05-01/signalr/id_privateendpointconnection.go @@ -7,120 +7,131 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = PrivateEndpointConnectionId{} + +// PrivateEndpointConnectionId is a struct representing the Resource ID for a Private Endpoint Connection type PrivateEndpointConnectionId struct { - SubscriptionId string - ResourceGroup string - SignalRName string - Name string + SubscriptionId string + ResourceGroupName string + ResourceName string + PrivateEndpointConnectionName string } -func NewPrivateEndpointConnectionID(subscriptionId, resourceGroup, signalRName, name string) PrivateEndpointConnectionId { +// NewPrivateEndpointConnectionID returns a new PrivateEndpointConnectionId struct +func NewPrivateEndpointConnectionID(subscriptionId string, resourceGroupName string, resourceName string, privateEndpointConnectionName string) PrivateEndpointConnectionId { return PrivateEndpointConnectionId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, - SignalRName: signalRName, - Name: name, - } -} - -func (id PrivateEndpointConnectionId) String() string { - segments := []string{ - fmt.Sprintf("Name %q", id.Name), - fmt.Sprintf("Signal R Name %q", id.SignalRName), - fmt.Sprintf("Resource Group %q", id.ResourceGroup), + SubscriptionId: subscriptionId, + 
ResourceGroupName: resourceGroupName, + ResourceName: resourceName, + PrivateEndpointConnectionName: privateEndpointConnectionName, } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Private Endpoint Connection", segmentsStr) -} - -func (id PrivateEndpointConnectionId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.SignalRService/SignalR/%s/privateEndpointConnections/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.SignalRName, id.Name) } -// ParsePrivateEndpointConnectionID parses a PrivateEndpointConnection ID into an PrivateEndpointConnectionId struct +// ParsePrivateEndpointConnectionID parses 'input' into a PrivateEndpointConnectionId func ParsePrivateEndpointConnectionID(input string) (*PrivateEndpointConnectionId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(PrivateEndpointConnectionId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := PrivateEndpointConnectionId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, - } + var ok bool + id := PrivateEndpointConnectionId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.SignalRName, err = id.PopSegment("SignalR"); err != nil { - return nil, err - } - if resourceId.Name, err = id.PopSegment("privateEndpointConnections"); err != nil { - return nil, err + if id.ResourceName, ok = parsed.Parsed["resourceName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceName' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.PrivateEndpointConnectionName, ok = parsed.Parsed["privateEndpointConnectionName"]; !ok { + return nil, fmt.Errorf("the segment 'privateEndpointConnectionName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParsePrivateEndpointConnectionIDInsensitively parses an PrivateEndpointConnection ID into an PrivateEndpointConnectionId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParsePrivateEndpointConnectionID method should be used instead for validation etc. 
+// ParsePrivateEndpointConnectionIDInsensitively parses 'input' case-insensitively into a PrivateEndpointConnectionId +// note: this method should only be used for API response data and not user input func ParsePrivateEndpointConnectionIDInsensitively(input string) (*PrivateEndpointConnectionId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(PrivateEndpointConnectionId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := PrivateEndpointConnectionId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, - } + var ok bool + id := PrivateEndpointConnectionId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - // find the correct casing for the 'SignalR' segment - SignalRKey := "SignalR" - for key := range id.Path { - if strings.EqualFold(key, SignalRKey) { - SignalRKey = key - break - } + if id.ResourceName, ok = parsed.Parsed["resourceName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceName' was not found in the resource id %q", input) } - if resourceId.SignalRName, err = id.PopSegment(SignalRKey); err != nil { - return nil, err + + if id.PrivateEndpointConnectionName, ok = parsed.Parsed["privateEndpointConnectionName"]; !ok { + return nil, fmt.Errorf("the segment 'privateEndpointConnectionName' was not found in the resource id %q", input) } - // find the correct casing for the 'privateEndpointConnections' segment - privateEndpointConnectionsKey := "privateEndpointConnections" - for key := range id.Path { - if strings.EqualFold(key, privateEndpointConnectionsKey) { - privateEndpointConnectionsKey = key - break - } + return &id, nil +} + +// ValidatePrivateEndpointConnectionID checks that 'input' can be parsed as a Private Endpoint Connection ID +func ValidatePrivateEndpointConnectionID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.Name, err = id.PopSegment(privateEndpointConnectionsKey); err != nil { - return nil, err + + if _, err := ParsePrivateEndpointConnectionID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Private Endpoint Connection ID +func (id PrivateEndpointConnectionId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.SignalRService/signalR/%s/privateEndpointConnections/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ResourceName, id.PrivateEndpointConnectionName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Private Endpoint Connection ID +func (id PrivateEndpointConnectionId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", 
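The new Validate* helpers use the (interface{}, string) ([]string, []error) shape, which matches the plugin SDK's schema validation signature, so they can be attached directly to a schema attribute. A minimal sketch follows, assuming pluginsdk.Schema exposes ValidateFunc in the same way as the underlying plugin SDK schema; the wrapper function and the attribute it would back are made up for illustration.

// illustrative only
func examplePrivateEndpointConnectionIDField() *pluginsdk.Schema {
	return &pluginsdk.Schema{
		Type:         pluginsdk.TypeString,
		Required:     true,
		ForceNew:     true,
		ValidateFunc: signalr.ValidatePrivateEndpointConnectionID,
	}
}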
"subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("providers", "providers", "providers"), + resourceids.ResourceProviderSegment("microsoftSignalRService", "Microsoft.SignalRService", "Microsoft.SignalRService"), + resourceids.StaticSegment("signalR", "signalR", "signalR"), + resourceids.UserSpecifiedSegment("resourceName", "resourceValue"), + resourceids.StaticSegment("privateEndpointConnections", "privateEndpointConnections", "privateEndpointConnections"), + resourceids.UserSpecifiedSegment("privateEndpointConnectionName", "privateEndpointConnectionValue"), } +} - return &resourceId, nil +// String returns a human-readable description of this Private Endpoint Connection ID +func (id PrivateEndpointConnectionId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Resource Name: %q", id.ResourceName), + fmt.Sprintf("Private Endpoint Connection Name: %q", id.PrivateEndpointConnectionName), + } + return fmt.Sprintf("Private Endpoint Connection (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/signalr/sdk/2020-05-01/signalr/id_privateendpointconnection_test.go b/internal/services/signalr/sdk/2020-05-01/signalr/id_privateendpointconnection_test.go index b175dbe1ce58..6827f2428d9f 100644 --- a/internal/services/signalr/sdk/2020-05-01/signalr/id_privateendpointconnection_test.go +++ b/internal/services/signalr/sdk/2020-05-01/signalr/id_privateendpointconnection_test.go @@ -6,13 +6,33 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = PrivateEndpointConnectionId{} +var _ resourceids.ResourceId = PrivateEndpointConnectionId{} -func TestPrivateEndpointConnectionIDFormatter(t *testing.T) { - actual := NewPrivateEndpointConnectionID("{subscriptionId}", "{resourceGroupName}", "{resourceName}", "{privateEndpointConnectionName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SignalRService/SignalR/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}" +func TestNewPrivateEndpointConnectionID(t *testing.T) { + id := NewPrivateEndpointConnectionID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue", "privateEndpointConnectionValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.ResourceName != "resourceValue" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceName'", id.ResourceName, "resourceValue") + } + + if id.PrivateEndpointConnectionName != "privateEndpointConnectionValue" { + t.Fatalf("Expected %q but got %q for Segment 'PrivateEndpointConnectionName'", id.PrivateEndpointConnectionName, "privateEndpointConnectionValue") + } +} + +func TestFormatPrivateEndpointConnectionID(t *testing.T) { + actual := NewPrivateEndpointConnectionID("12345678-1234-9876-4563-123456789012", 
"example-resource-group", "resourceValue", "privateEndpointConnectionValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SignalRService/signalR/resourceValue/privateEndpointConnections/privateEndpointConnectionValue" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,79 +42,72 @@ func TestParsePrivateEndpointConnectionID(t *testing.T) { Error bool Expected *PrivateEndpointConnectionId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing SignalRName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", Error: true, }, - { - // missing value for SignalRName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SignalRService/SignalR/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SignalRService", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SignalRService/SignalR/{resourceName}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SignalRService/signalR", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SignalRService/SignalR/{resourceName}/privateEndpointConnections/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SignalRService/signalR/resourceValue", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SignalRService/signalR/resourceValue/privateEndpointConnections", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SignalRService/SignalR/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SignalRService/signalR/resourceValue/privateEndpointConnections/privateEndpointConnectionValue", Expected: &PrivateEndpointConnectionId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - SignalRName: "{resourceName}", - Name: 
"{privateEndpointConnectionName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ResourceName: "resourceValue", + PrivateEndpointConnectionName: "privateEndpointConnectionValue", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.SIGNALRSERVICE/SIGNALR/{RESOURCENAME}/PRIVATEENDPOINTCONNECTIONS/{PRIVATEENDPOINTCONNECTIONNAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SignalRService/signalR/resourceValue/privateEndpointConnections/privateEndpointConnectionValue/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -104,7 +117,7 @@ func TestParsePrivateEndpointConnectionID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -113,15 +126,19 @@ func TestParsePrivateEndpointConnectionID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.SignalRName != v.Expected.SignalRName { - t.Fatalf("Expected %q but got %q for SignalRName", v.Expected.SignalRName, actual.SignalRName) + + if actual.ResourceName != v.Expected.ResourceName { + t.Fatalf("Expected %q but got %q for ResourceName", v.Expected.ResourceName, actual.ResourceName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.PrivateEndpointConnectionName != v.Expected.PrivateEndpointConnectionName { + t.Fatalf("Expected %q but got %q for PrivateEndpointConnectionName", v.Expected.PrivateEndpointConnectionName, actual.PrivateEndpointConnectionName) } + } } @@ -131,106 +148,132 @@ func TestParsePrivateEndpointConnectionIDInsensitively(t *testing.T) { Error bool Expected *PrivateEndpointConnectionId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing SignalRName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for SignalRName - Input: 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SignalRService/SignalR/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SignalRService/SignalR/{resourceName}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SignalRService/SignalR/{resourceName}/privateEndpointConnections/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SignalRService/SignalR/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}", - Expected: &PrivateEndpointConnectionId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - SignalRName: "{resourceName}", - Name: "{privateEndpointConnectionName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.signalrService/SignalR/{resourceName}/privateendpointconnections/{privateEndpointConnectionName}", - Expected: &PrivateEndpointConnectionId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - SignalRName: "{resourceName}", - Name: "{privateEndpointConnectionName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SignalRService", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sIgNaLrSeRvIcE", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SIGNALRService/SignalR/{resourceName}/PRIVATEENDPOINTCONNECTIONS/{privateEndpointConnectionName}", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SignalRService/signalR", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sIgNaLrSeRvIcE/sIgNaLr", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SignalRService/signalR/resourceValue", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sIgNaLrSeRvIcE/sIgNaLr/rEsOuRcEvAlUe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SignalRService/signalR/resourceValue/privateEndpointConnections", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sIgNaLrSeRvIcE/sIgNaLr/rEsOuRcEvAlUe/pRiVaTeEnDpOiNtCoNnEcTiOnS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SignalRService/signalR/resourceValue/privateEndpointConnections/privateEndpointConnectionValue", Expected: &PrivateEndpointConnectionId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - SignalRName: "{resourceName}", - Name: "{privateEndpointConnectionName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ResourceName: "resourceValue", + PrivateEndpointConnectionName: "privateEndpointConnectionValue", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SiGnAlRService/SignalR/{resourceName}/PrIvAtEeNdPoInTcOnNeCtIoNs/{privateEndpointConnectionName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SignalRService/signalR/resourceValue/privateEndpointConnections/privateEndpointConnectionValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sIgNaLrSeRvIcE/sIgNaLr/rEsOuRcEvAlUe/pRiVaTeEnDpOiNtCoNnEcTiOnS/pRiVaTeEnDpOiNtCoNnEcTiOnVaLuE", Expected: &PrivateEndpointConnectionId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - SignalRName: "{resourceName}", - Name: "{privateEndpointConnectionName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + ResourceName: "rEsOuRcEvAlUe", + PrivateEndpointConnectionName: "pRiVaTeEnDpOiNtCoNnEcTiOnVaLuE", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sIgNaLrSeRvIcE/sIgNaLr/rEsOuRcEvAlUe/pRiVaTeEnDpOiNtCoNnEcTiOnS/pRiVaTeEnDpOiNtCoNnEcTiOnVaLuE/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -240,7 +283,7 @@ func TestParsePrivateEndpointConnectionIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -249,14 +292,18 @@ func TestParsePrivateEndpointConnectionIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", 
v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.SignalRName != v.Expected.SignalRName { - t.Fatalf("Expected %q but got %q for SignalRName", v.Expected.SignalRName, actual.SignalRName) + + if actual.ResourceName != v.Expected.ResourceName { + t.Fatalf("Expected %q but got %q for ResourceName", v.Expected.ResourceName, actual.ResourceName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.PrivateEndpointConnectionName != v.Expected.PrivateEndpointConnectionName { + t.Fatalf("Expected %q but got %q for PrivateEndpointConnectionName", v.Expected.PrivateEndpointConnectionName, actual.PrivateEndpointConnectionName) } + } } diff --git a/internal/services/signalr/sdk/2020-05-01/signalr/id_resourcegroup.go b/internal/services/signalr/sdk/2020-05-01/signalr/id_resourcegroup.go index 7bcbabb104de..389d49132321 100644 --- a/internal/services/signalr/sdk/2020-05-01/signalr/id_resourcegroup.go +++ b/internal/services/signalr/sdk/2020-05-01/signalr/id_resourcegroup.go @@ -7,83 +7,103 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = ResourceGroupId{} + +// ResourceGroupId is a struct representing the Resource ID for a Resource Group type ResourceGroupId struct { - SubscriptionId string - ResourceGroup string + SubscriptionId string + ResourceGroupName string } -func NewResourceGroupID(subscriptionId, resourceGroup string) ResourceGroupId { +// NewResourceGroupID returns a new ResourceGroupId struct +func NewResourceGroupID(subscriptionId string, resourceGroupName string) ResourceGroupId { return ResourceGroupId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, } } -func (id ResourceGroupId) String() string { - segments := []string{ - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Resource Group", segmentsStr) -} - -func (id ResourceGroupId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup) -} - -// ParseResourceGroupID parses a ResourceGroup ID into an ResourceGroupId struct +// ParseResourceGroupID parses 'input' into a ResourceGroupId func ParseResourceGroupID(input string) (*ResourceGroupId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(ResourceGroupId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err - } - - resourceId := ResourceGroupId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") - } + var ok bool + id := ResourceGroupId{} - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.ResourceGroupName, ok = 
parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParseResourceGroupIDInsensitively parses an ResourceGroup ID into an ResourceGroupId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseResourceGroupID method should be used instead for validation etc. +// ParseResourceGroupIDInsensitively parses 'input' case-insensitively into a ResourceGroupId +// note: this method should only be used for API response data and not user input func ParseResourceGroupIDInsensitively(input string) (*ResourceGroupId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(ResourceGroupId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := ResourceGroupId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - resourceId := ResourceGroupId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + return &id, nil +} + +// ValidateResourceGroupID checks that 'input' can be parsed as a Resource Group ID +func ValidateResourceGroupID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if _, err := ParseResourceGroupID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Resource Group ID +func (id ResourceGroupId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Resource Group ID +func (id ResourceGroupId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), } +} - return &resourceId, nil +// String returns a human-readable description of this Resource Group ID +func (id ResourceGroupId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + } + return fmt.Sprintf("Resource Group (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/signalr/sdk/2020-05-01/signalr/id_resourcegroup_test.go b/internal/services/signalr/sdk/2020-05-01/signalr/id_resourcegroup_test.go index 3de2f560ab58..4d7347a3e4ac 100644 --- 
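Compared with the old ParseAzureResourceID-based code, a failed parse now reports the offending input up front ("parsing %q: %+v"), with the segment-level detail supplied by the shared resourceids parser. A short sketch of what a caller sees for a truncated ID; the inner message comes from go-azure-helpers and is not reproduced here.

// illustrative only: a truncated Resource Group ID fails with a wrapped error
if _, err := signalr.ParseResourceGroupID("/subscriptions/12345678-1234-9876-4563-123456789012"); err != nil {
	fmt.Printf("%+v\n", err) // parsing "/subscriptions/...": <detail from the shared parser>
}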
a/internal/services/signalr/sdk/2020-05-01/signalr/id_resourcegroup_test.go +++ b/internal/services/signalr/sdk/2020-05-01/signalr/id_resourcegroup_test.go @@ -6,13 +6,25 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = ResourceGroupId{} +var _ resourceids.ResourceId = ResourceGroupId{} -func TestResourceGroupIDFormatter(t *testing.T) { - actual := NewResourceGroupID("{subscriptionId}", "{resourceGroupName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}" +func TestNewResourceGroupID(t *testing.T) { + id := NewResourceGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } +} + +func TestFormatResourceGroupID(t *testing.T) { + actual := NewResourceGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,53 +34,40 @@ func TestParseResourceGroupID(t *testing.T) { Error bool Expected *ResourceGroupId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - - { - // missing SubscriptionId - Input: "/", - Error: true, - }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -78,7 +77,7 @@ func TestParseResourceGroupID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -87,9 +86,11 @@ func TestParseResourceGroupID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - 
t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + } } @@ -99,74 +100,68 @@ func TestParseResourceGroupIDInsensitively(t *testing.T) { Error bool Expected *ResourceGroupId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", - Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", - Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -176,7 +171,7 @@ func TestParseResourceGroupIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -185,8 +180,10 
@@ func TestParseResourceGroupIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + } } diff --git a/internal/services/signalr/sdk/2020-05-01/signalr/id_signalr.go b/internal/services/signalr/sdk/2020-05-01/signalr/id_signalr.go index 9f0a3e4a8453..007ec6a5a3be 100644 --- a/internal/services/signalr/sdk/2020-05-01/signalr/id_signalr.go +++ b/internal/services/signalr/sdk/2020-05-01/signalr/id_signalr.go @@ -7,102 +7,118 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = SignalRId{} + +// SignalRId is a struct representing the Resource ID for a Signal R type SignalRId struct { - SubscriptionId string - ResourceGroup string - SignalRName string + SubscriptionId string + ResourceGroupName string + ResourceName string } -func NewSignalRID(subscriptionId, resourceGroup, signalRName string) SignalRId { +// NewSignalRID returns a new SignalRId struct +func NewSignalRID(subscriptionId string, resourceGroupName string, resourceName string) SignalRId { return SignalRId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, - SignalRName: signalRName, - } -} - -func (id SignalRId) String() string { - segments := []string{ - fmt.Sprintf("Signal R Name %q", id.SignalRName), - fmt.Sprintf("Resource Group %q", id.ResourceGroup), + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ResourceName: resourceName, } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Signal R", segmentsStr) -} - -func (id SignalRId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.SignalRService/SignalR/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.SignalRName) } -// ParseSignalRID parses a SignalR ID into an SignalRId struct +// ParseSignalRID parses 'input' into a SignalRId func ParseSignalRID(input string) (*SignalRId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(SignalRId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err - } - - resourceId := SignalRId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") - } + var ok bool + id := SignalRId{} - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.SignalRName, err = id.PopSegment("SignalR"); err != nil { - return nil, err + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if 
id.ResourceName, ok = parsed.Parsed["resourceName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParseSignalRIDInsensitively parses an SignalR ID into an SignalRId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseSignalRID method should be used instead for validation etc. +// ParseSignalRIDInsensitively parses 'input' case-insensitively into a SignalRId +// note: this method should only be used for API response data and not user input func ParseSignalRIDInsensitively(input string) (*SignalRId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(SignalRId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := SignalRId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + var ok bool + id := SignalRId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.ResourceName, ok = parsed.Parsed["resourceName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceName' was not found in the resource id %q", input) } - // find the correct casing for the 'SignalR' segment - SignalRKey := "SignalR" - for key := range id.Path { - if strings.EqualFold(key, SignalRKey) { - SignalRKey = key - break - } + return &id, nil +} + +// ValidateSignalRID checks that 'input' can be parsed as a Signal R ID +func ValidateSignalRID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.SignalRName, err = id.PopSegment(SignalRKey); err != nil { - return nil, err + + if _, err := ParseSignalRID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Signal R ID +func (id SignalRId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.SignalRService/signalR/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ResourceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Signal R ID +func (id SignalRId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("resourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("providers", "providers", "providers"), + resourceids.ResourceProviderSegment("microsoftSignalRService", "Microsoft.SignalRService", "Microsoft.SignalRService"), + 
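// Illustrative sketch, not part of the change above: the regenerated SignalRId type
// replaces the hand-written parser, and the snippet below shows how it is intended to
// be consumed (construct, format with ID(), parse strictly for user input, and parse
// insensitively only for API response data, per the doc comments in the diff). It
// assumes it lives inside the provider module, since the generated SDK sits under
// internal/ and cannot be imported from outside it; the values are examples only.
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-provider-azurerm/internal/services/signalr/sdk/2020-05-01/signalr"
)

func main() {
	// Build a strongly-typed ID and render the canonical Azure Resource ID string.
	id := signalr.NewSignalRID("12345678-1234-9876-4563-123456789012", "example-resource-group", "example")
	raw := id.ID()
	fmt.Println(raw) // .../providers/Microsoft.SignalRService/signalR/example

	// Strict parsing is meant for user input (e.g. validation and import).
	parsed, err := signalr.ParseSignalRID(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.ResourceGroupName, parsed.ResourceName)

	// The insensitive variant is documented above as being for API response data only.
	if _, err := signalr.ParseSignalRIDInsensitively(raw); err != nil {
		panic(err)
	}
}
// (end of sketch; the diff resumes below)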
resourceids.StaticSegment("signalR", "signalR", "signalR"), + resourceids.UserSpecifiedSegment("resourceName", "resourceValue"), } +} - return &resourceId, nil +// String returns a human-readable description of this Signal R ID +func (id SignalRId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Resource Name: %q", id.ResourceName), + } + return fmt.Sprintf("Signal R (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/signalr/sdk/2020-05-01/signalr/id_signalr_test.go b/internal/services/signalr/sdk/2020-05-01/signalr/id_signalr_test.go index cb195b1b3bd8..30d8f7da8e11 100644 --- a/internal/services/signalr/sdk/2020-05-01/signalr/id_signalr_test.go +++ b/internal/services/signalr/sdk/2020-05-01/signalr/id_signalr_test.go @@ -6,13 +6,29 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = SignalRId{} +var _ resourceids.ResourceId = SignalRId{} -func TestSignalRIDFormatter(t *testing.T) { - actual := NewSignalRID("{subscriptionId}", "{resourceGroupName}", "{resourceName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SignalRService/SignalR/{resourceName}" +func TestNewSignalRID(t *testing.T) { + id := NewSignalRID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.ResourceName != "resourceValue" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceName'", id.ResourceName, "resourceValue") + } +} + +func TestFormatSignalRID(t *testing.T) { + actual := NewSignalRID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SignalRService/signalR/resourceValue" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,66 +38,61 @@ func TestParseSignalRID(t *testing.T) { Error bool Expected *SignalRId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing SignalRName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.", + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", Error: true, }, - { - // missing value for SignalRName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SignalRService/SignalR/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SignalRService", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SignalRService/SignalR/{resourceName}", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SignalRService/signalR", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SignalRService/signalR/resourceValue", Expected: &SignalRId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - SignalRName: "{resourceName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ResourceName: "resourceValue", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.SIGNALRSERVICE/SIGNALR/{RESOURCENAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SignalRService/signalR/resourceValue/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -91,7 +102,7 @@ func TestParseSignalRID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -100,12 +111,15 @@ func TestParseSignalRID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.SignalRName != v.Expected.SignalRName { - t.Fatalf("Expected %q but got %q for SignalRName", v.Expected.SignalRName, actual.SignalRName) + + if actual.ResourceName != v.Expected.ResourceName { + t.Fatalf("Expected %q but got %q for ResourceName", v.Expected.ResourceName, actual.ResourceName) } + } } @@ -115,90 +129,110 @@ func TestParseSignalRIDInsensitively(t *testing.T) { Error bool Expected *SignalRId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + 
// Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing SignalRName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for SignalRName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SignalRService/SignalR/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SignalRService/SignalR/{resourceName}", - Expected: &SignalRId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - SignalRName: "{resourceName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.signalrService/SignalR/{resourceName}", - Expected: &SignalRId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - SignalRName: "{resourceName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SignalRService", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sIgNaLrSeRvIcE", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SIGNALRService/SignalR/{resourceName}", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SignalRService/signalR", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sIgNaLrSeRvIcE/sIgNaLr", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SignalRService/signalR/resourceValue", Expected: &SignalRId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - SignalRName: "{resourceName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ResourceName: "resourceValue", }, }, - { - // mixed-cased segment names - Input: 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SiGnAlRService/SignalR/{resourceName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SignalRService/signalR/resourceValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sIgNaLrSeRvIcE/sIgNaLr/rEsOuRcEvAlUe", Expected: &SignalRId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - SignalRName: "{resourceName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + ResourceName: "rEsOuRcEvAlUe", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sIgNaLrSeRvIcE/sIgNaLr/rEsOuRcEvAlUe/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -208,7 +242,7 @@ func TestParseSignalRIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -217,11 +251,14 @@ func TestParseSignalRIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.SignalRName != v.Expected.SignalRName { - t.Fatalf("Expected %q but got %q for SignalRName", v.Expected.SignalRName, actual.SignalRName) + + if actual.ResourceName != v.Expected.ResourceName { + t.Fatalf("Expected %q but got %q for ResourceName", v.Expected.ResourceName, actual.ResourceName) } + } } diff --git a/internal/services/signalr/sdk/2020-05-01/signalr/id_subscription.go b/internal/services/signalr/sdk/2020-05-01/signalr/id_subscription.go index 68c562283521..6b09c60b3698 100644 --- a/internal/services/signalr/sdk/2020-05-01/signalr/id_subscription.go +++ b/internal/services/signalr/sdk/2020-05-01/signalr/id_subscription.go @@ -7,69 +7,90 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = SubscriptionId{} + +// SubscriptionId is a struct representing the Resource ID for a Subscription type SubscriptionId struct { SubscriptionId string } +// NewSubscriptionID returns a new SubscriptionId struct func NewSubscriptionID(subscriptionId string) SubscriptionId { return SubscriptionId{ SubscriptionId: subscriptionId, } } -func (id SubscriptionId) String() string { - segments := []string{} - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Subscription", segmentsStr) -} - -func (id SubscriptionId) ID() string { - fmtString := "/subscriptions/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId) -} - -// ParseSubscriptionID parses a Subscription ID into an SubscriptionId struct +// 
ParseSubscriptionID parses 'input' into a SubscriptionId func ParseSubscriptionID(input string) (*SubscriptionId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(SubscriptionId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := SubscriptionId{ - SubscriptionId: id.SubscriptionID, - } + var ok bool + id := SubscriptionId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err - } - - return &resourceId, nil + return &id, nil } -// ParseSubscriptionIDInsensitively parses an Subscription ID into an SubscriptionId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseSubscriptionID method should be used instead for validation etc. +// ParseSubscriptionIDInsensitively parses 'input' case-insensitively into a SubscriptionId +// note: this method should only be used for API response data and not user input func ParseSubscriptionIDInsensitively(input string) (*SubscriptionId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(SubscriptionId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := SubscriptionId{ - SubscriptionId: id.SubscriptionID, + var ok bool + id := SubscriptionId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + return &id, nil +} + +// ValidateSubscriptionID checks that 'input' can be parsed as a Subscription ID +func ValidateSubscriptionID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if _, err := ParseSubscriptionID(v); err != nil { + errors = append(errors, err) } - return &resourceId, nil + return +} + +// ID returns the formatted Subscription ID +func (id SubscriptionId) ID() string { + fmtString := "/subscriptions/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId) +} + +// Segments returns a slice of Resource ID Segments which comprise this Subscription ID +func (id SubscriptionId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("subscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + } +} + +// String returns a human-readable description of this Subscription ID +func (id SubscriptionId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + } + return fmt.Sprintf("Subscription (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/signalr/sdk/2020-05-01/signalr/id_subscription_test.go 
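// Illustrative sketch, not part of the change above: both the removed and the added
// doc comments agree that the insensitive parser exists to rewrite an ID returned by
// the API into consistent casing. A minimal sketch of that round trip, assuming it
// lives inside the provider module; the mixed-case input mirrors the test data used
// elsewhere in this change.
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-provider-azurerm/internal/services/signalr/sdk/2020-05-01/signalr"
)

func main() {
	fromAPI := "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012"

	id, err := signalr.ParseSubscriptionIDInsensitively(fromAPI)
	if err != nil {
		panic(err)
	}

	// Re-formatting the parsed value yields the canonically cased Resource ID.
	fmt.Println(id.ID()) // /subscriptions/12345678-1234-9876-4563-123456789012
}
// (end of sketch; the diff resumes below)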
b/internal/services/signalr/sdk/2020-05-01/signalr/id_subscription_test.go index cc0160d19ffc..134fd1ddb41c 100644 --- a/internal/services/signalr/sdk/2020-05-01/signalr/id_subscription_test.go +++ b/internal/services/signalr/sdk/2020-05-01/signalr/id_subscription_test.go @@ -6,13 +6,21 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = SubscriptionId{} +var _ resourceids.ResourceId = SubscriptionId{} -func TestSubscriptionIDFormatter(t *testing.T) { - actual := NewSubscriptionID("{subscriptionId}").ID() - expected := "/subscriptions/{subscriptionId}" +func TestNewSubscriptionID(t *testing.T) { + id := NewSubscriptionID("12345678-1234-9876-4563-123456789012") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } +} + +func TestFormatSubscriptionID(t *testing.T) { + actual := NewSubscriptionID("12345678-1234-9876-4563-123456789012").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,40 +30,29 @@ func TestParseSubscriptionID(t *testing.T) { Error bool Expected *SubscriptionId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", - Error: true, - }, - - { - // valid - Input: "/subscriptions/{subscriptionId}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -65,7 +62,7 @@ func TestParseSubscriptionID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -74,6 +71,7 @@ func TestParseSubscriptionID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } + } } @@ -83,58 +81,46 @@ func TestParseSubscriptionIDInsensitively(t *testing.T) { Error bool Expected *SubscriptionId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", }, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}", - Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", - }, + // Invalid (Valid 
Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/extra", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}", + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}", - Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", - }, + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/extra", + Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -144,7 +130,7 @@ func TestParseSubscriptionIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -153,5 +139,6 @@ func TestParseSubscriptionIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } + } } diff --git a/internal/services/signalr/sdk/2020-05-01/signalr/method_listbyresourcegroup_autorest.go b/internal/services/signalr/sdk/2020-05-01/signalr/method_listbyresourcegroup_autorest.go index 21a0f311b0ae..f7428dc3c560 100644 --- a/internal/services/signalr/sdk/2020-05-01/signalr/method_listbyresourcegroup_autorest.go +++ b/internal/services/signalr/sdk/2020-05-01/signalr/method_listbyresourcegroup_autorest.go @@ -110,7 +110,7 @@ func (c SignalRClient) preparerForListByResourceGroup(ctx context.Context, id Re autorest.AsContentType("application/json; charset=utf-8"), autorest.AsGet(), autorest.WithBaseURL(c.baseUri), - autorest.WithPath(fmt.Sprintf("%s/providers/Microsoft.SignalRService/SignalR", id.ID())), + autorest.WithPath(fmt.Sprintf("%s/providers/Microsoft.SignalRService/signalR", id.ID())), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } diff --git a/internal/services/signalr/sdk/2020-05-01/signalr/method_listbysubscription_autorest.go b/internal/services/signalr/sdk/2020-05-01/signalr/method_listbysubscription_autorest.go index 0a0291562c2b..405bb779048b 100644 --- a/internal/services/signalr/sdk/2020-05-01/signalr/method_listbysubscription_autorest.go +++ b/internal/services/signalr/sdk/2020-05-01/signalr/method_listbysubscription_autorest.go @@ -110,7 +110,7 @@ func (c SignalRClient) preparerForListBySubscription(ctx context.Context, id Sub autorest.AsContentType("application/json; charset=utf-8"), autorest.AsGet(), autorest.WithBaseURL(c.baseUri), - autorest.WithPath(fmt.Sprintf("%s/providers/Microsoft.SignalRService/SignalR", id.ID())), + autorest.WithPath(fmt.Sprintf("%s/providers/Microsoft.SignalRService/signalR", id.ID())), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } diff --git a/internal/services/signalr/sdk/2020-05-01/signalr/predicates.go b/internal/services/signalr/sdk/2020-05-01/signalr/predicates.go index e4c105c5a9cd..b5ff818357a7 100644 --- a/internal/services/signalr/sdk/2020-05-01/signalr/predicates.go +++ b/internal/services/signalr/sdk/2020-05-01/signalr/predicates.go @@ -7,6 +7,7 @@ type PrivateLinkResourcePredicate 
struct { } func (p PrivateLinkResourcePredicate) Matches(input PrivateLinkResource) bool { + if p.Id != nil && (input.Id == nil && *p.Id != *input.Id) { return false } @@ -30,6 +31,7 @@ type SignalRResourcePredicate struct { } func (p SignalRResourcePredicate) Matches(input SignalRResource) bool { + if p.Id != nil && (input.Id == nil && *p.Id != *input.Id) { return false } @@ -57,6 +59,7 @@ type SignalRUsagePredicate struct { } func (p SignalRUsagePredicate) Matches(input SignalRUsage) bool { + if p.CurrentValue != nil && (input.CurrentValue == nil && *p.CurrentValue != *input.CurrentValue) { return false } diff --git a/internal/services/signalr/signalr_service_data_source.go b/internal/services/signalr/signalr_service_data_source.go index 7ed93dedac9e..5a407dd564d0 100644 --- a/internal/services/signalr/signalr_service_data_source.go +++ b/internal/services/signalr/signalr_service_data_source.go @@ -107,8 +107,8 @@ func dataSourceArmSignalRServiceRead(d *pluginsdk.ResourceData, meta interface{} d.SetId(id.ID()) - d.Set("name", id.SignalRName) - d.Set("resource_group_name", id.ResourceGroup) + d.Set("name", id.ResourceName) + d.Set("resource_group_name", id.ResourceGroupName) if model := resp.Model; model != nil { d.Set("location", location.NormalizeNilable(model.Location)) diff --git a/internal/services/signalr/signalr_service_network_acl_resource.go b/internal/services/signalr/signalr_service_network_acl_resource.go index 036444b4758f..c1b4f2fa3e03 100644 --- a/internal/services/signalr/signalr_service_network_acl_resource.go +++ b/internal/services/signalr/signalr_service_network_acl_resource.go @@ -8,8 +8,8 @@ import ( "github.com/hashicorp/terraform-provider-azurerm/internal/clients" "github.com/hashicorp/terraform-provider-azurerm/internal/locks" networkValidate "github.com/hashicorp/terraform-provider-azurerm/internal/services/network/validate" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/signalr/migration" "github.com/hashicorp/terraform-provider-azurerm/internal/services/signalr/sdk/2020-05-01/signalr" - "github.com/hashicorp/terraform-provider-azurerm/internal/services/signalr/validate" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" @@ -30,6 +30,11 @@ func resourceArmSignalRServiceNetworkACL() *pluginsdk.Resource { Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, + StateUpgraders: pluginsdk.StateUpgrades(map[int]pluginsdk.StateUpgrade{ + 0: migration.NetworkAclV0ToV1{}, + }), + SchemaVersion: 1, + Importer: pluginsdk.DefaultImporter(), Schema: map[string]*pluginsdk.Schema{ @@ -37,7 +42,7 @@ func resourceArmSignalRServiceNetworkACL() *pluginsdk.Resource { Type: pluginsdk.TypeString, Required: true, ForceNew: true, - ValidateFunc: validate.ServiceID, + ValidateFunc: signalr.ValidateSignalRID, }, "default_action": { @@ -143,8 +148,8 @@ func resourceSignalRServiceNetworkACLCreateUpdate(d *pluginsdk.ResourceData, met return err } - locks.ByName(id.SignalRName, "azurerm_signalr_service") - defer locks.UnlockByName(id.SignalRName, "azurerm_signalr_service") + locks.ByName(id.ResourceName, "azurerm_signalr_service") + defer locks.UnlockByName(id.ResourceName, "azurerm_signalr_service") resp, err := client.Get(ctx, *id) if err != nil { @@ -249,8 +254,8 @@ func resourceSignalRServiceNetworkACLDelete(d *pluginsdk.ResourceData, meta inte return err } - locks.ByName(id.SignalRName, 
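// Illustrative sketch, not part of the change above: the network ACL resource now uses
// the generated signalr.ValidateSignalRID as its schema ValidateFunc in place of the
// hand-written validate.ServiceID. The validator has the standard
// (interface{}, string) -> (warnings, errors) shape, so it can also be exercised
// directly; the inputs below are example values and the sketch assumes it lives inside
// the provider module.
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-provider-azurerm/internal/services/signalr/sdk/2020-05-01/signalr"
)

func main() {
	validID := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SignalRService/signalR/example"

	// A well-formed ID produces no errors.
	_, errs := signalr.ValidateSignalRID(validID, "signalr_service_id")
	fmt.Println(len(errs)) // 0

	// A truncated ID (or a non-string value) is reported via the errors slice.
	_, errs = signalr.ValidateSignalRID("/subscriptions/12345678-1234-9876-4563-123456789012", "signalr_service_id")
	fmt.Println(len(errs)) // 1
}
// (end of sketch; the diff resumes below)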
"azurerm_signalr_service") - defer locks.UnlockByName(id.SignalRName, "azurerm_signalr_service") + locks.ByName(id.ResourceName, "azurerm_signalr_service") + defer locks.UnlockByName(id.ResourceName, "azurerm_signalr_service") resp, err := client.Get(ctx, *id) if err != nil { diff --git a/internal/services/signalr/signalr_service_resource.go b/internal/services/signalr/signalr_service_resource.go index 2f1811547ec6..ea57b86df303 100644 --- a/internal/services/signalr/signalr_service_resource.go +++ b/internal/services/signalr/signalr_service_resource.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" "github.com/hashicorp/terraform-provider-azurerm/internal/location" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/signalr/migration" "github.com/hashicorp/terraform-provider-azurerm/internal/services/signalr/sdk/2020-05-01/signalr" signalrValidate "github.com/hashicorp/terraform-provider-azurerm/internal/services/signalr/validate" "github.com/hashicorp/terraform-provider-azurerm/internal/tags" @@ -34,6 +35,11 @@ func resourceArmSignalRService() *pluginsdk.Resource { Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, + StateUpgraders: pluginsdk.StateUpgrades(map[int]pluginsdk.StateUpgrade{ + 0: migration.ServiceV0ToV1{}, + }), + SchemaVersion: 1, + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { _, err := signalr.ParseSignalRID(id) return err @@ -283,8 +289,8 @@ func resourceArmSignalRServiceRead(d *pluginsdk.ResourceData, meta interface{}) return fmt.Errorf("listing keys for %s: %+v", *id, err) } - d.Set("name", id.SignalRName) - d.Set("resource_group_name", id.ResourceGroup) + d.Set("name", id.ResourceName) + d.Set("resource_group_name", id.ResourceGroupName) if model := resp.Model; model != nil { d.Set("location", location.NormalizeNilable(model.Location)) diff --git a/internal/services/sql/sql_managed_instance_resource.go b/internal/services/sql/sql_managed_instance_resource.go index 0936f2d0d493..6acfb90ae2ab 100644 --- a/internal/services/sql/sql_managed_instance_resource.go +++ b/internal/services/sql/sql_managed_instance_resource.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/identity" "github.com/hashicorp/terraform-provider-azurerm/internal/services/mssql/validate" "github.com/hashicorp/terraform-provider-azurerm/internal/services/sql/parse" "github.com/hashicorp/terraform-provider-azurerm/internal/tags" @@ -22,6 +23,8 @@ import ( "github.com/hashicorp/terraform-provider-azurerm/utils" ) +type managedInstanceIdentity = identity.SystemAssigned + func resourceArmSqlMiServer() *schema.Resource { return &schema.Resource{ Create: resourceArmSqlMiServerCreateUpdate, @@ -169,13 +172,22 @@ func resourceArmSqlMiServer() *schema.Resource { ValidateFunc: azure.ValidateResourceID, }, + "identity": managedInstanceIdentity{}.Schema(), + "tags": tags.Schema(), }, - CustomizeDiff: pluginsdk.ForceNewIfChange("dns_zone_partner_id", func(ctx context.Context, old, new, _ interface{}) bool { + CustomizeDiff: pluginsdk.CustomDiffWithAll( // dns_zone_partner_id can only be set on init - return old.(string) == "" && new.(string) != "" - }), + pluginsdk.ForceNewIfChange("dns_zone_partner_id", func(ctx context.Context, old, 
new, _ interface{}) bool { + return old.(string) == "" && new.(string) != "" + }), + + // identity.0.type can be set to SystemAssigned, but not removed or set to None in this SDK version + pluginsdk.ForceNewIfChange("identity.0.type", func(ctx context.Context, old, new, _ interface{}) bool { + return old.(string) == "SystemAssigned" && new.(string) != "SystemAssigned" + }), + ), } } @@ -227,6 +239,12 @@ func resourceArmSqlMiServerCreateUpdate(d *schema.ResourceData, meta interface{} }, } + identity, err := expandManagedInstanceIdentity(d.Get("identity").([]interface{})) + if err != nil { + return fmt.Errorf(`expanding "identity": %v`, err) + } + parameters.Identity = identity + future, err := client.CreateOrUpdate(ctx, resGroup, name, parameters) if err != nil { return err @@ -277,6 +295,10 @@ func resourceArmSqlMiServerRead(d *schema.ResourceData, meta interface{}) error d.Set("sku_name", sku.Name) } + if err := d.Set("identity", flattenManagedInstanceIdentity(resp.Identity)); err != nil { + return fmt.Errorf("setting `identity`: %+v", err) + } + if props := resp.ManagedInstanceProperties; props != nil { d.Set("license_type", props.LicenseType) d.Set("administrator_login", props.AdministratorLogin) @@ -336,3 +358,43 @@ func expandManagedInstanceSkuName(skuName string) (*sql.Sku, error) { Family: utils.String(parts[1]), }, nil } + +func expandManagedInstanceIdentity(input []interface{}) (*sql.ResourceIdentity, error) { + config, err := managedInstanceIdentity{}.Expand(input) + if err != nil { + return nil, err + } + + if config.Type == identity.Type("None") { + return nil, nil + } + + return &sql.ResourceIdentity{ + Type: sql.IdentityType(config.Type), + }, nil +} + +func flattenManagedInstanceIdentity(input *sql.ResourceIdentity) []interface{} { + var config *identity.ExpandedConfig + + if input == nil { + return []interface{}{} + } + + principalId := "" + if input.PrincipalID != nil { + principalId = input.PrincipalID.String() + } + + tenantId := "" + if input.TenantID != nil { + tenantId = input.TenantID.String() + } + + config = &identity.ExpandedConfig{ + Type: identity.Type(string(input.Type)), + PrincipalId: principalId, + TenantId: tenantId, + } + return managedInstanceIdentity{}.Flatten(config) +} diff --git a/internal/services/sql/sql_managed_instance_resource_test.go b/internal/services/sql/sql_managed_instance_resource_test.go index 13cbc1b09de5..ff31dc117d8f 100644 --- a/internal/services/sql/sql_managed_instance_resource_test.go +++ b/internal/services/sql/sql_managed_instance_resource_test.go @@ -31,6 +31,31 @@ func TestAccAzureRMSqlMiServer_basic(t *testing.T) { }) } +func TestAccAzureRMSqlMiServer_identity(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_sql_managed_instance", "test") + r := SqlManagedInstanceResource{} + + data.ResourceTest(t, r, []resource.TestStep{ + { + Config: r.basic(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("identity.#").HasValue("0"), + ), + }, + data.ImportStep("administrator_login_password"), + { + Config: r.identity(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("identity.#").HasValue("1"), + check.That(data.ResourceName).Key("identity.0.type").HasValue("SystemAssigned"), + ), + }, + data.ImportStep("administrator_login_password"), + }) +} + func TestAccAzureRMSqlMiServer_update(t *testing.T) { data := acceptance.BuildTestData(t, 
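// Illustrative sketch, not part of the change above: the second ForceNewIfChange
// predicate added to the SQL Managed Instance resource only forces recreation when an
// existing SystemAssigned identity is removed or changed away, because this SDK
// version cannot unset an identity in place. The helper below merely restates that
// closure so its behaviour can be seen in isolation; it is not provider code.
package main

import "fmt"

// forceNewForIdentityType mirrors the closure passed to pluginsdk.ForceNewIfChange
// for "identity.0.type" in the diff above.
func forceNewForIdentityType(old, new string) bool {
	return old == "SystemAssigned" && new != "SystemAssigned"
}

func main() {
	fmt.Println(forceNewForIdentityType("", "SystemAssigned"))               // false: identity can be added in place
	fmt.Println(forceNewForIdentityType("SystemAssigned", "SystemAssigned")) // false: no change
	fmt.Println(forceNewForIdentityType("SystemAssigned", ""))               // true: removing it forces a new resource
}
// (end of sketch; the diff resumes below)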
"azurerm_sql_managed_instance", "test") r := SqlManagedInstanceResource{} @@ -164,6 +189,39 @@ resource "azurerm_sql_managed_instance" "test" { `, r.template(data), data.RandomInteger) } +func (r SqlManagedInstanceResource) identity(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_sql_managed_instance" "test" { + name = "acctestsqlserver%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + administrator_login = "mradministrator" + administrator_login_password = "thisIsDog11" + license_type = "BasePrice" + subnet_id = azurerm_subnet.test.id + sku_name = "GP_Gen5" + vcores = 4 + storage_size_in_gb = 32 + + depends_on = [ + azurerm_subnet_network_security_group_association.test, + azurerm_subnet_route_table_association.test, + ] + + identity { + type = "SystemAssigned" + } + + tags = { + environment = "staging" + database = "test" + } +} +`, r.template(data), data.RandomInteger) +} + func (r SqlManagedInstanceResource) update(data acceptance.TestData) string { return fmt.Sprintf(` %s @@ -455,7 +513,7 @@ resource "azurerm_subnet" "test" { name = "subnet-%[1]d" resource_group_name = azurerm_resource_group.test.name virtual_network_name = azurerm_virtual_network.test.name - address_prefix = "10.0.0.0/24" + address_prefixes = ["10.0.0.0/24"] delegation { name = "managedinstancedelegation" @@ -1479,7 +1537,7 @@ resource "azurerm_subnet" "test1" { name = "subnet2-%[1]d" resource_group_name = azurerm_resource_group.test1.name virtual_network_name = azurerm_virtual_network.test1.name - address_prefix = "10.1.0.0/24" + address_prefixes = ["10.1.0.0/24"] delegation { name = "managedinstancedelegation" diff --git a/internal/services/storage/client/client.go b/internal/services/storage/client/client.go index c6223498ae00..52d3ecc8f78b 100644 --- a/internal/services/storage/client/client.go +++ b/internal/services/storage/client/client.go @@ -4,7 +4,8 @@ import ( "context" "fmt" - "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-01-01/storage" + legacystorage "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-01-01/storage" + "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage" "github.com/Azure/azure-sdk-for-go/services/storagesync/mgmt/2020-03-01/storagesync" "github.com/Azure/go-autorest/autorest" az "github.com/Azure/go-autorest/autorest/azure" @@ -29,7 +30,7 @@ type Client struct { ADLSGen2PathsClient *paths.Client ManagementPoliciesClient *storage.ManagementPoliciesClient BlobServicesClient *storage.BlobServicesClient - BlobInventoryPoliciesClient *storage.BlobInventoryPoliciesClient + BlobInventoryPoliciesClient *legacystorage.BlobInventoryPoliciesClient CloudEndpointsClient *storagesync.CloudEndpointsClient EncryptionScopesClient *storage.EncryptionScopesClient Environment az.Environment @@ -59,7 +60,7 @@ func NewClient(options *common.ClientOptions) *Client { blobServicesClient := storage.NewBlobServicesClientWithBaseURI(options.ResourceManagerEndpoint, options.SubscriptionId) options.ConfigureClient(&blobServicesClient.Client, options.ResourceManagerAuthorizer) - blobInventoryPoliciesClient := storage.NewBlobInventoryPoliciesClientWithBaseURI(options.ResourceManagerEndpoint, options.SubscriptionId) + blobInventoryPoliciesClient := legacystorage.NewBlobInventoryPoliciesClientWithBaseURI(options.ResourceManagerEndpoint, options.SubscriptionId) options.ConfigureClient(&blobInventoryPoliciesClient.Client, options.ResourceManagerAuthorizer) cloudEndpointsClient 
:= storagesync.NewCloudEndpointsClientWithBaseURI(options.ResourceManagerEndpoint, options.SubscriptionId) diff --git a/internal/services/storage/client/helpers.go b/internal/services/storage/client/helpers.go index 6120df95c44b..450830ff7495 100644 --- a/internal/services/storage/client/helpers.go +++ b/internal/services/storage/client/helpers.go @@ -6,7 +6,7 @@ import ( "log" "sync" - "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-01-01/storage" + "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/parse" ) @@ -35,7 +35,7 @@ func (ad *accountDetails) AccountKey(ctx context.Context, client Client) (*strin } log.Printf("[DEBUG] Cache Miss - looking up the account key for storage account %q..", ad.name) - props, err := client.AccountsClient.ListKeys(ctx, ad.ResourceGroup, ad.name, storage.Kerb) + props, err := client.AccountsClient.ListKeys(ctx, ad.ResourceGroup, ad.name, storage.ListKeyExpandKerb) if err != nil { return nil, fmt.Errorf("Listing Keys for Storage Account %q (Resource Group %q): %+v", ad.name, ad.ResourceGroup, err) } diff --git a/internal/services/storage/storage_account_customer_managed_key_resource.go b/internal/services/storage/storage_account_customer_managed_key_resource.go index f1dd1421daf4..b0efa043a5e0 100644 --- a/internal/services/storage/storage_account_customer_managed_key_resource.go +++ b/internal/services/storage/storage_account_customer_managed_key_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-01-01/storage" + "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" "github.com/hashicorp/terraform-provider-azurerm/internal/locks" diff --git a/internal/services/storage/storage_account_customer_managed_key_resource_test.go b/internal/services/storage/storage_account_customer_managed_key_resource_test.go index 5fdaf14ac57d..f9122422c5f9 100644 --- a/internal/services/storage/storage_account_customer_managed_key_resource_test.go +++ b/internal/services/storage/storage_account_customer_managed_key_resource_test.go @@ -5,7 +5,7 @@ import ( "fmt" "testing" - "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-01-01/storage" + "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" diff --git a/internal/services/storage/storage_account_data_source.go b/internal/services/storage/storage_account_data_source.go index 363e184fd59f..f51db7b93e53 100644 --- a/internal/services/storage/storage_account_data_source.go +++ b/internal/services/storage/storage_account_data_source.go @@ -6,7 +6,7 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-01-01/storage" + "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage" azautorest "github.com/Azure/go-autorest/autorest" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" @@ -287,7 +287,7 @@ func dataSourceStorageAccountRead(d *pluginsdk.ResourceData, meta interface{}) e d.Set("primary_access_key", "") 
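// Illustrative sketch, not part of the change above: client.go moves most storage
// clients to the 2021-04-01 API version while pinning BlobInventoryPoliciesClient to
// 2021-01-01 via the legacystorage import alias. The fragment below restates that
// aliasing pattern in isolation; the struct, constructor and placeholder
// endpoint/subscription values are illustrative only.
package main

import (
	legacystorage "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-01-01/storage"
	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage"
)

type storageClients struct {
	BlobServicesClient          storage.BlobServicesClient
	BlobInventoryPoliciesClient legacystorage.BlobInventoryPoliciesClient
}

func newStorageClients(endpoint, subscriptionId string) storageClients {
	return storageClients{
		// Clients that have been migrated use the new API version...
		BlobServicesClient: storage.NewBlobServicesClientWithBaseURI(endpoint, subscriptionId),
		// ...while the blob inventory policies client stays on 2021-01-01 for now.
		BlobInventoryPoliciesClient: legacystorage.NewBlobInventoryPoliciesClientWithBaseURI(endpoint, subscriptionId),
	}
}

func main() {
	_ = newStorageClients("https://management.azure.com", "12345678-1234-9876-4563-123456789012")
}
// (end of sketch; the diff resumes below)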
d.Set("secondary_access_key", "") - keys, err := client.ListKeys(ctx, resourceGroup, name, storage.Kerb) + keys, err := client.ListKeys(ctx, resourceGroup, name, storage.ListKeyExpandKerb) if err != nil { // the API returns a 200 with an inner error of a 409.. var hasWriteLock bool diff --git a/internal/services/storage/storage_account_network_rules_resource.go b/internal/services/storage/storage_account_network_rules_resource.go index 84ee826abe11..f8c4d8fa4cdd 100644 --- a/internal/services/storage/storage_account_network_rules_resource.go +++ b/internal/services/storage/storage_account_network_rules_resource.go @@ -6,7 +6,7 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-01-01/storage" + "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" @@ -94,10 +94,10 @@ func resourceStorageAccountNetworkRules() *pluginsdk.Resource { Elem: &pluginsdk.Schema{ Type: pluginsdk.TypeString, ValidateFunc: validation.StringInSlice([]string{ - string(storage.AzureServices), - string(storage.Logging), - string(storage.Metrics), - string(storage.None), + string(storage.BypassAzureServices), + string(storage.BypassLogging), + string(storage.BypassMetrics), + string(storage.BypassNone), }, false), }, Set: pluginsdk.HashString, @@ -306,7 +306,7 @@ func resourceStorageAccountNetworkRulesDelete(d *pluginsdk.ResourceData, meta in opts := storage.AccountUpdateParameters{ AccountPropertiesUpdateParameters: &storage.AccountPropertiesUpdateParameters{ NetworkRuleSet: &storage.NetworkRuleSet{ - Bypass: storage.AzureServices, + Bypass: storage.BypassAzureServices, DefaultAction: storage.DefaultActionAllow, }, }, @@ -350,7 +350,7 @@ func expandStorageAccountNetworkRuleIpRules(ipRulesInfo []interface{}) *[]storag attrs := ipRuleConfig.(string) ipRule := storage.IPRule{ IPAddressOrRange: utils.String(attrs), - Action: storage.Allow, + Action: storage.ActionAllow, } ipRules[i] = ipRule } @@ -365,7 +365,7 @@ func expandStorageAccountNetworkRuleVirtualRules(virtualNetworkInfo []interface{ attrs := virtualNetworkConfig.(string) virtualNetwork := storage.VirtualNetworkRule{ VirtualNetworkResourceID: utils.String(attrs), - Action: storage.Allow, + Action: storage.ActionAllow, } virtualNetworks[i] = virtualNetwork } diff --git a/internal/services/storage/storage_account_resource.go b/internal/services/storage/storage_account_resource.go index 2c30f8d5a0ab..72c143b0890e 100644 --- a/internal/services/storage/storage_account_resource.go +++ b/internal/services/storage/storage_account_resource.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-01-01/storage" + "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage" azautorest "github.com/Azure/go-autorest/autorest" autorestAzure "github.com/Azure/go-autorest/autorest/azure" "github.com/hashicorp/go-azure-helpers/lang/response" @@ -78,13 +78,13 @@ func resourceStorageAccount() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ - string(storage.Storage), - string(storage.BlobStorage), - string(storage.BlockBlobStorage), - string(storage.FileStorage), - string(storage.StorageV2), + string(storage.KindStorage), + string(storage.KindBlobStorage), + string(storage.KindBlockBlobStorage), + 
string(storage.KindFileStorage), + string(storage.KindStorageV2), }, true), - Default: string(storage.StorageV2), + Default: string(storage.KindStorageV2), }, "account_tier": { @@ -118,8 +118,8 @@ func resourceStorageAccount() *pluginsdk.Resource { Optional: true, Computed: true, ValidateFunc: validation.StringInSlice([]string{ - string(storage.Cool), - string(storage.Hot), + string(storage.AccessTierCool), + string(storage.AccessTierHot), }, true), }, @@ -216,11 +216,11 @@ func resourceStorageAccount() *pluginsdk.Resource { "min_tls_version": { Type: pluginsdk.TypeString, Optional: true, - Default: string(storage.TLS10), + Default: string(storage.MinimumTLSVersionTLS10), ValidateFunc: validation.StringInSlice([]string{ - string(storage.TLS10), - string(storage.TLS11), - string(storage.TLS12), + string(storage.MinimumTLSVersionTLS10), + string(storage.MinimumTLSVersionTLS11), + string(storage.MinimumTLSVersionTLS12), }, false), }, @@ -265,10 +265,10 @@ func resourceStorageAccount() *pluginsdk.Resource { Elem: &pluginsdk.Schema{ Type: pluginsdk.TypeString, ValidateFunc: validation.StringInSlice([]string{ - string(storage.AzureServices), - string(storage.Logging), - string(storage.Metrics), - string(storage.None), + string(storage.BypassAzureServices), + string(storage.BypassLogging), + string(storage.BypassMetrics), + string(storage.BypassNone), }, true), }, Set: pluginsdk.HashString, @@ -552,10 +552,10 @@ func resourceStorageAccount() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ - string(storage.MicrosoftRouting), - string(storage.InternetRouting), + string(storage.RoutingChoiceMicrosoftRouting), + string(storage.RoutingChoiceInternetRouting), }, false), - Default: string(storage.MicrosoftRouting), + Default: string(storage.RoutingChoiceMicrosoftRouting), }, }, }, @@ -855,7 +855,7 @@ func resourceStorageAccount() *pluginsdk.Resource { if d.HasChange("account_kind") { accountKind, changedKind := d.GetChange("account_kind") - if accountKind != string(storage.Storage) && changedKind != string(storage.StorageV2) { + if accountKind != string(storage.KindStorage) && changedKind != string(storage.KindStorageV2) { log.Printf("[DEBUG] recreate storage account, could't be migrated from %s to %s", accountKind, changedKind) d.ForceNew("account_kind") } else { @@ -953,7 +953,7 @@ func resourceStorageAccountCreate(d *pluginsdk.ResourceData, meta interface{}) e // USGovernmentCloud allow_blob_public_access and min_tls_version allowed as of issue 9128 // https://github.com/hashicorp/terraform-provider-azurerm/issues/9128 if envName != autorestAzure.PublicCloud.Name && envName != autorestAzure.USGovernmentCloud.Name && envName != autorestAzure.ChinaCloud.Name { - if allowBlobPublicAccess || minimumTLSVersion != string(storage.TLS10) { + if allowBlobPublicAccess || minimumTLSVersion != string(storage.MinimumTLSVersionTLS10) { return fmt.Errorf(`%q and "min_tls_version" are not supported for a Storage Account located in %q`, allowPublicNestedItemsName, envName) } } else { @@ -980,30 +980,30 @@ func resourceStorageAccountCreate(d *pluginsdk.ResourceData, meta interface{}) e } // BlobStorage does not support ZRS - if accountKind == string(storage.BlobStorage) { - if string(parameters.Sku.Name) == string(storage.StandardZRS) { + if accountKind == string(storage.KindBlobStorage) { + if string(parameters.Sku.Name) == string(storage.SkuNameStandardZRS) { return fmt.Errorf("A `account_replication_type` of `ZRS` isn't supported for Blob Storage 
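// Illustrative sketch, not part of the change above: most of the churn in
// storage_account_resource.go and the other storage files is mechanical, because the
// 2021-04-01 SDK prefixes its enum constants with their type names. The snippet below
// just references a handful of the new names used in this change alongside the
// 2021-01-01 names they replace.
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage"
)

func main() {
	fmt.Println(storage.KindStorageV2)          // was storage.StorageV2
	fmt.Println(storage.AccessTierHot)          // was storage.Hot
	fmt.Println(storage.MinimumTLSVersionTLS12) // was storage.TLS12
	fmt.Println(storage.BypassAzureServices)    // was storage.AzureServices
	fmt.Println(storage.ActionAllow)            // was storage.Allow
	fmt.Println(storage.ListKeyExpandKerb)      // was storage.Kerb
}
// (end of sketch; the diff resumes below)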
accounts.") } } // AccessTier is only valid for BlobStorage, StorageV2, and FileStorage accounts - if accountKind == string(storage.BlobStorage) || accountKind == string(storage.StorageV2) || accountKind == string(storage.FileStorage) { + if accountKind == string(storage.KindBlobStorage) || accountKind == string(storage.KindStorageV2) || accountKind == string(storage.KindFileStorage) { accessTier, ok := d.GetOk("access_tier") if !ok { // default to "Hot" - accessTier = string(storage.Hot) + accessTier = string(storage.AccessTierHot) } parameters.AccountPropertiesCreateParameters.AccessTier = storage.AccessTier(accessTier.(string)) - } else if isHnsEnabled && accountKind != string(storage.BlockBlobStorage) { + } else if isHnsEnabled && accountKind != string(storage.KindBlockBlobStorage) { return fmt.Errorf("`is_hns_enabled` can only be used with account kinds `StorageV2`, `BlobStorage` and `BlockBlobStorage`") } // NFSv3 is supported for standard general-purpose v2 storage accounts and for premium block blob storage accounts. // (https://docs.microsoft.com/en-us/azure/storage/blobs/network-file-system-protocol-support-how-to#step-5-create-and-configure-a-storage-account) if nfsV3Enabled && - !((accountTier == string(storage.Premium) && accountKind == string(storage.BlockBlobStorage)) || - (accountTier == string(storage.Standard) && accountKind == string(storage.StorageV2))) { + !((accountTier == string(storage.SkuTierPremium) && accountKind == string(storage.KindBlockBlobStorage)) || + (accountTier == string(storage.SkuTierStandard) && accountKind == string(storage.KindStorageV2))) { return fmt.Errorf("`nfsv3_enabled` can only be used with account tier `Standard` and account kind `StorageV2`, or account tier `Premium` and account kind `BlockBlobStorage`") } if nfsV3Enabled && !isHnsEnabled { @@ -1011,8 +1011,8 @@ func resourceStorageAccountCreate(d *pluginsdk.ResourceData, meta interface{}) e } // AccountTier must be Premium for FileStorage - if accountKind == string(storage.FileStorage) { - if string(parameters.Sku.Tier) == string(storage.StandardLRS) { + if accountKind == string(storage.KindFileStorage) { + if string(parameters.Sku.Tier) == string(storage.SkuNameStandardLRS) { return fmt.Errorf("A `account_tier` of `Standard` is not supported for FileStorage accounts.") } } @@ -1044,7 +1044,7 @@ func resourceStorageAccountCreate(d *pluginsdk.ResourceData, meta interface{}) e if val, ok := d.GetOk("blob_properties"); ok { // FileStorage does not support blob settings - if accountKind != string(storage.FileStorage) { + if accountKind != string(storage.KindFileStorage) { blobClient := meta.(*clients.Client).Storage.BlobServicesClient blobProperties := expandBlobProperties(val.([]interface{})) @@ -1097,7 +1097,7 @@ func resourceStorageAccountCreate(d *pluginsdk.ResourceData, meta interface{}) e if val, ok := d.GetOk("share_properties"); ok { // BlobStorage does not support file share settings // FileStorage Premium is supported - if accountKind == string(storage.FileStorage) || accountKind != string(storage.BlobStorage) && accountKind != string(storage.BlockBlobStorage) && accountTier != string(storage.Premium) { + if accountKind == string(storage.KindFileStorage) || accountKind != string(storage.KindBlobStorage) && accountKind != string(storage.KindBlockBlobStorage) && accountTier != string(storage.SkuTierPremium) { fileServiceClient := meta.(*clients.Client).Storage.FileServicesClient if _, err = fileServiceClient.SetServiceProperties(ctx, resourceGroupName, storageAccountName, 
expandShareProperties(val.([]interface{}))); err != nil { @@ -1110,7 +1110,7 @@ func resourceStorageAccountCreate(d *pluginsdk.ResourceData, meta interface{}) e if val, ok := d.GetOk("static_website"); ok { // static website only supported on StorageV2 and BlockBlobStorage - if accountKind != string(storage.StorageV2) && accountKind != string(storage.BlockBlobStorage) { + if accountKind != string(storage.KindStorageV2) && accountKind != string(storage.KindBlockBlobStorage) { return fmt.Errorf("`static_website` is only supported for StorageV2 and BlockBlobStorage.") } storageClient := meta.(*clients.Client).Storage @@ -1161,8 +1161,8 @@ func resourceStorageAccountUpdate(d *pluginsdk.ResourceData, meta interface{}) e storageType := fmt.Sprintf("%s_%s", accountTier, replicationType) accountKind := d.Get("account_kind").(string) - if accountKind == string(storage.BlobStorage) { - if storageType == string(storage.StandardZRS) { + if accountKind == string(storage.KindBlobStorage) { + if storageType == string(storage.SkuNameStandardZRS) { return fmt.Errorf("A `account_replication_type` of `ZRS` isn't supported for Blob Storage accounts.") } } @@ -1286,7 +1286,7 @@ func resourceStorageAccountUpdate(d *pluginsdk.ResourceData, meta interface{}) e // USGovernmentCloud "min_tls_version" allowed as of issue 9128 // https://github.com/hashicorp/terraform-provider-azurerm/issues/9128 if envName != autorestAzure.PublicCloud.Name && envName != autorestAzure.USGovernmentCloud.Name && envName != autorestAzure.ChinaCloud.Name { - if minimumTLSVersion != string(storage.TLS10) { + if minimumTLSVersion != string(storage.MinimumTLSVersionTLS10) { return fmt.Errorf(`"min_tls_version" is not supported for a Storage Account located in %q`, envName) } } else { @@ -1415,7 +1415,7 @@ func resourceStorageAccountUpdate(d *pluginsdk.ResourceData, meta interface{}) e if d.HasChange("blob_properties") { // FileStorage does not support blob settings - if accountKind != string(storage.FileStorage) { + if accountKind != string(storage.KindFileStorage) { blobClient := meta.(*clients.Client).Storage.BlobServicesClient blobProperties := expandBlobProperties(d.Get("blob_properties").([]interface{})) @@ -1471,7 +1471,7 @@ func resourceStorageAccountUpdate(d *pluginsdk.ResourceData, meta interface{}) e if d.HasChange("share_properties") { // BlobStorage, BlockBlobStorage does not support file share settings // FileStorage Premium is supported - if accountKind == string(storage.FileStorage) || accountKind != string(storage.BlobStorage) && accountKind != string(storage.BlockBlobStorage) && accountTier != string(storage.Premium) { + if accountKind == string(storage.KindFileStorage) || accountKind != string(storage.KindBlobStorage) && accountKind != string(storage.KindBlockBlobStorage) && accountTier != string(storage.SkuTierPremium) { fileServiceClient := meta.(*clients.Client).Storage.FileServicesClient if _, err = fileServiceClient.SetServiceProperties(ctx, resourceGroupName, storageAccountName, expandShareProperties(d.Get("share_properties").([]interface{}))); err != nil { @@ -1484,7 +1484,7 @@ func resourceStorageAccountUpdate(d *pluginsdk.ResourceData, meta interface{}) e if d.HasChange("static_website") { // static website only supported on StorageV2 and BlockBlobStorage - if accountKind != string(storage.StorageV2) && accountKind != string(storage.BlockBlobStorage) { + if accountKind != string(storage.KindStorageV2) && accountKind != string(storage.KindBlockBlobStorage) { return fmt.Errorf("`static_website` is only supported for 
StorageV2 and BlockBlobStorage.") } storageClient := meta.(*clients.Client).Storage @@ -1542,7 +1542,7 @@ func resourceStorageAccountRead(d *pluginsdk.ResourceData, meta interface{}) err d.Set("primary_access_key", "") d.Set("secondary_access_key", "") - keys, err := client.ListKeys(ctx, resourceGroupName, storageAccountName, storage.Kerb) + keys, err := client.ListKeys(ctx, resourceGroupName, storageAccountName, storage.ListKeyExpandKerb) if err != nil { // the API returns a 200 with an inner error of a 409.. var hasWriteLock bool @@ -1606,10 +1606,10 @@ func resourceStorageAccountRead(d *pluginsdk.ResourceData, meta interface{}) err // https://github.com/hashicorp/terraform-provider-azurerm/issues/9128 envName := meta.(*clients.Client).Account.Environment.Name if envName != autorestAzure.PublicCloud.Name && envName != autorestAzure.USGovernmentCloud.Name && envName != autorestAzure.ChinaCloud.Name { - d.Set("min_tls_version", string(storage.TLS10)) + d.Set("min_tls_version", string(storage.MinimumTLSVersionTLS10)) } else { // For storage account created using old API, the response of GET call will not return "min_tls_version", either. - minTlsVersion := string(storage.TLS10) + minTlsVersion := string(storage.MinimumTLSVersionTLS10) if props.MinimumTLSVersion != "" { minTlsVersion = string(props.MinimumTLSVersion) } @@ -1706,7 +1706,7 @@ func resourceStorageAccountRead(d *pluginsdk.ResourceData, meta interface{}) err blobClient := storageClient.BlobServicesClient // FileStorage does not support blob settings - if resp.Kind != storage.FileStorage { + if resp.Kind != storage.KindFileStorage { blobProps, err := blobClient.GetServiceProperties(ctx, resourceGroupName, storageAccountName) if err != nil { if !utils.ResponseWasNotFound(blobProps.Response) { @@ -1722,7 +1722,7 @@ func resourceStorageAccountRead(d *pluginsdk.ResourceData, meta interface{}) err fileServiceClient := storageClient.FileServicesClient // FileStorage does not support blob kind, FileStorage Premium is supported - if resp.Kind == storage.FileStorage || resp.Kind != storage.BlobStorage && resp.Kind != storage.BlockBlobStorage && resp.Sku != nil && resp.Sku.Tier != storage.Premium { + if resp.Kind == storage.KindFileStorage || resp.Kind != storage.KindBlobStorage && resp.Kind != storage.KindBlockBlobStorage && resp.Sku != nil && resp.Sku.Tier != storage.SkuTierPremium { shareProps, err := fileServiceClient.GetServiceProperties(ctx, resourceGroupName, storageAccountName) if err != nil { if !utils.ResponseWasNotFound(shareProps.Response) { @@ -1740,8 +1740,8 @@ func resourceStorageAccountRead(d *pluginsdk.ResourceData, meta interface{}) err return fmt.Errorf("retrieving Storage Account %q (Resource Group %q): `sku` was nil", storageAccountName, resourceGroupName) } - if resp.Sku.Tier == storage.Standard { - if resp.Kind == storage.Storage || resp.Kind == storage.StorageV2 { + if resp.Sku.Tier == storage.SkuTierStandard { + if resp.Kind == storage.KindStorage || resp.Kind == storage.KindStorageV2 { queueClient, err := storageClient.QueuesClient(ctx, *account) if err != nil { return fmt.Errorf("building Queues Client: %s", err) @@ -1761,7 +1761,7 @@ func resourceStorageAccountRead(d *pluginsdk.ResourceData, meta interface{}) err var staticWebsite []interface{} // static website only supported on StorageV2 and BlockBlobStorage - if resp.Kind == storage.StorageV2 || resp.Kind == storage.BlockBlobStorage { + if resp.Kind == storage.KindStorageV2 || resp.Kind == storage.KindBlockBlobStorage { storageClient := 
meta.(*clients.Client).Storage account, err := storageClient.FindAccount(ctx, storageAccountName) @@ -1965,7 +1965,7 @@ func expandStorageAccountIPRules(networkRule map[string]interface{}) *[]storage. attrs := ipRuleConfig.(string) ipRule := storage.IPRule{ IPAddressOrRange: utils.String(attrs), - Action: storage.Allow, + Action: storage.ActionAllow, } ipRules[i] = ipRule } @@ -1981,7 +1981,7 @@ func expandStorageAccountVirtualNetworks(networkRule map[string]interface{}) *[] attrs := virtualNetworkConfig.(string) virtualNetwork := storage.VirtualNetworkRule{ VirtualNetworkResourceID: utils.String(attrs), - Action: storage.Allow, + Action: storage.ActionAllow, } virtualNetworks[i] = virtualNetwork } diff --git a/internal/services/storage/storage_data_lake_gen2_filesystem_resource_test.go b/internal/services/storage/storage_data_lake_gen2_filesystem_resource_test.go index 8ac904ea7013..65eff7c5401b 100644 --- a/internal/services/storage/storage_data_lake_gen2_filesystem_resource_test.go +++ b/internal/services/storage/storage_data_lake_gen2_filesystem_resource_test.go @@ -258,7 +258,7 @@ resource "azurerm_role_assignment" "storageAccountRoleAssignment" { } resource "azuread_application" "test" { - name = "acctestspa%[2]d" + display_name = "acctestspa%[2]d" } resource "azuread_service_principal" "test" { diff --git a/internal/services/storage/storage_data_lake_gen2_path_resource_test.go b/internal/services/storage/storage_data_lake_gen2_path_resource_test.go index 6314d23d92da..da5da540a720 100644 --- a/internal/services/storage/storage_data_lake_gen2_path_resource_test.go +++ b/internal/services/storage/storage_data_lake_gen2_path_resource_test.go @@ -226,7 +226,7 @@ func (r StorageDataLakeGen2PathResource) withACLWithSpecificUserAndDefaults(data provider "azuread" {} resource "azuread_application" "test" { - name = "acctestspa%[2]d" + display_name = "acctestspa%[2]d" } resource "azuread_service_principal" "test" { @@ -303,7 +303,7 @@ resource "azurerm_role_assignment" "storage_blob_owner" { } resource "azuread_application" "test" { - name = "acctestspa%[2]d" + display_name = "acctestspa%[2]d" } resource "azuread_service_principal" "test" { diff --git a/internal/services/storage/storage_encryption_scope_resource.go b/internal/services/storage/storage_encryption_scope_resource.go index 61c65e3f25da..4d81b896f49b 100644 --- a/internal/services/storage/storage_encryption_scope_resource.go +++ b/internal/services/storage/storage_encryption_scope_resource.go @@ -6,7 +6,7 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-01-01/storage" + "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" keyVaultValidate "github.com/hashicorp/terraform-provider-azurerm/internal/services/keyvault/validate" @@ -56,15 +56,15 @@ func resourceStorageEncryptionScope() *pluginsdk.Resource { Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ - string(storage.MicrosoftKeyVault), - string(storage.MicrosoftStorage), + string(storage.EncryptionScopeSourceMicrosoftKeyVault), + string(storage.EncryptionScopeSourceMicrosoftStorage), }, false), }, "key_vault_key_id": { Type: pluginsdk.TypeString, Optional: true, - ValidateFunc: keyVaultValidate.KeyVaultChildID, + ValidateFunc: keyVaultValidate.KeyVaultChildIDWithOptionalVersion, }, "infrastructure_encryption_required": { @@ -94,11 +94,11 @@ func 
resourceStorageEncryptionScopeCreate(d *pluginsdk.ResourceData, meta interf return fmt.Errorf("checking for present of existing Storage Encryption Scope %q (Storage Account Name %q / Resource Group %q): %+v", name, accountId.Name, accountId.ResourceGroup, err) } } - if existing.EncryptionScopeProperties != nil && strings.EqualFold(string(existing.EncryptionScopeProperties.State), string(storage.Enabled)) { + if existing.EncryptionScopeProperties != nil && strings.EqualFold(string(existing.EncryptionScopeProperties.State), string(storage.EncryptionScopeStateEnabled)) { return tf.ImportAsExistsError("azurerm_storage_encryption_scope", resourceId) } - if d.Get("source").(string) == string(storage.MicrosoftKeyVault) { + if d.Get("source").(string) == string(storage.EncryptionScopeSourceMicrosoftKeyVault) { if _, ok := d.GetOk("key_vault_key_id"); !ok { return fmt.Errorf("`key_vault_key_id` is required when source is `%s`", string(storage.KeySourceMicrosoftKeyvault)) } @@ -107,7 +107,7 @@ func resourceStorageEncryptionScopeCreate(d *pluginsdk.ResourceData, meta interf props := storage.EncryptionScope{ EncryptionScopeProperties: &storage.EncryptionScopeProperties{ Source: storage.EncryptionScopeSource(d.Get("source").(string)), - State: storage.Enabled, + State: storage.EncryptionScopeStateEnabled, KeyVaultProperties: &storage.EncryptionScopeKeyVaultProperties{ KeyURI: utils.String(d.Get("key_vault_key_id").(string)), }, @@ -136,7 +136,7 @@ func resourceStorageEncryptionScopeUpdate(d *pluginsdk.ResourceData, meta interf return err } - if d.Get("source").(string) == string(storage.MicrosoftKeyVault) { + if d.Get("source").(string) == string(storage.EncryptionScopeSourceMicrosoftKeyVault) { if _, ok := d.GetOk("key_vault_key_id"); !ok { return fmt.Errorf("`key_vault_key_id` is required when source is `%s`", string(storage.KeySourceMicrosoftKeyvault)) } @@ -145,7 +145,7 @@ func resourceStorageEncryptionScopeUpdate(d *pluginsdk.ResourceData, meta interf props := storage.EncryptionScope{ EncryptionScopeProperties: &storage.EncryptionScopeProperties{ Source: storage.EncryptionScopeSource(d.Get("source").(string)), - State: storage.Enabled, + State: storage.EncryptionScopeStateEnabled, KeyVaultProperties: &storage.EncryptionScopeKeyVaultProperties{ KeyURI: utils.String(d.Get("key_vault_key_id").(string)), }, @@ -185,7 +185,7 @@ func resourceStorageEncryptionScopeRead(d *pluginsdk.ResourceData, meta interfac } props := *resp.EncryptionScopeProperties - if strings.EqualFold(string(props.State), string(storage.Disabled)) { + if strings.EqualFold(string(props.State), string(storage.EncryptionScopeStateDisabled)) { log.Printf("[INFO] Storage Encryption Scope %q (Storage Account Name %q / Resource Group %q) does not exist - removing from state", id.Name, id.StorageAccountName, id.ResourceGroup) d.SetId("") return nil @@ -222,7 +222,7 @@ func resourceStorageEncryptionScopeDelete(d *pluginsdk.ResourceData, meta interf props := storage.EncryptionScope{ EncryptionScopeProperties: &storage.EncryptionScopeProperties{ - State: storage.Disabled, + State: storage.EncryptionScopeStateDisabled, }, } @@ -236,9 +236,9 @@ func resourceStorageEncryptionScopeDelete(d *pluginsdk.ResourceData, meta interf func flattenEncryptionScopeSource(input storage.EncryptionScopeSource) string { // TODO: file a bug // the Storage API differs from every other API in Azure in that these Enum's can be returned case-insensitively - if strings.EqualFold(string(input), string(storage.MicrosoftKeyVault)) { - return 
string(storage.MicrosoftKeyVault) + if strings.EqualFold(string(input), string(storage.EncryptionScopeSourceMicrosoftKeyVault)) { + return string(storage.EncryptionScopeSourceMicrosoftKeyVault) } - return string(storage.MicrosoftStorage) + return string(storage.EncryptionScopeSourceMicrosoftStorage) } diff --git a/internal/services/storage/storage_encryption_scope_resource_test.go b/internal/services/storage/storage_encryption_scope_resource_test.go index 0c75191fa6f3..ed4ab2549fb5 100644 --- a/internal/services/storage/storage_encryption_scope_resource_test.go +++ b/internal/services/storage/storage_encryption_scope_resource_test.go @@ -6,7 +6,7 @@ import ( "strings" "testing" - "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-01-01/storage" + "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" @@ -32,6 +32,21 @@ func TestAccStorageEncryptionScope_keyVaultKey(t *testing.T) { }) } +func TestAccStorageEncryptionScope_keyVaultKeyVersionless(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_storage_encryption_scope", "test") + r := StorageEncryptionScopeResource{} + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.keyVaultKeyVersionless(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("source").HasValue("Microsoft.KeyVault"), + ), + }, + data.ImportStep(), + }) +} + func TestAccStorageEncryptionScope_keyVaultKeyRequireInfrastructureEncryption(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_storage_encryption_scope", "test") r := StorageEncryptionScopeResource{} @@ -178,7 +193,7 @@ func (t StorageEncryptionScopeResource) Exists(ctx context.Context, clients *cli enabled := false if resp.EncryptionScopeProperties != nil { - enabled = strings.EqualFold(string(resp.EncryptionScopeProperties.State), string(storage.Enabled)) + enabled = strings.EqualFold(string(resp.EncryptionScopeProperties.State), string(storage.EncryptionScopeStateEnabled)) } return utils.Bool(enabled), nil @@ -206,6 +221,28 @@ resource "azurerm_storage_encryption_scope" "test" { `, template, data.RandomInteger) } +func (t StorageEncryptionScopeResource) keyVaultKeyVersionless(data acceptance.TestData) string { + template := t.template(data) + return fmt.Sprintf(` +provider "azurerm" { + features { + key_vault { + purge_soft_delete_on_destroy = false + } + } +} + +%s + +resource "azurerm_storage_encryption_scope" "test" { + name = "acctestES%d" + storage_account_id = azurerm_storage_account.test.id + source = "Microsoft.KeyVault" + key_vault_key_id = azurerm_key_vault_key.first.versionless_id +} +`, template, data.RandomInteger) +} + func (t StorageEncryptionScopeResource) keyVaultKeyUpdated(data acceptance.TestData) string { template := t.template(data) return fmt.Sprintf(` diff --git a/internal/services/storage/storage_management_policy_resource.go b/internal/services/storage/storage_management_policy_resource.go index 10ed08108062..924f9ae08688 100644 --- a/internal/services/storage/storage_management_policy_resource.go +++ b/internal/services/storage/storage_management_policy_resource.go @@ -5,7 +5,7 @@ import ( "regexp" "time" - "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-01-01/storage" + 
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/validate" diff --git a/internal/services/storage/storage_object_replication_resource.go b/internal/services/storage/storage_object_replication_resource.go index 74817e3441a5..5e80c5cba2fd 100644 --- a/internal/services/storage/storage_object_replication_resource.go +++ b/internal/services/storage/storage_object_replication_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-01-01/storage" + "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/parse" diff --git a/internal/services/synapse/parse/workspace_aad_admin.go b/internal/services/synapse/parse/workspace_aad_admin.go new file mode 100644 index 000000000000..6926c18e55cf --- /dev/null +++ b/internal/services/synapse/parse/workspace_aad_admin.go @@ -0,0 +1,75 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" +) + +type WorkspaceAADAdminId struct { + SubscriptionId string + ResourceGroup string + WorkspaceName string + AdministratorName string +} + +func NewWorkspaceAADAdminID(subscriptionId, resourceGroup, workspaceName, administratorName string) WorkspaceAADAdminId { + return WorkspaceAADAdminId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + WorkspaceName: workspaceName, + AdministratorName: administratorName, + } +} + +func (id WorkspaceAADAdminId) String() string { + segments := []string{ + fmt.Sprintf("Administrator Name %q", id.AdministratorName), + fmt.Sprintf("Workspace Name %q", id.WorkspaceName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Workspace A A D Admin", segmentsStr) +} + +func (id WorkspaceAADAdminId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Synapse/workspaces/%s/administrators/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.WorkspaceName, id.AdministratorName) +} + +// WorkspaceAADAdminID parses a WorkspaceAADAdmin ID into an WorkspaceAADAdminId struct +func WorkspaceAADAdminID(input string) (*WorkspaceAADAdminId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := WorkspaceAADAdminId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.WorkspaceName, err = id.PopSegment("workspaces"); err != nil { + return nil, err + } + if resourceId.AdministratorName, err = id.PopSegment("administrators"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git 
a/internal/services/synapse/parse/workspace_aad_admin_test.go b/internal/services/synapse/parse/workspace_aad_admin_test.go new file mode 100644 index 000000000000..ce7527324097 --- /dev/null +++ b/internal/services/synapse/parse/workspace_aad_admin_test.go @@ -0,0 +1,128 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "testing" + + "github.com/hashicorp/terraform-provider-azurerm/internal/resourceid" +) + +var _ resourceid.Formatter = WorkspaceAADAdminId{} + +func TestWorkspaceAADAdminIDFormatter(t *testing.T) { + actual := NewWorkspaceAADAdminID("12345678-1234-9876-4563-123456789012", "resourceGroup1", "workspace1", "activeDirectory").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.Synapse/workspaces/workspace1/administrators/activeDirectory" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestWorkspaceAADAdminID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *WorkspaceAADAdminId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Error: true, + }, + + { + // missing WorkspaceName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.Synapse/", + Error: true, + }, + + { + // missing value for WorkspaceName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.Synapse/workspaces/", + Error: true, + }, + + { + // missing AdministratorName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.Synapse/workspaces/workspace1/", + Error: true, + }, + + { + // missing value for AdministratorName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.Synapse/workspaces/workspace1/administrators/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.Synapse/workspaces/workspace1/administrators/activeDirectory", + Expected: &WorkspaceAADAdminId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resourceGroup1", + WorkspaceName: "workspace1", + AdministratorName: "activeDirectory", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESOURCEGROUP1/PROVIDERS/MICROSOFT.SYNAPSE/WORKSPACES/WORKSPACE1/ADMINISTRATORS/ACTIVEDIRECTORY", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := WorkspaceAADAdminID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + 
t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.WorkspaceName != v.Expected.WorkspaceName { + t.Fatalf("Expected %q but got %q for WorkspaceName", v.Expected.WorkspaceName, actual.WorkspaceName) + } + if actual.AdministratorName != v.Expected.AdministratorName { + t.Fatalf("Expected %q but got %q for AdministratorName", v.Expected.AdministratorName, actual.AdministratorName) + } + } +} diff --git a/internal/services/synapse/registration.go b/internal/services/synapse/registration.go index 0871409283d7..fb33bfbe5341 100644 --- a/internal/services/synapse/registration.go +++ b/internal/services/synapse/registration.go @@ -40,6 +40,7 @@ func (r Registration) SupportedResources() map[string]*pluginsdk.Resource { "azurerm_synapse_sql_pool_vulnerability_assessment": resourceSynapseSqlPoolVulnerabilityAssessment(), "azurerm_synapse_sql_pool_vulnerability_assessment_baseline": resourceSynapseSqlPoolVulnerabilityAssessmentBaseline(), "azurerm_synapse_workspace": resourceSynapseWorkspace(), + "azurerm_synapse_workspace_aad_admin": resourceSynapseWorkspaceAADAdmin(), "azurerm_synapse_workspace_extended_auditing_policy": resourceSynapseWorkspaceExtendedAuditingPolicy(), "azurerm_synapse_workspace_key": resourceSynapseWorkspaceKey(), "azurerm_synapse_workspace_security_alert_policy": resourceSynapseWorkspaceSecurityAlertPolicy(), diff --git a/internal/services/synapse/resourceids.go b/internal/services/synapse/resourceids.go index 310a27831bb4..fe104ab16379 100644 --- a/internal/services/synapse/resourceids.go +++ b/internal/services/synapse/resourceids.go @@ -13,6 +13,7 @@ package synapse //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=SqlPoolVulnerabilityAssessment -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Synapse/workspaces/workspace1/sqlPools/sqlPool1/vulnerabilityAssessments/default //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=SqlPoolVulnerabilityAssessmentBaseline -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Synapse/workspaces/workspace1/sqlPools/sqlPool1/vulnerabilityAssessments/default/rules/rule1/baselines/baseline1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=Workspace -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Synapse/workspaces/workspace1 +//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=WorkspaceAADAdmin -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.Synapse/workspaces/workspace1/administrators/activeDirectory //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=WorkspaceExtendedAuditingPolicy -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Synapse/workspaces/workspace1/extendedAuditingSettings/default //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=WorkspaceKeys -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Synapse/workspaces/workspace1/keys/key1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=WorkspaceSecurityAlertPolicy -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Synapse/workspaces/workspace1/securityAlertPolicies/Default 
diff --git a/internal/services/synapse/synapse_workspace_aad_admin_resource.go b/internal/services/synapse/synapse_workspace_aad_admin_resource.go new file mode 100644 index 000000000000..12c0dce3a96e --- /dev/null +++ b/internal/services/synapse/synapse_workspace_aad_admin_resource.go @@ -0,0 +1,146 @@ +package synapse + +import ( + "fmt" + "time" + + "github.com/Azure/azure-sdk-for-go/services/synapse/mgmt/2021-03-01/synapse" + "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/synapse/parse" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/synapse/validate" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" + "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" + "github.com/hashicorp/terraform-provider-azurerm/utils" +) + +func resourceSynapseWorkspaceAADAdmin() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceSynapseWorkspaceAADAdminCreateUpdate, + Read: resourceSynapseWorkspaceAADAdminRead, + Update: resourceSynapseWorkspaceAADAdminCreateUpdate, + Delete: resourceSynapseWorkspaceAADAdminDelete, + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.WorkspaceAADAdminID(id) + return err + }), + + Schema: map[string]*pluginsdk.Schema{ + "synapse_workspace_id": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validate.WorkspaceID, + }, + + "login": { + Type: pluginsdk.TypeString, + Required: true, + }, + + "object_id": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.IsUUID, + }, + + "tenant_id": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.IsUUID, + }, + }, + } +} + +func resourceSynapseWorkspaceAADAdminCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Synapse.WorkspaceAadAdminsClient + ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) + defer cancel() + + workspaceId, err := parse.WorkspaceID(d.Get("synapse_workspace_id").(string)) + if err != nil { + return err + } + workspaceName := workspaceId.Name + workspaceResourceGroup := workspaceId.ResourceGroup + + aadAdmin := &synapse.WorkspaceAadAdminInfo{ + AadAdminProperties: &synapse.AadAdminProperties{ + TenantID: utils.String(d.Get("tenant_id").(string)), + Login: utils.String(d.Get("login").(string)), + AdministratorType: utils.String("ActiveDirectory"), + Sid: utils.String(d.Get("object_id").(string)), + }, + } + + workspaceAadAdminsCreateOrUpdateFuture, err := client.CreateOrUpdate(ctx, workspaceResourceGroup, workspaceName, *aadAdmin) + if err != nil { + return fmt.Errorf("updating Synapse Workspace %q AAD Admin (Resource Group %q): %+v", workspaceName, workspaceResourceGroup, err) + } + + if err = workspaceAadAdminsCreateOrUpdateFuture.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting on updating for Synapse Workspace %q AAD Admin (Resource Group %q): %+v", workspaceName, workspaceResourceGroup, err) + } + + id := parse.NewWorkspaceAADAdminID(workspaceId.SubscriptionId, workspaceId.ResourceGroup, workspaceId.Name, 
"activeDirectory") + d.SetId(id.ID()) + + return resourceSynapseWorkspaceAADAdminRead(d, meta) +} + +func resourceSynapseWorkspaceAADAdminRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Synapse.WorkspaceAadAdminsClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.WorkspaceAADAdminID(d.Id()) + if err != nil { + return err + } + + aadAdmin, err := client.Get(ctx, id.ResourceGroup, id.WorkspaceName) + if err != nil { + if !utils.ResponseWasNotFound(aadAdmin.Response) { + return fmt.Errorf("retrieving Synapse Workspace %q AAD Admin (Resource Group %q): %+v", id.WorkspaceName, id.ResourceGroup, err) + } + } + + workspaceID := parse.NewWorkspaceID(id.SubscriptionId, id.ResourceGroup, id.WorkspaceName) + + d.Set("synapse_workspace_id", workspaceID.ID()) + d.Set("login", aadAdmin.AadAdminProperties.Login) + d.Set("object_id", aadAdmin.AadAdminProperties.Sid) + d.Set("tenant_id", aadAdmin.AadAdminProperties.TenantID) + + return nil +} + +func resourceSynapseWorkspaceAADAdminDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Synapse.WorkspaceAadAdminsClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.WorkspaceAADAdminID(d.Id()) + if err != nil { + return err + } + + future, err := client.Delete(ctx, id.ResourceGroup, id.WorkspaceName) + if err != nil { + return fmt.Errorf("setting empty Synapse Workspace %q AAD Admin (Resource Group %q): %+v", id.WorkspaceName, id.ResourceGroup, err) + } + + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting on setting empty Synapse Workspace %q AAD Admin (Resource Group %q): %+v", id.WorkspaceName, id.ResourceGroup, err) + } + + return nil +} diff --git a/internal/services/synapse/synapse_workspace_aad_admin_resource_test.go b/internal/services/synapse/synapse_workspace_aad_admin_resource_test.go new file mode 100644 index 000000000000..4dce9680fca3 --- /dev/null +++ b/internal/services/synapse/synapse_workspace_aad_admin_resource_test.go @@ -0,0 +1,103 @@ +package synapse_test + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" + "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/synapse/parse" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + "github.com/hashicorp/terraform-provider-azurerm/utils" +) + +type SynapseWorkspaceAADAdminResource struct{} + +func TestAccSynapseWorkspaceAADAdmin_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_synapse_workspace_aad_admin", "test") + r := SynapseWorkspaceAADAdminResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func (r SynapseWorkspaceAADAdminResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.WorkspaceAADAdminID(state.ID) + if err != nil { + return nil, err + } + + resp, err := client.Synapse.WorkspaceAadAdminsClient.Get(ctx, id.ResourceGroup, id.WorkspaceName) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return 
utils.Bool(false), nil + } + return nil, fmt.Errorf("retrieving Synapse Workspace %q (Resource Group %q): %+v", id.WorkspaceName, id.ResourceGroup, err) + } + + return utils.Bool(true), nil +} + +func (r SynapseWorkspaceAADAdminResource) basic(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s +data "azurerm_client_config" "current" {} + +resource "azurerm_synapse_workspace_aad_admin" "test" { + synapse_workspace_id = azurerm_synapse_workspace.test.id + login = "AzureAD Admin" + object_id = data.azurerm_client_config.current.object_id + tenant_id = data.azurerm_client_config.current.tenant_id +} +`, template) +} + +func (r SynapseWorkspaceAADAdminResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features { + key_vault { + purge_soft_delete_on_destroy = false + } + } +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-synapse-%d" + location = "%s" +} + +resource "azurerm_storage_account" "test" { + name = "acctestacc%s" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + account_kind = "BlobStorage" + account_tier = "Standard" + account_replication_type = "LRS" +} + +resource "azurerm_storage_data_lake_gen2_filesystem" "test" { + name = "acctest-%d" + storage_account_id = azurerm_storage_account.test.id +} + +resource "azurerm_synapse_workspace" "test" { + name = "acctestsw%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + storage_data_lake_gen2_filesystem_id = azurerm_storage_data_lake_gen2_filesystem.test.id + sql_administrator_login = "sqladminuser" + sql_administrator_login_password = "H@Sh1CoR3!" +} +`, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomInteger, data.RandomInteger) +} diff --git a/internal/services/synapse/synapse_workspace_resource.go b/internal/services/synapse/synapse_workspace_resource.go index 45b2ca6542dc..972e13b74aad 100644 --- a/internal/services/synapse/synapse_workspace_resource.go +++ b/internal/services/synapse/synapse_workspace_resource.go @@ -109,11 +109,12 @@ func resourceSynapseWorkspace() *pluginsdk.Resource { }, "aad_admin": { - Type: pluginsdk.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - ConfigMode: pluginsdk.SchemaConfigModeAttr, + Type: pluginsdk.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + ConfigMode: pluginsdk.SchemaConfigModeAttr, + ConflictsWith: []string{"customer_managed_key"}, Elem: &pluginsdk.Resource{ Schema: map[string]*pluginsdk.Schema{ "login": { @@ -303,9 +304,10 @@ func resourceSynapseWorkspace() *pluginsdk.Resource { }, "customer_managed_key": { - Type: pluginsdk.TypeList, - Optional: true, - MaxItems: 1, + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"aad_admin"}, Elem: &pluginsdk.Resource{ Schema: map[string]*pluginsdk.Schema{ "key_versionless_id": { diff --git a/internal/services/synapse/validate/workspace_aad_admin_id.go b/internal/services/synapse/validate/workspace_aad_admin_id.go new file mode 100644 index 000000000000..9740a2267cd0 --- /dev/null +++ b/internal/services/synapse/validate/workspace_aad_admin_id.go @@ -0,0 +1,23 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-azurerm/internal/services/synapse/parse" +) + +func WorkspaceAADAdminID(input interface{}, key string) 
(warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := parse.WorkspaceAADAdminID(v); err != nil { + errors = append(errors, err) + } + + return +} diff --git a/internal/services/synapse/validate/workspace_aad_admin_id_test.go b/internal/services/synapse/validate/workspace_aad_admin_id_test.go new file mode 100644 index 000000000000..e511fa1f69b9 --- /dev/null +++ b/internal/services/synapse/validate/workspace_aad_admin_id_test.go @@ -0,0 +1,88 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import "testing" + +func TestWorkspaceAADAdminID(t *testing.T) { + cases := []struct { + Input string + Valid bool + }{ + + { + // empty + Input: "", + Valid: false, + }, + + { + // missing SubscriptionId + Input: "/", + Valid: false, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Valid: false, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Valid: false, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Valid: false, + }, + + { + // missing WorkspaceName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.Synapse/", + Valid: false, + }, + + { + // missing value for WorkspaceName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.Synapse/workspaces/", + Valid: false, + }, + + { + // missing AdministratorName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.Synapse/workspaces/workspace1/", + Valid: false, + }, + + { + // missing value for AdministratorName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.Synapse/workspaces/workspace1/administrators/", + Valid: false, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.Synapse/workspaces/workspace1/administrators/activeDirectory", + Valid: true, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESOURCEGROUP1/PROVIDERS/MICROSOFT.SYNAPSE/WORKSPACES/WORKSPACE1/ADMINISTRATORS/ACTIVEDIRECTORY", + Valid: false, + }, + } + for _, tc := range cases { + t.Logf("[DEBUG] Testing Value %s", tc.Input) + _, errors := WorkspaceAADAdminID(tc.Input, "test") + valid := len(errors) == 0 + + if tc.Valid != valid { + t.Fatalf("Expected %t but got %t", tc.Valid, valid) + } + } +} diff --git a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/constants.go b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/constants.go index b187bf91fe99..e5c22ea9ffdf 100644 --- a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/constants.go +++ b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/constants.go @@ -1,5 +1,7 @@ package videoanalyzer +import "strings" + type AccessPolicyEccAlgo string const ( @@ -8,12 +10,54 @@ const ( AccessPolicyEccAlgoESTwoFiveSix AccessPolicyEccAlgo = "ES256" ) +func PossibleValuesForAccessPolicyEccAlgo() []string { + return []string{ + string(AccessPolicyEccAlgoESFiveOneTwo), + string(AccessPolicyEccAlgoESThreeEightFour), + 
string(AccessPolicyEccAlgoESTwoFiveSix), + } +} + +func parseAccessPolicyEccAlgo(input string) (*AccessPolicyEccAlgo, error) { + vals := map[string]AccessPolicyEccAlgo{ + "es512": AccessPolicyEccAlgoESFiveOneTwo, + "es384": AccessPolicyEccAlgoESThreeEightFour, + "es256": AccessPolicyEccAlgoESTwoFiveSix, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AccessPolicyEccAlgo(input) + return &out, nil +} + type AccessPolicyRole string const ( AccessPolicyRoleReader AccessPolicyRole = "Reader" ) +func PossibleValuesForAccessPolicyRole() []string { + return []string{ + string(AccessPolicyRoleReader), + } +} + +func parseAccessPolicyRole(input string) (*AccessPolicyRole, error) { + vals := map[string]AccessPolicyRole{ + "reader": AccessPolicyRoleReader, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AccessPolicyRole(input) + return &out, nil +} + type AccessPolicyRsaAlgo string const ( @@ -22,6 +66,29 @@ const ( AccessPolicyRsaAlgoRSTwoFiveSix AccessPolicyRsaAlgo = "RS256" ) +func PossibleValuesForAccessPolicyRsaAlgo() []string { + return []string{ + string(AccessPolicyRsaAlgoRSFiveOneTwo), + string(AccessPolicyRsaAlgoRSThreeEightFour), + string(AccessPolicyRsaAlgoRSTwoFiveSix), + } +} + +func parseAccessPolicyRsaAlgo(input string) (*AccessPolicyRsaAlgo, error) { + vals := map[string]AccessPolicyRsaAlgo{ + "rs512": AccessPolicyRsaAlgoRSFiveOneTwo, + "rs384": AccessPolicyRsaAlgoRSThreeEightFour, + "rs256": AccessPolicyRsaAlgoRSTwoFiveSix, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AccessPolicyRsaAlgo(input) + return &out, nil +} + type AccountEncryptionKeyType string const ( @@ -29,6 +96,27 @@ const ( AccountEncryptionKeyTypeSystemKey AccountEncryptionKeyType = "SystemKey" ) +func PossibleValuesForAccountEncryptionKeyType() []string { + return []string{ + string(AccountEncryptionKeyTypeCustomerKey), + string(AccountEncryptionKeyTypeSystemKey), + } +} + +func parseAccountEncryptionKeyType(input string) (*AccountEncryptionKeyType, error) { + vals := map[string]AccountEncryptionKeyType{ + "customerkey": AccountEncryptionKeyTypeCustomerKey, + "systemkey": AccountEncryptionKeyTypeSystemKey, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AccountEncryptionKeyType(input) + return &out, nil +} + type CheckNameAvailabilityReason string const ( @@ -36,6 +124,27 @@ const ( CheckNameAvailabilityReasonInvalid CheckNameAvailabilityReason = "Invalid" ) +func PossibleValuesForCheckNameAvailabilityReason() []string { + return []string{ + string(CheckNameAvailabilityReasonAlreadyExists), + string(CheckNameAvailabilityReasonInvalid), + } +} + +func parseCheckNameAvailabilityReason(input string) (*CheckNameAvailabilityReason, error) { + vals := map[string]CheckNameAvailabilityReason{ + "alreadyexists": CheckNameAvailabilityReasonAlreadyExists, + "invalid": CheckNameAvailabilityReasonInvalid, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CheckNameAvailabilityReason(input) + return &out, nil +} + type CreatedByType string const ( @@ -45,14 +154,77 @@ const ( CreatedByTypeUser CreatedByType = "User" ) +func 
PossibleValuesForCreatedByType() []string { + return []string{ + string(CreatedByTypeApplication), + string(CreatedByTypeKey), + string(CreatedByTypeManagedIdentity), + string(CreatedByTypeUser), + } +} + +func parseCreatedByType(input string) (*CreatedByType, error) { + vals := map[string]CreatedByType{ + "application": CreatedByTypeApplication, + "key": CreatedByTypeKey, + "managedidentity": CreatedByTypeManagedIdentity, + "user": CreatedByTypeUser, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CreatedByType(input) + return &out, nil +} + type VideoAnalyzerEndpointType string const ( VideoAnalyzerEndpointTypeClientApi VideoAnalyzerEndpointType = "ClientApi" ) +func PossibleValuesForVideoAnalyzerEndpointType() []string { + return []string{ + string(VideoAnalyzerEndpointTypeClientApi), + } +} + +func parseVideoAnalyzerEndpointType(input string) (*VideoAnalyzerEndpointType, error) { + vals := map[string]VideoAnalyzerEndpointType{ + "clientapi": VideoAnalyzerEndpointTypeClientApi, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := VideoAnalyzerEndpointType(input) + return &out, nil +} + type VideoType string const ( VideoTypeArchive VideoType = "Archive" ) + +func PossibleValuesForVideoType() []string { + return []string{ + string(VideoTypeArchive), + } +} + +func parseVideoType(input string) (*VideoType, error) { + vals := map[string]VideoType{ + "archive": VideoTypeArchive, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := VideoType(input) + return &out, nil +} diff --git a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_accesspolicies.go b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_accesspolicies.go index 0bd650405aff..858b038713c4 100644 --- a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_accesspolicies.go +++ b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_accesspolicies.go @@ -7,120 +7,131 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = AccessPoliciesId{} + +// AccessPoliciesId is a struct representing the Resource ID for a Access Policies type AccessPoliciesId struct { SubscriptionId string - ResourceGroup string - VideoAnalyzerName string + ResourceGroupName string + AccountName string AccessPolicyName string } -func NewAccessPoliciesID(subscriptionId, resourceGroup, videoAnalyzerName, accessPolicyName string) AccessPoliciesId { +// NewAccessPoliciesID returns a new AccessPoliciesId struct +func NewAccessPoliciesID(subscriptionId string, resourceGroupName string, accountName string, accessPolicyName string) AccessPoliciesId { return AccessPoliciesId{ SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, - VideoAnalyzerName: videoAnalyzerName, + ResourceGroupName: resourceGroupName, + AccountName: accountName, AccessPolicyName: accessPolicyName, } } -func (id AccessPoliciesId) String() string { - segments := []string{ - fmt.Sprintf("Access Policy Name %q", id.AccessPolicyName), - fmt.Sprintf("Video Analyzer Name %q", id.VideoAnalyzerName), - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Access Policies", segmentsStr) 
-} - -func (id AccessPoliciesId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Media/videoAnalyzers/%s/accessPolicies/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.VideoAnalyzerName, id.AccessPolicyName) -} - -// ParseAccessPoliciesID parses a AccessPolicies ID into an AccessPoliciesId struct +// ParseAccessPoliciesID parses 'input' into a AccessPoliciesId func ParseAccessPoliciesID(input string) (*AccessPoliciesId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(AccessPoliciesId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := AccessPoliciesId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, - } + var ok bool + id := AccessPoliciesId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.VideoAnalyzerName, err = id.PopSegment("videoAnalyzers"); err != nil { - return nil, err - } - if resourceId.AccessPolicyName, err = id.PopSegment("accessPolicies"); err != nil { - return nil, err + if id.AccountName, ok = parsed.Parsed["accountName"]; !ok { + return nil, fmt.Errorf("the segment 'accountName' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.AccessPolicyName, ok = parsed.Parsed["accessPolicyName"]; !ok { + return nil, fmt.Errorf("the segment 'accessPolicyName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParseAccessPoliciesIDInsensitively parses an AccessPolicies ID into an AccessPoliciesId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseAccessPoliciesID method should be used instead for validation etc. 
+// ParseAccessPoliciesIDInsensitively parses 'input' case-insensitively into a AccessPoliciesId +// note: this method should only be used for API response data and not user input func ParseAccessPoliciesIDInsensitively(input string) (*AccessPoliciesId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(AccessPoliciesId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := AccessPoliciesId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, - } + var ok bool + id := AccessPoliciesId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - // find the correct casing for the 'videoAnalyzers' segment - videoAnalyzersKey := "videoAnalyzers" - for key := range id.Path { - if strings.EqualFold(key, videoAnalyzersKey) { - videoAnalyzersKey = key - break - } + if id.AccountName, ok = parsed.Parsed["accountName"]; !ok { + return nil, fmt.Errorf("the segment 'accountName' was not found in the resource id %q", input) } - if resourceId.VideoAnalyzerName, err = id.PopSegment(videoAnalyzersKey); err != nil { - return nil, err + + if id.AccessPolicyName, ok = parsed.Parsed["accessPolicyName"]; !ok { + return nil, fmt.Errorf("the segment 'accessPolicyName' was not found in the resource id %q", input) } - // find the correct casing for the 'accessPolicies' segment - accessPoliciesKey := "accessPolicies" - for key := range id.Path { - if strings.EqualFold(key, accessPoliciesKey) { - accessPoliciesKey = key - break - } + return &id, nil +} + +// ValidateAccessPoliciesID checks that 'input' can be parsed as a Access Policies ID +func ValidateAccessPoliciesID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.AccessPolicyName, err = id.PopSegment(accessPoliciesKey); err != nil { - return nil, err + + if _, err := ParseAccessPoliciesID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Access Policies ID +func (id AccessPoliciesId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Media/videoAnalyzers/%s/accessPolicies/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.AccountName, id.AccessPolicyName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Access Policies ID +func (id AccessPoliciesId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + 
resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftMedia", "Microsoft.Media", "Microsoft.Media"), + resourceids.StaticSegment("staticVideoAnalyzers", "videoAnalyzers", "videoAnalyzers"), + resourceids.UserSpecifiedSegment("accountName", "accountValue"), + resourceids.StaticSegment("staticAccessPolicies", "accessPolicies", "accessPolicies"), + resourceids.UserSpecifiedSegment("accessPolicyName", "accessPolicyValue"), } +} - return &resourceId, nil +// String returns a human-readable description of this Access Policies ID +func (id AccessPoliciesId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Account Name: %q", id.AccountName), + fmt.Sprintf("Access Policy Name: %q", id.AccessPolicyName), + } + return fmt.Sprintf("Access Policies (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_accesspolicies_test.go b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_accesspolicies_test.go index dd81e1d9ca45..d3a49d964bbe 100644 --- a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_accesspolicies_test.go +++ b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_accesspolicies_test.go @@ -6,13 +6,33 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = AccessPoliciesId{} +var _ resourceids.ResourceId = AccessPoliciesId{} -func TestAccessPoliciesIDFormatter(t *testing.T) { - actual := NewAccessPoliciesID("{subscriptionId}", "{resourceGroupName}", "{accountName}", "{accessPolicyName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/accessPolicies/{accessPolicyName}" +func TestNewAccessPoliciesID(t *testing.T) { + id := NewAccessPoliciesID("12345678-1234-9876-4563-123456789012", "example-resource-group", "accountValue", "accessPolicyValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.AccountName != "accountValue" { + t.Fatalf("Expected %q but got %q for Segment 'AccountName'", id.AccountName, "accountValue") + } + + if id.AccessPolicyName != "accessPolicyValue" { + t.Fatalf("Expected %q but got %q for Segment 'AccessPolicyName'", id.AccessPolicyName, "accessPolicyValue") + } +} + +func TestFormatAccessPoliciesID(t *testing.T) { + actual := NewAccessPoliciesID("12345678-1234-9876-4563-123456789012", "example-resource-group", "accountValue", "accessPolicyValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue/accessPolicies/accessPolicyValue" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,79 +42,72 @@ func TestParseAccessPoliciesID(t *testing.T) { Error bool Expected 
*AccessPoliciesId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing VideoAnalyzerName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", Error: true, }, - { - // missing value for VideoAnalyzerName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media", Error: true, }, - { - // missing AccessPolicyName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers", Error: true, }, - { - // missing value for AccessPolicyName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/accessPolicies/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue/accessPolicies", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/accessPolicies/{accessPolicyName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue/accessPolicies/accessPolicyValue", Expected: &AccessPoliciesId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - VideoAnalyzerName: "{accountName}", - AccessPolicyName: "{accessPolicyName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + AccountName: "accountValue", + AccessPolicyName: "accessPolicyValue", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.MEDIA/VIDEOANALYZERS/{ACCOUNTNAME}/ACCESSPOLICIES/{ACCESSPOLICYNAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue/accessPolicies/accessPolicyValue/extra", Error: 
true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -104,7 +117,7 @@ func TestParseAccessPoliciesID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -113,15 +126,19 @@ func TestParseAccessPoliciesID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.VideoAnalyzerName != v.Expected.VideoAnalyzerName { - t.Fatalf("Expected %q but got %q for VideoAnalyzerName", v.Expected.VideoAnalyzerName, actual.VideoAnalyzerName) + + if actual.AccountName != v.Expected.AccountName { + t.Fatalf("Expected %q but got %q for AccountName", v.Expected.AccountName, actual.AccountName) } + if actual.AccessPolicyName != v.Expected.AccessPolicyName { t.Fatalf("Expected %q but got %q for AccessPolicyName", v.Expected.AccessPolicyName, actual.AccessPolicyName) } + } } @@ -131,106 +148,132 @@ func TestParseAccessPoliciesIDInsensitively(t *testing.T) { Error bool Expected *AccessPoliciesId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing VideoAnalyzerName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for VideoAnalyzerName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", Error: true, }, - { - // missing AccessPolicyName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing value for AccessPolicyName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/accessPolicies/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", Error: true, }, - 
{ - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/accessPolicies/{accessPolicyName}", - Expected: &AccessPoliciesId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - VideoAnalyzerName: "{accountName}", - AccessPolicyName: "{accessPolicyName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoanalyzers/{accountName}/accesspolicies/{accessPolicyName}", - Expected: &AccessPoliciesId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - VideoAnalyzerName: "{accountName}", - AccessPolicyName: "{accessPolicyName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mEdIa", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mEdIa/vIdEoAnAlYzErS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mEdIa/vIdEoAnAlYzErS/aCcOuNtVaLuE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue/accessPolicies", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mEdIa/vIdEoAnAlYzErS/aCcOuNtVaLuE/aCcEsSpOlIcIeS", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/VIDEOANALYZERS/{accountName}/ACCESSPOLICIES/{accessPolicyName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue/accessPolicies/accessPolicyValue", Expected: &AccessPoliciesId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - VideoAnalyzerName: "{accountName}", - AccessPolicyName: "{accessPolicyName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + AccountName: "accountValue", + 
AccessPolicyName: "accessPolicyValue", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/ViDeOaNaLyZeRs/{accountName}/AcCeSsPoLiCiEs/{accessPolicyName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue/accessPolicies/accessPolicyValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mEdIa/vIdEoAnAlYzErS/aCcOuNtVaLuE/aCcEsSpOlIcIeS/aCcEsSpOlIcYvAlUe", Expected: &AccessPoliciesId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - VideoAnalyzerName: "{accountName}", - AccessPolicyName: "{accessPolicyName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + AccountName: "aCcOuNtVaLuE", + AccessPolicyName: "aCcEsSpOlIcYvAlUe", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mEdIa/vIdEoAnAlYzErS/aCcOuNtVaLuE/aCcEsSpOlIcIeS/aCcEsSpOlIcYvAlUe/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -240,7 +283,7 @@ func TestParseAccessPoliciesIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -249,14 +292,33 @@ func TestParseAccessPoliciesIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.VideoAnalyzerName != v.Expected.VideoAnalyzerName { - t.Fatalf("Expected %q but got %q for VideoAnalyzerName", v.Expected.VideoAnalyzerName, actual.VideoAnalyzerName) + + if actual.AccountName != v.Expected.AccountName { + t.Fatalf("Expected %q but got %q for AccountName", v.Expected.AccountName, actual.AccountName) } + if actual.AccessPolicyName != v.Expected.AccessPolicyName { t.Fatalf("Expected %q but got %q for AccessPolicyName", v.Expected.AccessPolicyName, actual.AccessPolicyName) } + + } +} + +func TestSegmentsForAccessPoliciesId(t *testing.T) { + segments := AccessPoliciesId{}.Segments() + if len(segments) == 0 { + t.Fatalf("AccessPoliciesId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) } } diff --git a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_edgemodule.go b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_edgemodule.go index 
8d137e34b636..10fa71d5b918 100644 --- a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_edgemodule.go +++ b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_edgemodule.go @@ -7,120 +7,131 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = EdgeModuleId{} + +// EdgeModuleId is a struct representing the Resource ID for a Edge Module type EdgeModuleId struct { SubscriptionId string - ResourceGroup string - VideoAnalyzerName string - Name string + ResourceGroupName string + AccountName string + EdgeModuleName string } -func NewEdgeModuleID(subscriptionId, resourceGroup, videoAnalyzerName, name string) EdgeModuleId { +// NewEdgeModuleID returns a new EdgeModuleId struct +func NewEdgeModuleID(subscriptionId string, resourceGroupName string, accountName string, edgeModuleName string) EdgeModuleId { return EdgeModuleId{ SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, - VideoAnalyzerName: videoAnalyzerName, - Name: name, + ResourceGroupName: resourceGroupName, + AccountName: accountName, + EdgeModuleName: edgeModuleName, } } -func (id EdgeModuleId) String() string { - segments := []string{ - fmt.Sprintf("Name %q", id.Name), - fmt.Sprintf("Video Analyzer Name %q", id.VideoAnalyzerName), - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Edge Module", segmentsStr) -} - -func (id EdgeModuleId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Media/videoAnalyzers/%s/edgeModules/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.VideoAnalyzerName, id.Name) -} - -// ParseEdgeModuleID parses a EdgeModule ID into an EdgeModuleId struct +// ParseEdgeModuleID parses 'input' into a EdgeModuleId func ParseEdgeModuleID(input string) (*EdgeModuleId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(EdgeModuleId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := EdgeModuleId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, - } + var ok bool + id := EdgeModuleId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.VideoAnalyzerName, err = id.PopSegment("videoAnalyzers"); err != nil { - return nil, err - } - if resourceId.Name, err = id.PopSegment("edgeModules"); err != nil { - return nil, err + if id.AccountName, ok = parsed.Parsed["accountName"]; !ok { + return nil, fmt.Errorf("the segment 'accountName' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.EdgeModuleName, ok = parsed.Parsed["edgeModuleName"]; !ok { + return nil, fmt.Errorf("the segment 'edgeModuleName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, 
nil } -// ParseEdgeModuleIDInsensitively parses an EdgeModule ID into an EdgeModuleId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseEdgeModuleID method should be used instead for validation etc. +// ParseEdgeModuleIDInsensitively parses 'input' case-insensitively into a EdgeModuleId +// note: this method should only be used for API response data and not user input func ParseEdgeModuleIDInsensitively(input string) (*EdgeModuleId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(EdgeModuleId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := EdgeModuleId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, - } + var ok bool + id := EdgeModuleId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - // find the correct casing for the 'videoAnalyzers' segment - videoAnalyzersKey := "videoAnalyzers" - for key := range id.Path { - if strings.EqualFold(key, videoAnalyzersKey) { - videoAnalyzersKey = key - break - } + if id.AccountName, ok = parsed.Parsed["accountName"]; !ok { + return nil, fmt.Errorf("the segment 'accountName' was not found in the resource id %q", input) } - if resourceId.VideoAnalyzerName, err = id.PopSegment(videoAnalyzersKey); err != nil { - return nil, err + + if id.EdgeModuleName, ok = parsed.Parsed["edgeModuleName"]; !ok { + return nil, fmt.Errorf("the segment 'edgeModuleName' was not found in the resource id %q", input) } - // find the correct casing for the 'edgeModules' segment - edgeModulesKey := "edgeModules" - for key := range id.Path { - if strings.EqualFold(key, edgeModulesKey) { - edgeModulesKey = key - break - } + return &id, nil +} + +// ValidateEdgeModuleID checks that 'input' can be parsed as a Edge Module ID +func ValidateEdgeModuleID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.Name, err = id.PopSegment(edgeModulesKey); err != nil { - return nil, err + + if _, err := ParseEdgeModuleID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Edge Module ID +func (id EdgeModuleId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Media/videoAnalyzers/%s/edgeModules/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.AccountName, id.EdgeModuleName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Edge Module ID +func (id EdgeModuleId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", 
"12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftMedia", "Microsoft.Media", "Microsoft.Media"), + resourceids.StaticSegment("staticVideoAnalyzers", "videoAnalyzers", "videoAnalyzers"), + resourceids.UserSpecifiedSegment("accountName", "accountValue"), + resourceids.StaticSegment("staticEdgeModules", "edgeModules", "edgeModules"), + resourceids.UserSpecifiedSegment("edgeModuleName", "edgeModuleValue"), } +} - return &resourceId, nil +// String returns a human-readable description of this Edge Module ID +func (id EdgeModuleId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Account Name: %q", id.AccountName), + fmt.Sprintf("Edge Module Name: %q", id.EdgeModuleName), + } + return fmt.Sprintf("Edge Module (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_edgemodule_test.go b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_edgemodule_test.go index 16b9a12b2bf9..04feb01e16eb 100644 --- a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_edgemodule_test.go +++ b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_edgemodule_test.go @@ -6,13 +6,33 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = EdgeModuleId{} +var _ resourceids.ResourceId = EdgeModuleId{} -func TestEdgeModuleIDFormatter(t *testing.T) { - actual := NewEdgeModuleID("{subscriptionId}", "{resourceGroupName}", "{accountName}", "{edgeModuleName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/edgeModules/{edgeModuleName}" +func TestNewEdgeModuleID(t *testing.T) { + id := NewEdgeModuleID("12345678-1234-9876-4563-123456789012", "example-resource-group", "accountValue", "edgeModuleValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.AccountName != "accountValue" { + t.Fatalf("Expected %q but got %q for Segment 'AccountName'", id.AccountName, "accountValue") + } + + if id.EdgeModuleName != "edgeModuleValue" { + t.Fatalf("Expected %q but got %q for Segment 'EdgeModuleName'", id.EdgeModuleName, "edgeModuleValue") + } +} + +func TestFormatEdgeModuleID(t *testing.T) { + actual := NewEdgeModuleID("12345678-1234-9876-4563-123456789012", "example-resource-group", "accountValue", "edgeModuleValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue/edgeModules/edgeModuleValue" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,79 +42,72 @@ func TestParseEdgeModuleID(t *testing.T) { 
Error bool Expected *EdgeModuleId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing VideoAnalyzerName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", Error: true, }, - { - // missing value for VideoAnalyzerName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/edgeModules/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue/edgeModules", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/edgeModules/{edgeModuleName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue/edgeModules/edgeModuleValue", Expected: &EdgeModuleId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - VideoAnalyzerName: "{accountName}", - Name: "{edgeModuleName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + AccountName: "accountValue", + EdgeModuleName: "edgeModuleValue", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.MEDIA/VIDEOANALYZERS/{ACCOUNTNAME}/EDGEMODULES/{EDGEMODULENAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue/edgeModules/edgeModuleValue/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] 
Testing %q", v.Input) @@ -104,7 +117,7 @@ func TestParseEdgeModuleID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -113,15 +126,19 @@ func TestParseEdgeModuleID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.VideoAnalyzerName != v.Expected.VideoAnalyzerName { - t.Fatalf("Expected %q but got %q for VideoAnalyzerName", v.Expected.VideoAnalyzerName, actual.VideoAnalyzerName) + + if actual.AccountName != v.Expected.AccountName { + t.Fatalf("Expected %q but got %q for AccountName", v.Expected.AccountName, actual.AccountName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.EdgeModuleName != v.Expected.EdgeModuleName { + t.Fatalf("Expected %q but got %q for EdgeModuleName", v.Expected.EdgeModuleName, actual.EdgeModuleName) } + } } @@ -131,106 +148,132 @@ func TestParseEdgeModuleIDInsensitively(t *testing.T) { Error bool Expected *EdgeModuleId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing VideoAnalyzerName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for VideoAnalyzerName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/edgeModules/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", Error: 
true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/edgeModules/{edgeModuleName}", - Expected: &EdgeModuleId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - VideoAnalyzerName: "{accountName}", - Name: "{edgeModuleName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoanalyzers/{accountName}/edgemodules/{edgeModuleName}", - Expected: &EdgeModuleId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - VideoAnalyzerName: "{accountName}", - Name: "{edgeModuleName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mEdIa", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mEdIa/vIdEoAnAlYzErS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mEdIa/vIdEoAnAlYzErS/aCcOuNtVaLuE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue/edgeModules", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mEdIa/vIdEoAnAlYzErS/aCcOuNtVaLuE/eDgEmOdUlEs", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/VIDEOANALYZERS/{accountName}/EDGEMODULES/{edgeModuleName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue/edgeModules/edgeModuleValue", Expected: &EdgeModuleId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - VideoAnalyzerName: "{accountName}", - Name: "{edgeModuleName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + AccountName: "accountValue", + EdgeModuleName: "edgeModuleValue", }, }, - { - // mixed-cased segment names 
- Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/ViDeOaNaLyZeRs/{accountName}/EdGeMoDuLeS/{edgeModuleName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue/edgeModules/edgeModuleValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mEdIa/vIdEoAnAlYzErS/aCcOuNtVaLuE/eDgEmOdUlEs/eDgEmOdUlEvAlUe", Expected: &EdgeModuleId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - VideoAnalyzerName: "{accountName}", - Name: "{edgeModuleName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + AccountName: "aCcOuNtVaLuE", + EdgeModuleName: "eDgEmOdUlEvAlUe", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mEdIa/vIdEoAnAlYzErS/aCcOuNtVaLuE/eDgEmOdUlEs/eDgEmOdUlEvAlUe/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -240,7 +283,7 @@ func TestParseEdgeModuleIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -249,14 +292,33 @@ func TestParseEdgeModuleIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.VideoAnalyzerName != v.Expected.VideoAnalyzerName { - t.Fatalf("Expected %q but got %q for VideoAnalyzerName", v.Expected.VideoAnalyzerName, actual.VideoAnalyzerName) + + if actual.AccountName != v.Expected.AccountName { + t.Fatalf("Expected %q but got %q for AccountName", v.Expected.AccountName, actual.AccountName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.EdgeModuleName != v.Expected.EdgeModuleName { + t.Fatalf("Expected %q but got %q for EdgeModuleName", v.Expected.EdgeModuleName, actual.EdgeModuleName) } + + } +} + +func TestSegmentsForEdgeModuleId(t *testing.T) { + segments := EdgeModuleId{}.Segments() + if len(segments) == 0 { + t.Fatalf("EdgeModuleId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) } } diff --git a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_location.go b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_location.go index 5fc7e1bd0db3..2a9a9f715884 100644 --- 
a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_location.go +++ b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_location.go @@ -7,89 +7,105 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = LocationId{} + +// LocationId is a struct representing the Resource ID for a Location type LocationId struct { SubscriptionId string - Name string + LocationName string } -func NewLocationID(subscriptionId, name string) LocationId { +// NewLocationID returns a new LocationId struct +func NewLocationID(subscriptionId string, locationName string) LocationId { return LocationId{ SubscriptionId: subscriptionId, - Name: name, + LocationName: locationName, } } -func (id LocationId) String() string { - segments := []string{ - fmt.Sprintf("Name %q", id.Name), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Location", segmentsStr) -} - -func (id LocationId) ID() string { - fmtString := "/subscriptions/%s/providers/Microsoft.Media/locations/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.Name) -} - -// ParseLocationID parses a Location ID into an LocationId struct +// ParseLocationID parses 'input' into a LocationId func ParseLocationID(input string) (*LocationId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(LocationId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err - } - - resourceId := LocationId{ - SubscriptionId: id.SubscriptionID, + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") - } + var ok bool + id := LocationId{} - if resourceId.Name, err = id.PopSegment("locations"); err != nil { - return nil, err + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.LocationName, ok = parsed.Parsed["locationName"]; !ok { + return nil, fmt.Errorf("the segment 'locationName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParseLocationIDInsensitively parses an Location ID into an LocationId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseLocationID method should be used instead for validation etc. 
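The Location ID helpers follow the same generated pattern; a minimal sketch of the new constructor, formatter and `ValidateLocationID` (which matches the plugin SDK's ValidateFunc shape) is below. `buildLocationID` and the `"location_id"` key are hypothetical, used only for illustration.

```go
package videoanalyzer_usage_example // illustrative only

import (
	"fmt"

	"github.com/hashicorp/terraform-provider-azurerm/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer"
)

// buildLocationID is a hypothetical helper: it builds a Location ID with the
// new constructor, then round-trips it through ValidateLocationID, which has
// the plugin-sdk ValidateFunc shape: func(interface{}, string) ([]string, []error).
func buildLocationID(subscriptionId string, locationName string) (string, error) {
	id := videoanalyzer.NewLocationID(subscriptionId, locationName).ID()

	if _, errs := videoanalyzer.ValidateLocationID(id, "location_id"); len(errs) > 0 {
		return "", fmt.Errorf("validating %q: %+v", id, errs)
	}
	return id, nil
}
```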
+// ParseLocationIDInsensitively parses 'input' case-insensitively into a LocationId +// note: this method should only be used for API response data and not user input func ParseLocationIDInsensitively(input string) (*LocationId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(LocationId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := LocationId{ - SubscriptionId: id.SubscriptionID, + var ok bool + id := LocationId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.LocationName, ok = parsed.Parsed["locationName"]; !ok { + return nil, fmt.Errorf("the segment 'locationName' was not found in the resource id %q", input) } - // find the correct casing for the 'locations' segment - locationsKey := "locations" - for key := range id.Path { - if strings.EqualFold(key, locationsKey) { - locationsKey = key - break - } + return &id, nil +} + +// ValidateLocationID checks that 'input' can be parsed as a Location ID +func ValidateLocationID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.Name, err = id.PopSegment(locationsKey); err != nil { - return nil, err + + if _, err := ParseLocationID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Location ID +func (id LocationId) ID() string { + fmtString := "/subscriptions/%s/providers/Microsoft.Media/locations/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.LocationName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Location ID +func (id LocationId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftMedia", "Microsoft.Media", "Microsoft.Media"), + resourceids.StaticSegment("staticLocations", "locations", "locations"), + resourceids.UserSpecifiedSegment("locationName", "locationValue"), } +} - return &resourceId, nil +// String returns a human-readable description of this Location ID +func (id LocationId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Location Name: %q", id.LocationName), + } + return fmt.Sprintf("Location (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_location_test.go b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_location_test.go index 1398b87de2bc..54d4c85f8e79 100644 --- a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_location_test.go +++ b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_location_test.go @@ -6,13 +6,25 @@ import ( 
"github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = LocationId{} +var _ resourceids.ResourceId = LocationId{} -func TestLocationIDFormatter(t *testing.T) { - actual := NewLocationID("{subscriptionId}", "{locationName}").ID() - expected := "/subscriptions/{subscriptionId}/providers/Microsoft.Media/locations/{locationName}" +func TestNewLocationID(t *testing.T) { + id := NewLocationID("12345678-1234-9876-4563-123456789012", "locationValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.LocationName != "locationValue" { + t.Fatalf("Expected %q but got %q for Segment 'LocationName'", id.LocationName, "locationValue") + } +} + +func TestFormatLocationID(t *testing.T) { + actual := NewLocationID("12345678-1234-9876-4563-123456789012", "locationValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.Media/locations/locationValue" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,53 +34,50 @@ func TestParseLocationID(t *testing.T) { Error bool Expected *LocationId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.Media/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.Media/locations/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.Media", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.Media/locations/{locationName}", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.Media/locations", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.Media/locations/locationValue", Expected: &LocationId{ - SubscriptionId: "{subscriptionId}", - Name: "{locationName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "locationValue", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/PROVIDERS/MICROSOFT.MEDIA/LOCATIONS/{LOCATIONNAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.Media/locations/locationValue/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -78,7 +87,7 @@ func TestParseLocationID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -87,9 +96,11 @@ func TestParseLocationID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.Name != 
v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.LocationName != v.Expected.LocationName { + t.Fatalf("Expected %q but got %q for LocationName", v.Expected.LocationName, actual.LocationName) } + } } @@ -99,74 +110,88 @@ func TestParseLocationIDInsensitively(t *testing.T) { Error bool Expected *LocationId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.Media/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.Media/locations/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.Media/locations/{locationName}", - Expected: &LocationId{ - SubscriptionId: "{subscriptionId}", - Name: "{locationName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.Media/locations/{locationName}", - Expected: &LocationId{ - SubscriptionId: "{subscriptionId}", - Name: "{locationName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.Media", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.mEdIa", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.Media/LOCATIONS/{locationName}", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.Media/locations", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.mEdIa/lOcAtIoNs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.Media/locations/locationValue", Expected: &LocationId{ - SubscriptionId: "{subscriptionId}", - Name: "{locationName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "locationValue", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/providers/Microsoft.Media/LoCaTiOnS/{locationName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.Media/locations/locationValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.mEdIa/lOcAtIoNs/lOcAtIoNvAlUe", Expected: &LocationId{ - SubscriptionId: "{subscriptionId}", - Name: "{locationName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", 
+ LocationName: "lOcAtIoNvAlUe", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.mEdIa/lOcAtIoNs/lOcAtIoNvAlUe/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -176,7 +201,7 @@ func TestParseLocationIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -185,8 +210,25 @@ func TestParseLocationIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.LocationName != v.Expected.LocationName { + t.Fatalf("Expected %q but got %q for LocationName", v.Expected.LocationName, actual.LocationName) } + + } +} + +func TestSegmentsForLocationId(t *testing.T) { + segments := LocationId{}.Segments() + if len(segments) == 0 { + t.Fatalf("LocationId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) } } diff --git a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_resourcegroup.go b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_resourcegroup.go index f0e072226cf4..81fcb1da9578 100644 --- a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_resourcegroup.go +++ b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_resourcegroup.go @@ -7,83 +7,103 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = ResourceGroupId{} + +// ResourceGroupId is a struct representing the Resource ID for a Resource Group type ResourceGroupId struct { - SubscriptionId string - ResourceGroup string + SubscriptionId string + ResourceGroupName string } -func NewResourceGroupID(subscriptionId, resourceGroup string) ResourceGroupId { +// NewResourceGroupID returns a new ResourceGroupId struct +func NewResourceGroupID(subscriptionId string, resourceGroupName string) ResourceGroupId { return ResourceGroupId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, } } -func (id ResourceGroupId) String() string { - segments := []string{ - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Resource Group", segmentsStr) -} - -func (id ResourceGroupId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup) -} - -// ParseResourceGroupID parses a ResourceGroup ID into an ResourceGroupId struct +// ParseResourceGroupID parses 'input' into a ResourceGroupId func ParseResourceGroupID(input string) (*ResourceGroupId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(ResourceGroupId{}) + parsed, err := parser.Parse(input, false) 
if err != nil { - return nil, err - } - - resourceId := ResourceGroupId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") - } + var ok bool + id := ResourceGroupId{} - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParseResourceGroupIDInsensitively parses an ResourceGroup ID into an ResourceGroupId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseResourceGroupID method should be used instead for validation etc. +// ParseResourceGroupIDInsensitively parses 'input' case-insensitively into a ResourceGroupId +// note: this method should only be used for API response data and not user input func ParseResourceGroupIDInsensitively(input string) (*ResourceGroupId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(ResourceGroupId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := ResourceGroupId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - resourceId := ResourceGroupId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + return &id, nil +} + +// ValidateResourceGroupID checks that 'input' can be parsed as a Resource Group ID +func ValidateResourceGroupID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if _, err := ParseResourceGroupID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Resource Group ID +func (id ResourceGroupId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Resource Group ID +func (id ResourceGroupId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + 
resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), } +} - return &resourceId, nil +// String returns a human-readable description of this Resource Group ID +func (id ResourceGroupId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + } + return fmt.Sprintf("Resource Group (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_resourcegroup_test.go b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_resourcegroup_test.go index 31d33ccb412a..a44258d61860 100644 --- a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_resourcegroup_test.go +++ b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_resourcegroup_test.go @@ -6,13 +6,25 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = ResourceGroupId{} +var _ resourceids.ResourceId = ResourceGroupId{} -func TestResourceGroupIDFormatter(t *testing.T) { - actual := NewResourceGroupID("{subscriptionId}", "{resourceGroupName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}" +func TestNewResourceGroupID(t *testing.T) { + id := NewResourceGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } +} + +func TestFormatResourceGroupID(t *testing.T) { + actual := NewResourceGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,53 +34,40 @@ func TestParseResourceGroupID(t *testing.T) { Error bool Expected *ResourceGroupId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", - Error: true, - }, - - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", }, }, - { - // upper-cased - Input: 
"/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -78,7 +77,7 @@ func TestParseResourceGroupID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -87,9 +86,11 @@ func TestParseResourceGroupID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + } } @@ -99,74 +100,68 @@ func TestParseResourceGroupIDInsensitively(t *testing.T) { Error bool Expected *ResourceGroupId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", - Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", - Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -176,7 +171,7 @@ func TestParseResourceGroupIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -185,8 +180,25 @@ func TestParseResourceGroupIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + + } +} + +func TestSegmentsForResourceGroupId(t *testing.T) { + segments := ResourceGroupId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ResourceGroupId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) } } diff --git a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_subscription.go b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_subscription.go index ab2f4d3b4678..59eaf25c68d5 100644 --- a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_subscription.go +++ b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_subscription.go @@ -7,69 +7,90 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = SubscriptionId{} + +// SubscriptionId is a struct representing the Resource ID for a Subscription type SubscriptionId struct { SubscriptionId string } +// NewSubscriptionID returns a new SubscriptionId struct func NewSubscriptionID(subscriptionId string) SubscriptionId { return SubscriptionId{ SubscriptionId: subscriptionId, } } -func (id SubscriptionId) String() string { - segments := []string{} - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Subscription", segmentsStr) -} - -func (id SubscriptionId) ID() string { - fmtString := "/subscriptions/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId) -} - -// ParseSubscriptionID parses a Subscription ID into an SubscriptionId struct +// ParseSubscriptionID parses 'input' into a SubscriptionId func ParseSubscriptionID(input string) (*SubscriptionId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(SubscriptionId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err + return nil, 
fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := SubscriptionId{ - SubscriptionId: id.SubscriptionID, - } + var ok bool + id := SubscriptionId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err - } - - return &resourceId, nil + return &id, nil } -// ParseSubscriptionIDInsensitively parses an Subscription ID into an SubscriptionId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseSubscriptionID method should be used instead for validation etc. +// ParseSubscriptionIDInsensitively parses 'input' case-insensitively into a SubscriptionId +// note: this method should only be used for API response data and not user input func ParseSubscriptionIDInsensitively(input string) (*SubscriptionId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(SubscriptionId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := SubscriptionId{ - SubscriptionId: id.SubscriptionID, + var ok bool + id := SubscriptionId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + return &id, nil +} + +// ValidateSubscriptionID checks that 'input' can be parsed as a Subscription ID +func ValidateSubscriptionID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if _, err := ParseSubscriptionID(v); err != nil { + errors = append(errors, err) } - return &resourceId, nil + return +} + +// ID returns the formatted Subscription ID +func (id SubscriptionId) ID() string { + fmtString := "/subscriptions/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId) +} + +// Segments returns a slice of Resource ID Segments which comprise this Subscription ID +func (id SubscriptionId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + } +} + +// String returns a human-readable description of this Subscription ID +func (id SubscriptionId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + } + return fmt.Sprintf("Subscription (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_subscription_test.go b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_subscription_test.go index bfe84007af0d..9473a2e3b9f9 100644 --- a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_subscription_test.go +++ b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_subscription_test.go @@ -6,13 +6,21 @@ import ( 
"github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = SubscriptionId{} +var _ resourceids.ResourceId = SubscriptionId{} -func TestSubscriptionIDFormatter(t *testing.T) { - actual := NewSubscriptionID("{subscriptionId}").ID() - expected := "/subscriptions/{subscriptionId}" +func TestNewSubscriptionID(t *testing.T) { + id := NewSubscriptionID("12345678-1234-9876-4563-123456789012") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } +} + +func TestFormatSubscriptionID(t *testing.T) { + actual := NewSubscriptionID("12345678-1234-9876-4563-123456789012").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,40 +30,29 @@ func TestParseSubscriptionID(t *testing.T) { Error bool Expected *SubscriptionId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - - { - // missing SubscriptionId - Input: "/", - Error: true, - }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -65,7 +62,7 @@ func TestParseSubscriptionID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -74,6 +71,7 @@ func TestParseSubscriptionID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } + } } @@ -83,58 +81,46 @@ func TestParseSubscriptionIDInsensitively(t *testing.T) { Error bool Expected *SubscriptionId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", }, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}", - Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", - }, + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/extra", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}", + // Valid URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}", - Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", - }, + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/extra", + Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -144,7 +130,7 @@ func TestParseSubscriptionIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -153,5 +139,21 @@ func TestParseSubscriptionIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } + + } +} + +func TestSegmentsForSubscriptionId(t *testing.T) { + segments := SubscriptionId{}.Segments() + if len(segments) == 0 { + t.Fatalf("SubscriptionId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) } } diff --git a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_video.go b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_video.go index bb8545a1ebce..c6139ec3b383 100644 --- a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_video.go +++ b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_video.go @@ -7,120 +7,131 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = VideoId{} + +// VideoId is a struct representing the Resource ID for a Video type VideoId struct { SubscriptionId string - ResourceGroup string - VideoAnalyzerName string - Name string + ResourceGroupName string + AccountName string + VideoName string } -func NewVideoID(subscriptionId, resourceGroup, videoAnalyzerName, name string) VideoId { +// NewVideoID returns a new VideoId struct +func NewVideoID(subscriptionId string, resourceGroupName string, accountName string, videoName string) VideoId { return VideoId{ SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, - VideoAnalyzerName: videoAnalyzerName, - Name: name, + ResourceGroupName: resourceGroupName, + AccountName: accountName, + VideoName: videoName, } } -func (id VideoId) String() string { - segments := []string{ - fmt.Sprintf("Name %q", id.Name), - fmt.Sprintf("Video Analyzer Name %q", id.VideoAnalyzerName), - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Video", segmentsStr) -} - -func (id VideoId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Media/videoAnalyzers/%s/videos/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.VideoAnalyzerName, id.Name) -} - -// ParseVideoID parses a Video ID into an VideoId struct +// ParseVideoID parses 'input' into a VideoId func ParseVideoID(input string) (*VideoId, error) { - id, err 
:= resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(VideoId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := VideoId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, - } + var ok bool + id := VideoId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.VideoAnalyzerName, err = id.PopSegment("videoAnalyzers"); err != nil { - return nil, err - } - if resourceId.Name, err = id.PopSegment("videos"); err != nil { - return nil, err + if id.AccountName, ok = parsed.Parsed["accountName"]; !ok { + return nil, fmt.Errorf("the segment 'accountName' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.VideoName, ok = parsed.Parsed["videoName"]; !ok { + return nil, fmt.Errorf("the segment 'videoName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParseVideoIDInsensitively parses an Video ID into an VideoId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseVideoID method should be used instead for validation etc. 
+// ParseVideoIDInsensitively parses 'input' case-insensitively into a VideoId +// note: this method should only be used for API response data and not user input func ParseVideoIDInsensitively(input string) (*VideoId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(VideoId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := VideoId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, - } + var ok bool + id := VideoId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - // find the correct casing for the 'videoAnalyzers' segment - videoAnalyzersKey := "videoAnalyzers" - for key := range id.Path { - if strings.EqualFold(key, videoAnalyzersKey) { - videoAnalyzersKey = key - break - } + if id.AccountName, ok = parsed.Parsed["accountName"]; !ok { + return nil, fmt.Errorf("the segment 'accountName' was not found in the resource id %q", input) } - if resourceId.VideoAnalyzerName, err = id.PopSegment(videoAnalyzersKey); err != nil { - return nil, err + + if id.VideoName, ok = parsed.Parsed["videoName"]; !ok { + return nil, fmt.Errorf("the segment 'videoName' was not found in the resource id %q", input) } - // find the correct casing for the 'videos' segment - videosKey := "videos" - for key := range id.Path { - if strings.EqualFold(key, videosKey) { - videosKey = key - break - } + return &id, nil +} + +// ValidateVideoID checks that 'input' can be parsed as a Video ID +func ValidateVideoID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.Name, err = id.PopSegment(videosKey); err != nil { - return nil, err + + if _, err := ParseVideoID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Video ID +func (id VideoId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Media/videoAnalyzers/%s/videos/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.AccountName, id.VideoName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Video ID +func (id VideoId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftMedia", "Microsoft.Media", "Microsoft.Media"), + 
resourceids.StaticSegment("staticVideoAnalyzers", "videoAnalyzers", "videoAnalyzers"), + resourceids.UserSpecifiedSegment("accountName", "accountValue"), + resourceids.StaticSegment("staticVideos", "videos", "videos"), + resourceids.UserSpecifiedSegment("videoName", "videoValue"), } +} - return &resourceId, nil +// String returns a human-readable description of this Video ID +func (id VideoId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Account Name: %q", id.AccountName), + fmt.Sprintf("Video Name: %q", id.VideoName), + } + return fmt.Sprintf("Video (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_video_test.go b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_video_test.go index 4c6e369637b1..2b73e8fac7fd 100644 --- a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_video_test.go +++ b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_video_test.go @@ -6,13 +6,33 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = VideoId{} +var _ resourceids.ResourceId = VideoId{} -func TestVideoIDFormatter(t *testing.T) { - actual := NewVideoID("{subscriptionId}", "{resourceGroupName}", "{accountName}", "{videoName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/videos/{videoName}" +func TestNewVideoID(t *testing.T) { + id := NewVideoID("12345678-1234-9876-4563-123456789012", "example-resource-group", "accountValue", "videoValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.AccountName != "accountValue" { + t.Fatalf("Expected %q but got %q for Segment 'AccountName'", id.AccountName, "accountValue") + } + + if id.VideoName != "videoValue" { + t.Fatalf("Expected %q but got %q for Segment 'VideoName'", id.VideoName, "videoValue") + } +} + +func TestFormatVideoID(t *testing.T) { + actual := NewVideoID("12345678-1234-9876-4563-123456789012", "example-resource-group", "accountValue", "videoValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue/videos/videoValue" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,79 +42,72 @@ func TestParseVideoID(t *testing.T) { Error bool Expected *VideoId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - 
// missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing VideoAnalyzerName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", Error: true, }, - { - // missing value for VideoAnalyzerName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/videos/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue/videos", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/videos/{videoName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue/videos/videoValue", Expected: &VideoId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - VideoAnalyzerName: "{accountName}", - Name: "{videoName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + AccountName: "accountValue", + VideoName: "videoValue", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.MEDIA/VIDEOANALYZERS/{ACCOUNTNAME}/VIDEOS/{VIDEONAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue/videos/videoValue/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -104,7 +117,7 @@ func TestParseVideoID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -113,15 +126,19 @@ func TestParseVideoID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + 
+ if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.VideoAnalyzerName != v.Expected.VideoAnalyzerName { - t.Fatalf("Expected %q but got %q for VideoAnalyzerName", v.Expected.VideoAnalyzerName, actual.VideoAnalyzerName) + + if actual.AccountName != v.Expected.AccountName { + t.Fatalf("Expected %q but got %q for AccountName", v.Expected.AccountName, actual.AccountName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.VideoName != v.Expected.VideoName { + t.Fatalf("Expected %q but got %q for VideoName", v.Expected.VideoName, actual.VideoName) } + } } @@ -131,106 +148,132 @@ func TestParseVideoIDInsensitively(t *testing.T) { Error bool Expected *VideoId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing VideoAnalyzerName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for VideoAnalyzerName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/videos/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/videos/{videoName}", - Expected: &VideoId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - VideoAnalyzerName: "{accountName}", - Name: "{videoName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoanalyzers/{accountName}/videos/{videoName}", - Expected: 
&VideoId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - VideoAnalyzerName: "{accountName}", - Name: "{videoName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mEdIa", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mEdIa/vIdEoAnAlYzErS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mEdIa/vIdEoAnAlYzErS/aCcOuNtVaLuE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue/videos", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mEdIa/vIdEoAnAlYzErS/aCcOuNtVaLuE/vIdEoS", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/VIDEOANALYZERS/{accountName}/VIDEOS/{videoName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue/videos/videoValue", Expected: &VideoId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - VideoAnalyzerName: "{accountName}", - Name: "{videoName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + AccountName: "accountValue", + VideoName: "videoValue", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/ViDeOaNaLyZeRs/{accountName}/ViDeOs/{videoName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue/videos/videoValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mEdIa/vIdEoAnAlYzErS/aCcOuNtVaLuE/vIdEoS/vIdEoVaLuE", Expected: &VideoId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - VideoAnalyzerName: "{accountName}", - Name: 
"{videoName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + AccountName: "aCcOuNtVaLuE", + VideoName: "vIdEoVaLuE", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mEdIa/vIdEoAnAlYzErS/aCcOuNtVaLuE/vIdEoS/vIdEoVaLuE/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -240,7 +283,7 @@ func TestParseVideoIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -249,14 +292,33 @@ func TestParseVideoIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.VideoAnalyzerName != v.Expected.VideoAnalyzerName { - t.Fatalf("Expected %q but got %q for VideoAnalyzerName", v.Expected.VideoAnalyzerName, actual.VideoAnalyzerName) + + if actual.AccountName != v.Expected.AccountName { + t.Fatalf("Expected %q but got %q for AccountName", v.Expected.AccountName, actual.AccountName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.VideoName != v.Expected.VideoName { + t.Fatalf("Expected %q but got %q for VideoName", v.Expected.VideoName, actual.VideoName) } + + } +} + +func TestSegmentsForVideoId(t *testing.T) { + segments := VideoId{}.Segments() + if len(segments) == 0 { + t.Fatalf("VideoId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) } } diff --git a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_videoanalyzer.go b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_videoanalyzer.go index 89fa48a85765..8eba6a0e47cc 100644 --- a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_videoanalyzer.go +++ b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_videoanalyzer.go @@ -7,102 +7,118 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = VideoAnalyzerId{} + +// VideoAnalyzerId is a struct representing the Resource ID for a Video Analyzer type VideoAnalyzerId struct { - SubscriptionId string - ResourceGroup string - Name string + SubscriptionId string + ResourceGroupName string + AccountName string } -func NewVideoAnalyzerID(subscriptionId, resourceGroup, name string) VideoAnalyzerId { +// NewVideoAnalyzerID returns a new VideoAnalyzerId struct +func NewVideoAnalyzerID(subscriptionId string, resourceGroupName string, accountName string) VideoAnalyzerId { return VideoAnalyzerId{ - SubscriptionId: 
subscriptionId, - ResourceGroup: resourceGroup, - Name: name, + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + AccountName: accountName, } } -func (id VideoAnalyzerId) String() string { - segments := []string{ - fmt.Sprintf("Name %q", id.Name), - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Video Analyzer", segmentsStr) -} - -func (id VideoAnalyzerId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Media/videoAnalyzers/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.Name) -} - -// ParseVideoAnalyzerID parses a VideoAnalyzer ID into an VideoAnalyzerId struct +// ParseVideoAnalyzerID parses 'input' into a VideoAnalyzerId func ParseVideoAnalyzerID(input string) (*VideoAnalyzerId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(VideoAnalyzerId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err - } - - resourceId := VideoAnalyzerId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") - } + var ok bool + id := VideoAnalyzerId{} - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.Name, err = id.PopSegment("videoAnalyzers"); err != nil { - return nil, err + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.AccountName, ok = parsed.Parsed["accountName"]; !ok { + return nil, fmt.Errorf("the segment 'accountName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParseVideoAnalyzerIDInsensitively parses an VideoAnalyzer ID into an VideoAnalyzerId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseVideoAnalyzerID method should be used instead for validation etc. 
+// ParseVideoAnalyzerIDInsensitively parses 'input' case-insensitively into a VideoAnalyzerId +// note: this method should only be used for API response data and not user input func ParseVideoAnalyzerIDInsensitively(input string) (*VideoAnalyzerId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(VideoAnalyzerId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := VideoAnalyzerId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + var ok bool + id := VideoAnalyzerId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.AccountName, ok = parsed.Parsed["accountName"]; !ok { + return nil, fmt.Errorf("the segment 'accountName' was not found in the resource id %q", input) } - // find the correct casing for the 'videoAnalyzers' segment - videoAnalyzersKey := "videoAnalyzers" - for key := range id.Path { - if strings.EqualFold(key, videoAnalyzersKey) { - videoAnalyzersKey = key - break - } + return &id, nil +} + +// ValidateVideoAnalyzerID checks that 'input' can be parsed as a Video Analyzer ID +func ValidateVideoAnalyzerID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.Name, err = id.PopSegment(videoAnalyzersKey); err != nil { - return nil, err + + if _, err := ParseVideoAnalyzerID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Video Analyzer ID +func (id VideoAnalyzerId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Media/videoAnalyzers/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.AccountName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Video Analyzer ID +func (id VideoAnalyzerId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftMedia", "Microsoft.Media", "Microsoft.Media"), + resourceids.StaticSegment("staticVideoAnalyzers", "videoAnalyzers", "videoAnalyzers"), + resourceids.UserSpecifiedSegment("accountName", "accountValue"), } +} - return &resourceId, nil +// String returns a human-readable description of this Video Analyzer ID +func (id VideoAnalyzerId) String() string { + components := []string{ + 
fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Account Name: %q", id.AccountName), + } + return fmt.Sprintf("Video Analyzer (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_videoanalyzer_test.go b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_videoanalyzer_test.go index 8f20afbeb47a..8036b2ef2230 100644 --- a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_videoanalyzer_test.go +++ b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/id_videoanalyzer_test.go @@ -6,13 +6,29 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = VideoAnalyzerId{} +var _ resourceids.ResourceId = VideoAnalyzerId{} -func TestVideoAnalyzerIDFormatter(t *testing.T) { - actual := NewVideoAnalyzerID("{subscriptionId}", "{resourceGroupName}", "{accountName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}" +func TestNewVideoAnalyzerID(t *testing.T) { + id := NewVideoAnalyzerID("12345678-1234-9876-4563-123456789012", "example-resource-group", "accountValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.AccountName != "accountValue" { + t.Fatalf("Expected %q but got %q for Segment 'AccountName'", id.AccountName, "accountValue") + } +} + +func TestFormatVideoAnalyzerID(t *testing.T) { + actual := NewVideoAnalyzerID("12345678-1234-9876-4563-123456789012", "example-resource-group", "accountValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,66 +38,61 @@ func TestParseVideoAnalyzerID(t *testing.T) { Error bool Expected *VideoAnalyzerId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", Error: true, }, - { - // missing value for Name - Input: 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue", Expected: &VideoAnalyzerId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{accountName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + AccountName: "accountValue", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.MEDIA/VIDEOANALYZERS/{ACCOUNTNAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -91,7 +102,7 @@ func TestParseVideoAnalyzerID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -100,12 +111,15 @@ func TestParseVideoAnalyzerID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.AccountName != v.Expected.AccountName { + t.Fatalf("Expected %q but got %q for AccountName", v.Expected.AccountName, actual.AccountName) } + } } @@ -115,90 +129,110 @@ func TestParseVideoAnalyzerIDInsensitively(t *testing.T) { Error bool Expected *VideoAnalyzerId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing Name - Input: 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}", - Expected: &VideoAnalyzerId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{accountName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoanalyzers/{accountName}", - Expected: &VideoAnalyzerId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{accountName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/VIDEOANALYZERS/{accountName}", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mEdIa", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mEdIa/vIdEoAnAlYzErS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue", Expected: &VideoAnalyzerId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{accountName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + AccountName: "accountValue", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/ViDeOaNaLyZeRs/{accountName}", + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Media/videoAnalyzers/accountValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mEdIa/vIdEoAnAlYzErS/aCcOuNtVaLuE", Expected: &VideoAnalyzerId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{accountName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + AccountName: "aCcOuNtVaLuE", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.mEdIa/vIdEoAnAlYzErS/aCcOuNtVaLuE/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -208,7 +242,7 @@ func TestParseVideoAnalyzerIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -217,11 +251,29 @@ func TestParseVideoAnalyzerIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.AccountName != v.Expected.AccountName { + t.Fatalf("Expected %q but got %q for AccountName", v.Expected.AccountName, actual.AccountName) } + + } +} + +func TestSegmentsForVideoAnalyzerId(t *testing.T) { + segments := VideoAnalyzerId{}.Segments() + if len(segments) == 0 { + t.Fatalf("VideoAnalyzerId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) } } diff --git a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/model_accesspolicyentity.go b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/model_accesspolicyentity.go index 92a8cc48b7ef..89df6403909e 100644 --- a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/model_accesspolicyentity.go +++ b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/model_accesspolicyentity.go @@ -1,8 +1,9 @@ package videoanalyzer type AccessPolicyEntity struct { - Id *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - SystemData *SystemData `json:"systemData,omitempty"` - Type *string `json:"type,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *AccessPolicyProperties `json:"properties,omitempty"` + SystemData *SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` } diff --git 
a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/model_accesspolicyproperties.go b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/model_accesspolicyproperties.go new file mode 100644 index 000000000000..7f37b8d64ceb --- /dev/null +++ b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/model_accesspolicyproperties.go @@ -0,0 +1,37 @@ +package videoanalyzer + +import ( + "encoding/json" + "fmt" +) + +type AccessPolicyProperties struct { + Authentication AuthenticationBase `json:"authentication"` + Role *AccessPolicyRole `json:"role,omitempty"` +} + +var _ json.Unmarshaler = &AccessPolicyProperties{} + +func (s *AccessPolicyProperties) UnmarshalJSON(bytes []byte) error { + type alias AccessPolicyProperties + var decoded alias + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling into AccessPolicyProperties: %+v", err) + } + + s.Role = decoded.Role + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling AccessPolicyProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["authentication"]; ok { + impl, err := unmarshalAuthenticationBaseImplementation(v) + if err != nil { + return fmt.Errorf("unmarshaling field 'Authentication' for 'AccessPolicyProperties': %+v", err) + } + s.Authentication = impl + } + return nil +} diff --git a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/model_authenticationbase.go b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/model_authenticationbase.go new file mode 100644 index 000000000000..e81c2d00f09d --- /dev/null +++ b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/model_authenticationbase.go @@ -0,0 +1,45 @@ +package videoanalyzer + +import ( + "encoding/json" + "fmt" + "strings" +) + +type AuthenticationBase interface { +} + +func unmarshalAuthenticationBaseImplementation(input []byte) (AuthenticationBase, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling AuthenticationBase into map[string]interface: %+v", err) + } + + value, ok := temp["@type"].(string) + if !ok { + return nil, nil + } + + if strings.EqualFold(value, "#Microsoft.VideoAnalyzer.JwtAuthentication") { + var out JwtAuthentication + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into JwtAuthentication: %+v", err) + } + return out, nil + } + + type RawAuthenticationBaseImpl struct { + Type string `json:"-"` + Values map[string]interface{} `json:"-"` + } + out := RawAuthenticationBaseImpl{ + Type: value, + Values: temp, + } + return out, nil + +} diff --git a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/model_ecctokenkey.go b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/model_ecctokenkey.go new file mode 100644 index 000000000000..3719b34c41b3 --- /dev/null +++ b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/model_ecctokenkey.go @@ -0,0 +1,41 @@ +package videoanalyzer + +import ( + "encoding/json" + "fmt" +) + +var _ TokenKey = EccTokenKey{} + +type EccTokenKey struct { + Alg AccessPolicyEccAlgo `json:"alg"` + X string `json:"x"` + Y string `json:"y"` + + // Fields inherited from TokenKey + Kid string `json:"kid"` +} + +var _ json.Marshaler = EccTokenKey{} + +func (s EccTokenKey) MarshalJSON() 
([]byte, error) { + type wrapper EccTokenKey + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling EccTokenKey: %+v", err) + } + + var decoded map[string]interface{} + if err := json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling EccTokenKey: %+v", err) + } + decoded["@type"] = "#Microsoft.VideoAnalyzer.EccTokenKey" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling EccTokenKey: %+v", err) + } + + return encoded, nil +} diff --git a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/model_jwtauthentication.go b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/model_jwtauthentication.go new file mode 100644 index 000000000000..c9404467d784 --- /dev/null +++ b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/model_jwtauthentication.go @@ -0,0 +1,78 @@ +package videoanalyzer + +import ( + "encoding/json" + "fmt" +) + +var _ AuthenticationBase = JwtAuthentication{} + +type JwtAuthentication struct { + Audiences *[]string `json:"audiences,omitempty"` + Claims *[]TokenClaim `json:"claims,omitempty"` + Issuers *[]string `json:"issuers,omitempty"` + Keys *[]TokenKey `json:"keys,omitempty"` + + // Fields inherited from AuthenticationBase +} + +var _ json.Marshaler = JwtAuthentication{} + +func (s JwtAuthentication) MarshalJSON() ([]byte, error) { + type wrapper JwtAuthentication + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling JwtAuthentication: %+v", err) + } + + var decoded map[string]interface{} + if err := json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling JwtAuthentication: %+v", err) + } + decoded["@type"] = "#Microsoft.VideoAnalyzer.JwtAuthentication" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling JwtAuthentication: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &JwtAuthentication{} + +func (s *JwtAuthentication) UnmarshalJSON(bytes []byte) error { + type alias JwtAuthentication + var decoded alias + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling into JwtAuthentication: %+v", err) + } + + s.Audiences = decoded.Audiences + s.Claims = decoded.Claims + s.Issuers = decoded.Issuers + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling JwtAuthentication into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["keys"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Keys into list []json.RawMessage: %+v", err) + } + + output := make([]TokenKey, 0) + for i, val := range listTemp { + impl, err := unmarshalTokenKeyImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Keys' for 'JwtAuthentication': %+v", i, err) + } + output = append(output, impl) + } + s.Keys = &output + } + return nil +} diff --git a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/model_rsatokenkey.go b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/model_rsatokenkey.go new file mode 100644 index 000000000000..b7f76314b54e --- /dev/null +++ b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/model_rsatokenkey.go @@ -0,0 +1,41 @@ +package videoanalyzer + 
+import ( + "encoding/json" + "fmt" +) + +var _ TokenKey = RsaTokenKey{} + +type RsaTokenKey struct { + Alg AccessPolicyRsaAlgo `json:"alg"` + E string `json:"e"` + N string `json:"n"` + + // Fields inherited from TokenKey + Kid string `json:"kid"` +} + +var _ json.Marshaler = RsaTokenKey{} + +func (s RsaTokenKey) MarshalJSON() ([]byte, error) { + type wrapper RsaTokenKey + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling RsaTokenKey: %+v", err) + } + + var decoded map[string]interface{} + if err := json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling RsaTokenKey: %+v", err) + } + decoded["@type"] = "#Microsoft.VideoAnalyzer.RsaTokenKey" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling RsaTokenKey: %+v", err) + } + + return encoded, nil +} diff --git a/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/model_tokenkey.go b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/model_tokenkey.go new file mode 100644 index 000000000000..aa245fd384ac --- /dev/null +++ b/internal/services/videoanalyzer/sdk/2021-05-01-preview/videoanalyzer/model_tokenkey.go @@ -0,0 +1,53 @@ +package videoanalyzer + +import ( + "encoding/json" + "fmt" + "strings" +) + +type TokenKey interface { +} + +func unmarshalTokenKeyImplementation(input []byte) (TokenKey, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling TokenKey into map[string]interface: %+v", err) + } + + value, ok := temp["@type"].(string) + if !ok { + return nil, nil + } + + if strings.EqualFold(value, "#Microsoft.VideoAnalyzer.EccTokenKey") { + var out EccTokenKey + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into EccTokenKey: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "#Microsoft.VideoAnalyzer.RsaTokenKey") { + var out RsaTokenKey + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into RsaTokenKey: %+v", err) + } + return out, nil + } + + type RawTokenKeyImpl struct { + Type string `json:"-"` + Values map[string]interface{} `json:"-"` + } + out := RawTokenKeyImpl{ + Type: value, + Values: temp, + } + return out, nil + +} diff --git a/internal/services/videoanalyzer/video_analyzer_edge_module_resource.go b/internal/services/videoanalyzer/video_analyzer_edge_module_resource.go index 6ecc218eea09..b8bc1439d9cb 100644 --- a/internal/services/videoanalyzer/video_analyzer_edge_module_resource.go +++ b/internal/services/videoanalyzer/video_analyzer_edge_module_resource.go @@ -105,9 +105,9 @@ func resourceVideoAnalyzerEdgeModuleRead(d *pluginsdk.ResourceData, meta interfa return fmt.Errorf("retrieving %s: %+v", *id, err) } - d.Set("name", id.Name) - d.Set("resource_group_name", id.ResourceGroup) - d.Set("video_analyzer_name", id.VideoAnalyzerName) + d.Set("name", id.EdgeModuleName) + d.Set("resource_group_name", id.ResourceGroupName) + d.Set("video_analyzer_name", id.AccountName) return nil } diff --git a/internal/services/videoanalyzer/video_analyzer_resource.go b/internal/services/videoanalyzer/video_analyzer_resource.go index f0c16dab6224..4e3fc782655e 100644 --- a/internal/services/videoanalyzer/video_analyzer_resource.go +++ b/internal/services/videoanalyzer/video_analyzer_resource.go @@ -165,8 +165,8 @@ func 
resourceVideoAnalyzerRead(d *pluginsdk.ResourceData, meta interface{}) erro return fmt.Errorf("retrieving %s: %+v", *id, err) } - d.Set("name", id.Name) - d.Set("resource_group_name", id.ResourceGroup) + d.Set("name", id.AccountName) + d.Set("resource_group_name", id.ResourceGroupName) if model := resp.Model; model != nil { d.Set("location", azure.NormalizeLocation(model.Location)) diff --git a/internal/services/vmware/sdk/2020-03-20/authorizations/constants.go b/internal/services/vmware/sdk/2020-03-20/authorizations/constants.go index bb752f337a1f..e7202db9a387 100644 --- a/internal/services/vmware/sdk/2020-03-20/authorizations/constants.go +++ b/internal/services/vmware/sdk/2020-03-20/authorizations/constants.go @@ -1,5 +1,7 @@ package authorizations +import "strings" + type ExpressRouteAuthorizationProvisioningState string const ( @@ -7,3 +9,26 @@ const ( ExpressRouteAuthorizationProvisioningStateSucceeded ExpressRouteAuthorizationProvisioningState = "Succeeded" ExpressRouteAuthorizationProvisioningStateUpdating ExpressRouteAuthorizationProvisioningState = "Updating" ) + +func PossibleValuesForExpressRouteAuthorizationProvisioningState() []string { + return []string{ + string(ExpressRouteAuthorizationProvisioningStateFailed), + string(ExpressRouteAuthorizationProvisioningStateSucceeded), + string(ExpressRouteAuthorizationProvisioningStateUpdating), + } +} + +func parseExpressRouteAuthorizationProvisioningState(input string) (*ExpressRouteAuthorizationProvisioningState, error) { + vals := map[string]ExpressRouteAuthorizationProvisioningState{ + "failed": ExpressRouteAuthorizationProvisioningStateFailed, + "succeeded": ExpressRouteAuthorizationProvisioningStateSucceeded, + "updating": ExpressRouteAuthorizationProvisioningStateUpdating, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ExpressRouteAuthorizationProvisioningState(input) + return &out, nil +} diff --git a/internal/services/vmware/sdk/2020-03-20/authorizations/id_authorization.go b/internal/services/vmware/sdk/2020-03-20/authorizations/id_authorization.go index 0dd89b71f772..338465da99b4 100644 --- a/internal/services/vmware/sdk/2020-03-20/authorizations/id_authorization.go +++ b/internal/services/vmware/sdk/2020-03-20/authorizations/id_authorization.go @@ -7,120 +7,131 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = AuthorizationId{} + +// AuthorizationId is a struct representing the Resource ID for a Authorization type AuthorizationId struct { - SubscriptionId string - ResourceGroup string - PrivateCloudName string - Name string + SubscriptionId string + ResourceGroupName string + PrivateCloudName string + AuthorizationName string } -func NewAuthorizationID(subscriptionId, resourceGroup, privateCloudName, name string) AuthorizationId { +// NewAuthorizationID returns a new AuthorizationId struct +func NewAuthorizationID(subscriptionId string, resourceGroupName string, privateCloudName string, authorizationName string) AuthorizationId { return AuthorizationId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, - PrivateCloudName: privateCloudName, - Name: name, + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + PrivateCloudName: privateCloudName, + AuthorizationName: authorizationName, } } -func (id AuthorizationId) String() string { - segments := []string{ - fmt.Sprintf("Name %q", id.Name), - fmt.Sprintf("Private Cloud 
Name %q", id.PrivateCloudName), - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Authorization", segmentsStr) -} - -func (id AuthorizationId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.AVS/privateClouds/%s/authorizations/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.PrivateCloudName, id.Name) -} - -// ParseAuthorizationID parses a Authorization ID into an AuthorizationId struct +// ParseAuthorizationID parses 'input' into a AuthorizationId func ParseAuthorizationID(input string) (*AuthorizationId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(AuthorizationId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := AuthorizationId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, - } + var ok bool + id := AuthorizationId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.PrivateCloudName, err = id.PopSegment("privateClouds"); err != nil { - return nil, err - } - if resourceId.Name, err = id.PopSegment("authorizations"); err != nil { - return nil, err + if id.PrivateCloudName, ok = parsed.Parsed["privateCloudName"]; !ok { + return nil, fmt.Errorf("the segment 'privateCloudName' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.AuthorizationName, ok = parsed.Parsed["authorizationName"]; !ok { + return nil, fmt.Errorf("the segment 'authorizationName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParseAuthorizationIDInsensitively parses an Authorization ID into an AuthorizationId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseAuthorizationID method should be used instead for validation etc. 
+// ParseAuthorizationIDInsensitively parses 'input' case-insensitively into a AuthorizationId +// note: this method should only be used for API response data and not user input func ParseAuthorizationIDInsensitively(input string) (*AuthorizationId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(AuthorizationId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := AuthorizationId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, - } + var ok bool + id := AuthorizationId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - // find the correct casing for the 'privateClouds' segment - privateCloudsKey := "privateClouds" - for key := range id.Path { - if strings.EqualFold(key, privateCloudsKey) { - privateCloudsKey = key - break - } + if id.PrivateCloudName, ok = parsed.Parsed["privateCloudName"]; !ok { + return nil, fmt.Errorf("the segment 'privateCloudName' was not found in the resource id %q", input) } - if resourceId.PrivateCloudName, err = id.PopSegment(privateCloudsKey); err != nil { - return nil, err + + if id.AuthorizationName, ok = parsed.Parsed["authorizationName"]; !ok { + return nil, fmt.Errorf("the segment 'authorizationName' was not found in the resource id %q", input) } - // find the correct casing for the 'authorizations' segment - authorizationsKey := "authorizations" - for key := range id.Path { - if strings.EqualFold(key, authorizationsKey) { - authorizationsKey = key - break - } + return &id, nil +} + +// ValidateAuthorizationID checks that 'input' can be parsed as a Authorization ID +func ValidateAuthorizationID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.Name, err = id.PopSegment(authorizationsKey); err != nil { - return nil, err + + if _, err := ParseAuthorizationID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Authorization ID +func (id AuthorizationId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.AVS/privateClouds/%s/authorizations/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.PrivateCloudName, id.AuthorizationName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Authorization ID +func (id AuthorizationId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + 
resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftAVS", "Microsoft.AVS", "Microsoft.AVS"), + resourceids.StaticSegment("staticPrivateClouds", "privateClouds", "privateClouds"), + resourceids.UserSpecifiedSegment("privateCloudName", "privateCloudValue"), + resourceids.StaticSegment("staticAuthorizations", "authorizations", "authorizations"), + resourceids.UserSpecifiedSegment("authorizationName", "authorizationValue"), } +} - return &resourceId, nil +// String returns a human-readable description of this Authorization ID +func (id AuthorizationId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Private Cloud Name: %q", id.PrivateCloudName), + fmt.Sprintf("Authorization Name: %q", id.AuthorizationName), + } + return fmt.Sprintf("Authorization (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/vmware/sdk/2020-03-20/authorizations/id_authorization_test.go b/internal/services/vmware/sdk/2020-03-20/authorizations/id_authorization_test.go index b5318c4bb703..18231844615a 100644 --- a/internal/services/vmware/sdk/2020-03-20/authorizations/id_authorization_test.go +++ b/internal/services/vmware/sdk/2020-03-20/authorizations/id_authorization_test.go @@ -6,13 +6,33 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = AuthorizationId{} +var _ resourceids.ResourceId = AuthorizationId{} -func TestAuthorizationIDFormatter(t *testing.T) { - actual := NewAuthorizationID("{subscriptionId}", "{resourceGroupName}", "{privateCloudName}", "{authorizationName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/authorizations/{authorizationName}" +func TestNewAuthorizationID(t *testing.T) { + id := NewAuthorizationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "privateCloudValue", "authorizationValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.PrivateCloudName != "privateCloudValue" { + t.Fatalf("Expected %q but got %q for Segment 'PrivateCloudName'", id.PrivateCloudName, "privateCloudValue") + } + + if id.AuthorizationName != "authorizationValue" { + t.Fatalf("Expected %q but got %q for Segment 'AuthorizationName'", id.AuthorizationName, "authorizationValue") + } +} + +func TestFormatAuthorizationID(t *testing.T) { + actual := NewAuthorizationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "privateCloudValue", "authorizationValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/authorizations/authorizationValue" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,79 +42,72 @@ func TestParseAuthorizationID(t *testing.T) { Error bool Expected 
*AuthorizationId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing PrivateCloudName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", Error: true, }, - { - // missing value for PrivateCloudName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/authorizations/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/authorizations", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/authorizations/{authorizationName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/authorizations/authorizationValue", Expected: &AuthorizationId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - PrivateCloudName: "{privateCloudName}", - Name: "{authorizationName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + PrivateCloudName: "privateCloudValue", + AuthorizationName: "authorizationValue", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.AVS/PRIVATECLOUDS/{PRIVATECLOUDNAME}/AUTHORIZATIONS/{AUTHORIZATIONNAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/authorizations/authorizationValue/extra", Error: true, }, } - 
for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -104,7 +117,7 @@ func TestParseAuthorizationID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -113,15 +126,19 @@ func TestParseAuthorizationID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + if actual.PrivateCloudName != v.Expected.PrivateCloudName { t.Fatalf("Expected %q but got %q for PrivateCloudName", v.Expected.PrivateCloudName, actual.PrivateCloudName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.AuthorizationName != v.Expected.AuthorizationName { + t.Fatalf("Expected %q but got %q for AuthorizationName", v.Expected.AuthorizationName, actual.AuthorizationName) } + } } @@ -131,106 +148,132 @@ func TestParseAuthorizationIDInsensitively(t *testing.T) { Error bool Expected *AuthorizationId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing PrivateCloudName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for PrivateCloudName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/authorizations/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", Error: true, }, - { - // valid - Input: 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/authorizations/{authorizationName}", - Expected: &AuthorizationId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - PrivateCloudName: "{privateCloudName}", - Name: "{authorizationName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateclouds/{privateCloudName}/authorizations/{authorizationName}", - Expected: &AuthorizationId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - PrivateCloudName: "{privateCloudName}", - Name: "{authorizationName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs/pRiVaTeClOuDvAlUe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/authorizations", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs/pRiVaTeClOuDvAlUe/aUtHoRiZaTiOnS", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/PRIVATECLOUDS/{privateCloudName}/AUTHORIZATIONS/{authorizationName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/authorizations/authorizationValue", Expected: &AuthorizationId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - PrivateCloudName: "{privateCloudName}", - Name: "{authorizationName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + PrivateCloudName: "privateCloudValue", + AuthorizationName: 
"authorizationValue", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/PrIvAtEcLoUdS/{privateCloudName}/AuThOrIzAtIoNs/{authorizationName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/authorizations/authorizationValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs/pRiVaTeClOuDvAlUe/aUtHoRiZaTiOnS/aUtHoRiZaTiOnVaLuE", Expected: &AuthorizationId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - PrivateCloudName: "{privateCloudName}", - Name: "{authorizationName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + PrivateCloudName: "pRiVaTeClOuDvAlUe", + AuthorizationName: "aUtHoRiZaTiOnVaLuE", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs/pRiVaTeClOuDvAlUe/aUtHoRiZaTiOnS/aUtHoRiZaTiOnVaLuE/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -240,7 +283,7 @@ func TestParseAuthorizationIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -249,14 +292,33 @@ func TestParseAuthorizationIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + if actual.PrivateCloudName != v.Expected.PrivateCloudName { t.Fatalf("Expected %q but got %q for PrivateCloudName", v.Expected.PrivateCloudName, actual.PrivateCloudName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.AuthorizationName != v.Expected.AuthorizationName { + t.Fatalf("Expected %q but got %q for AuthorizationName", v.Expected.AuthorizationName, actual.AuthorizationName) } + + } +} + +func TestSegmentsForAuthorizationId(t *testing.T) { + segments := AuthorizationId{}.Segments() + if len(segments) == 0 { + t.Fatalf("AuthorizationId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) } } diff --git a/internal/services/vmware/sdk/2020-03-20/authorizations/id_privatecloud.go b/internal/services/vmware/sdk/2020-03-20/authorizations/id_privatecloud.go index 6d08dbf877d0..b3edc5f94e20 100644 --- 
a/internal/services/vmware/sdk/2020-03-20/authorizations/id_privatecloud.go +++ b/internal/services/vmware/sdk/2020-03-20/authorizations/id_privatecloud.go @@ -7,102 +7,118 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = PrivateCloudId{} + +// PrivateCloudId is a struct representing the Resource ID for a Private Cloud type PrivateCloudId struct { - SubscriptionId string - ResourceGroup string - Name string + SubscriptionId string + ResourceGroupName string + PrivateCloudName string } -func NewPrivateCloudID(subscriptionId, resourceGroup, name string) PrivateCloudId { +// NewPrivateCloudID returns a new PrivateCloudId struct +func NewPrivateCloudID(subscriptionId string, resourceGroupName string, privateCloudName string) PrivateCloudId { return PrivateCloudId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, - Name: name, + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + PrivateCloudName: privateCloudName, } } -func (id PrivateCloudId) String() string { - segments := []string{ - fmt.Sprintf("Name %q", id.Name), - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Private Cloud", segmentsStr) -} - -func (id PrivateCloudId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.AVS/privateClouds/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.Name) -} - -// ParsePrivateCloudID parses a PrivateCloud ID into an PrivateCloudId struct +// ParsePrivateCloudID parses 'input' into a PrivateCloudId func ParsePrivateCloudID(input string) (*PrivateCloudId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(PrivateCloudId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err - } - - resourceId := PrivateCloudId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") - } + var ok bool + id := PrivateCloudId{} - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.Name, err = id.PopSegment("privateClouds"); err != nil { - return nil, err + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.PrivateCloudName, ok = parsed.Parsed["privateCloudName"]; !ok { + return nil, fmt.Errorf("the segment 'privateCloudName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParsePrivateCloudIDInsensitively parses an PrivateCloud ID into an PrivateCloudId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParsePrivateCloudID method should be used instead for validation etc. 
+// ParsePrivateCloudIDInsensitively parses 'input' case-insensitively into a PrivateCloudId +// note: this method should only be used for API response data and not user input func ParsePrivateCloudIDInsensitively(input string) (*PrivateCloudId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(PrivateCloudId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := PrivateCloudId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + var ok bool + id := PrivateCloudId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.PrivateCloudName, ok = parsed.Parsed["privateCloudName"]; !ok { + return nil, fmt.Errorf("the segment 'privateCloudName' was not found in the resource id %q", input) } - // find the correct casing for the 'privateClouds' segment - privateCloudsKey := "privateClouds" - for key := range id.Path { - if strings.EqualFold(key, privateCloudsKey) { - privateCloudsKey = key - break - } + return &id, nil +} + +// ValidatePrivateCloudID checks that 'input' can be parsed as a Private Cloud ID +func ValidatePrivateCloudID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.Name, err = id.PopSegment(privateCloudsKey); err != nil { - return nil, err + + if _, err := ParsePrivateCloudID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Private Cloud ID +func (id PrivateCloudId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.AVS/privateClouds/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.PrivateCloudName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Private Cloud ID +func (id PrivateCloudId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftAVS", "Microsoft.AVS", "Microsoft.AVS"), + resourceids.StaticSegment("staticPrivateClouds", "privateClouds", "privateClouds"), + resourceids.UserSpecifiedSegment("privateCloudName", "privateCloudValue"), } +} - return &resourceId, nil +// String returns a human-readable description of this Private Cloud ID +func (id PrivateCloudId) String() string { + components := []string{ + 
fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Private Cloud Name: %q", id.PrivateCloudName), + } + return fmt.Sprintf("Private Cloud (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/vmware/sdk/2020-03-20/authorizations/id_privatecloud_test.go b/internal/services/vmware/sdk/2020-03-20/authorizations/id_privatecloud_test.go index 45af0c8b1258..0d903dcfa290 100644 --- a/internal/services/vmware/sdk/2020-03-20/authorizations/id_privatecloud_test.go +++ b/internal/services/vmware/sdk/2020-03-20/authorizations/id_privatecloud_test.go @@ -6,13 +6,29 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = PrivateCloudId{} +var _ resourceids.ResourceId = PrivateCloudId{} -func TestPrivateCloudIDFormatter(t *testing.T) { - actual := NewPrivateCloudID("{subscriptionId}", "{resourceGroupName}", "{privateCloudName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}" +func TestNewPrivateCloudID(t *testing.T) { + id := NewPrivateCloudID("12345678-1234-9876-4563-123456789012", "example-resource-group", "privateCloudValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.PrivateCloudName != "privateCloudValue" { + t.Fatalf("Expected %q but got %q for Segment 'PrivateCloudName'", id.PrivateCloudName, "privateCloudValue") + } +} + +func TestFormatPrivateCloudID(t *testing.T) { + actual := NewPrivateCloudID("12345678-1234-9876-4563-123456789012", "example-resource-group", "privateCloudValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,66 +38,61 @@ func TestParsePrivateCloudID(t *testing.T) { Error bool Expected *PrivateCloudId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", Error: true, }, - { - // missing value for Name - Input: 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue", Expected: &PrivateCloudId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{privateCloudName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + PrivateCloudName: "privateCloudValue", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.AVS/PRIVATECLOUDS/{PRIVATECLOUDNAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -91,7 +102,7 @@ func TestParsePrivateCloudID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -100,12 +111,15 @@ func TestParsePrivateCloudID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.PrivateCloudName != v.Expected.PrivateCloudName { + t.Fatalf("Expected %q but got %q for PrivateCloudName", v.Expected.PrivateCloudName, actual.PrivateCloudName) } + } } @@ -115,90 +129,110 @@ func TestParsePrivateCloudIDInsensitively(t *testing.T) { Error bool Expected *PrivateCloudId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing Name - Input: 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}", - Expected: &PrivateCloudId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{privateCloudName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateclouds/{privateCloudName}", - Expected: &PrivateCloudId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{privateCloudName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/PRIVATECLOUDS/{privateCloudName}", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue", Expected: &PrivateCloudId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{privateCloudName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + PrivateCloudName: "privateCloudValue", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/PrIvAtEcLoUdS/{privateCloudName}", + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs/pRiVaTeClOuDvAlUe", Expected: &PrivateCloudId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{privateCloudName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + PrivateCloudName: "pRiVaTeClOuDvAlUe", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs/pRiVaTeClOuDvAlUe/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -208,7 +242,7 @@ func TestParsePrivateCloudIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -217,11 +251,29 @@ func TestParsePrivateCloudIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.PrivateCloudName != v.Expected.PrivateCloudName { + t.Fatalf("Expected %q but got %q for PrivateCloudName", v.Expected.PrivateCloudName, actual.PrivateCloudName) } + + } +} + +func TestSegmentsForPrivateCloudId(t *testing.T) { + segments := PrivateCloudId{}.Segments() + if len(segments) == 0 { + t.Fatalf("PrivateCloudId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) } } diff --git a/internal/services/vmware/sdk/2020-03-20/authorizations/predicates.go b/internal/services/vmware/sdk/2020-03-20/authorizations/predicates.go index e3e74dad2d35..c15ae8929084 100644 --- a/internal/services/vmware/sdk/2020-03-20/authorizations/predicates.go +++ b/internal/services/vmware/sdk/2020-03-20/authorizations/predicates.go @@ -7,6 +7,7 @@ type ExpressRouteAuthorizationPredicate struct { } func (p ExpressRouteAuthorizationPredicate) Matches(input ExpressRouteAuthorization) bool { + if p.Id != nil && (input.Id == nil && *p.Id != *input.Id) { return false } diff --git a/internal/services/vmware/sdk/2020-03-20/clusters/constants.go b/internal/services/vmware/sdk/2020-03-20/clusters/constants.go index 736f163a3b9f..6b09f491a56c 100644 --- a/internal/services/vmware/sdk/2020-03-20/clusters/constants.go +++ b/internal/services/vmware/sdk/2020-03-20/clusters/constants.go @@ -1,5 +1,7 @@ 
package clusters +import "strings" + type ClusterProvisioningState string const ( @@ -9,3 +11,30 @@ const ( ClusterProvisioningStateSucceeded ClusterProvisioningState = "Succeeded" ClusterProvisioningStateUpdating ClusterProvisioningState = "Updating" ) + +func PossibleValuesForClusterProvisioningState() []string { + return []string{ + string(ClusterProvisioningStateCancelled), + string(ClusterProvisioningStateDeleting), + string(ClusterProvisioningStateFailed), + string(ClusterProvisioningStateSucceeded), + string(ClusterProvisioningStateUpdating), + } +} + +func parseClusterProvisioningState(input string) (*ClusterProvisioningState, error) { + vals := map[string]ClusterProvisioningState{ + "cancelled": ClusterProvisioningStateCancelled, + "deleting": ClusterProvisioningStateDeleting, + "failed": ClusterProvisioningStateFailed, + "succeeded": ClusterProvisioningStateSucceeded, + "updating": ClusterProvisioningStateUpdating, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ClusterProvisioningState(input) + return &out, nil +} diff --git a/internal/services/vmware/sdk/2020-03-20/clusters/id_cluster.go b/internal/services/vmware/sdk/2020-03-20/clusters/id_cluster.go index 2a7134c1e18a..00e5edf64d89 100644 --- a/internal/services/vmware/sdk/2020-03-20/clusters/id_cluster.go +++ b/internal/services/vmware/sdk/2020-03-20/clusters/id_cluster.go @@ -7,120 +7,131 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = ClusterId{} + +// ClusterId is a struct representing the Resource ID for a Cluster type ClusterId struct { - SubscriptionId string - ResourceGroup string - PrivateCloudName string - Name string + SubscriptionId string + ResourceGroupName string + PrivateCloudName string + ClusterName string } -func NewClusterID(subscriptionId, resourceGroup, privateCloudName, name string) ClusterId { +// NewClusterID returns a new ClusterId struct +func NewClusterID(subscriptionId string, resourceGroupName string, privateCloudName string, clusterName string) ClusterId { return ClusterId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, - PrivateCloudName: privateCloudName, - Name: name, + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + PrivateCloudName: privateCloudName, + ClusterName: clusterName, } } -func (id ClusterId) String() string { - segments := []string{ - fmt.Sprintf("Name %q", id.Name), - fmt.Sprintf("Private Cloud Name %q", id.PrivateCloudName), - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Cluster", segmentsStr) -} - -func (id ClusterId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.AVS/privateClouds/%s/clusters/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.PrivateCloudName, id.Name) -} - -// ParseClusterID parses a Cluster ID into an ClusterId struct +// ParseClusterID parses 'input' into a ClusterId func ParseClusterID(input string) (*ClusterId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(ClusterId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := ClusterId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, - } + var ok bool + id := 
ClusterId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.PrivateCloudName, err = id.PopSegment("privateClouds"); err != nil { - return nil, err - } - if resourceId.Name, err = id.PopSegment("clusters"); err != nil { - return nil, err + if id.PrivateCloudName, ok = parsed.Parsed["privateCloudName"]; !ok { + return nil, fmt.Errorf("the segment 'privateCloudName' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.ClusterName, ok = parsed.Parsed["clusterName"]; !ok { + return nil, fmt.Errorf("the segment 'clusterName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParseClusterIDInsensitively parses an Cluster ID into an ClusterId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseClusterID method should be used instead for validation etc. +// ParseClusterIDInsensitively parses 'input' case-insensitively into a ClusterId +// note: this method should only be used for API response data and not user input func ParseClusterIDInsensitively(input string) (*ClusterId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(ClusterId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := ClusterId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, - } + var ok bool + id := ClusterId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - // find the correct casing for the 'privateClouds' segment - privateCloudsKey := "privateClouds" - for key := range id.Path { - if strings.EqualFold(key, privateCloudsKey) { - privateCloudsKey = key - break - } + if id.PrivateCloudName, ok = parsed.Parsed["privateCloudName"]; !ok { + return nil, fmt.Errorf("the segment 'privateCloudName' was not found in the resource id %q", input) } - if resourceId.PrivateCloudName, err = id.PopSegment(privateCloudsKey); err != nil { - return nil, err + + if id.ClusterName, ok = parsed.Parsed["clusterName"]; !ok { + return nil, fmt.Errorf("the segment 'clusterName' was not found in the resource id %q", input) } - // find the correct casing for the 'clusters' segment - clustersKey := "clusters" - for key := range id.Path { - if strings.EqualFold(key, clustersKey) { - clustersKey = key - break - } + return &id, nil +} + +// 
ValidateClusterID checks that 'input' can be parsed as a Cluster ID +func ValidateClusterID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.Name, err = id.PopSegment(clustersKey); err != nil { - return nil, err + + if _, err := ParseClusterID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Cluster ID +func (id ClusterId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.AVS/privateClouds/%s/clusters/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.PrivateCloudName, id.ClusterName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Cluster ID +func (id ClusterId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftAVS", "Microsoft.AVS", "Microsoft.AVS"), + resourceids.StaticSegment("staticPrivateClouds", "privateClouds", "privateClouds"), + resourceids.UserSpecifiedSegment("privateCloudName", "privateCloudValue"), + resourceids.StaticSegment("staticClusters", "clusters", "clusters"), + resourceids.UserSpecifiedSegment("clusterName", "clusterValue"), } +} - return &resourceId, nil +// String returns a human-readable description of this Cluster ID +func (id ClusterId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Private Cloud Name: %q", id.PrivateCloudName), + fmt.Sprintf("Cluster Name: %q", id.ClusterName), + } + return fmt.Sprintf("Cluster (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/vmware/sdk/2020-03-20/clusters/id_cluster_test.go b/internal/services/vmware/sdk/2020-03-20/clusters/id_cluster_test.go index ea067de4f331..24530ae10c62 100644 --- a/internal/services/vmware/sdk/2020-03-20/clusters/id_cluster_test.go +++ b/internal/services/vmware/sdk/2020-03-20/clusters/id_cluster_test.go @@ -6,13 +6,33 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = ClusterId{} +var _ resourceids.ResourceId = ClusterId{} -func TestClusterIDFormatter(t *testing.T) { - actual := NewClusterID("{subscriptionId}", "{resourceGroupName}", "{privateCloudName}", "{clusterName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/clusters/{clusterName}" +func TestNewClusterID(t *testing.T) { + id := NewClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "privateCloudValue", "clusterValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != 
"example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.PrivateCloudName != "privateCloudValue" { + t.Fatalf("Expected %q but got %q for Segment 'PrivateCloudName'", id.PrivateCloudName, "privateCloudValue") + } + + if id.ClusterName != "clusterValue" { + t.Fatalf("Expected %q but got %q for Segment 'ClusterName'", id.ClusterName, "clusterValue") + } +} + +func TestFormatClusterID(t *testing.T) { + actual := NewClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "privateCloudValue", "clusterValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/clusters/clusterValue" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,79 +42,72 @@ func TestParseClusterID(t *testing.T) { Error bool Expected *ClusterId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing PrivateCloudName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", Error: true, }, - { - // missing value for PrivateCloudName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/clusters/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/clusters", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/clusters/{clusterName}", + // 
Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/clusters/clusterValue", Expected: &ClusterId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - PrivateCloudName: "{privateCloudName}", - Name: "{clusterName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + PrivateCloudName: "privateCloudValue", + ClusterName: "clusterValue", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.AVS/PRIVATECLOUDS/{PRIVATECLOUDNAME}/CLUSTERS/{CLUSTERNAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/clusters/clusterValue/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -104,7 +117,7 @@ func TestParseClusterID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -113,15 +126,19 @@ func TestParseClusterID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + if actual.PrivateCloudName != v.Expected.PrivateCloudName { t.Fatalf("Expected %q but got %q for PrivateCloudName", v.Expected.PrivateCloudName, actual.PrivateCloudName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.ClusterName != v.Expected.ClusterName { + t.Fatalf("Expected %q but got %q for ClusterName", v.Expected.ClusterName, actual.ClusterName) } + } } @@ -131,106 +148,132 @@ func TestParseClusterIDInsensitively(t *testing.T) { Error bool Expected *ClusterId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing PrivateCloudName - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for PrivateCloudName - Input: 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/clusters/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/clusters/{clusterName}", - Expected: &ClusterId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - PrivateCloudName: "{privateCloudName}", - Name: "{clusterName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateclouds/{privateCloudName}/clusters/{clusterName}", - Expected: &ClusterId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - PrivateCloudName: "{privateCloudName}", - Name: "{clusterName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs/pRiVaTeClOuDvAlUe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/clusters", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs/pRiVaTeClOuDvAlUe/cLuStErS", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/PRIVATECLOUDS/{privateCloudName}/CLUSTERS/{clusterName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/clusters/clusterValue", Expected: &ClusterId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - PrivateCloudName: "{privateCloudName}", - Name: "{clusterName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + PrivateCloudName: "privateCloudValue", + ClusterName: "clusterValue", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/PrIvAtEcLoUdS/{privateCloudName}/ClUsTeRs/{clusterName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/clusters/clusterValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs/pRiVaTeClOuDvAlUe/cLuStErS/cLuStErVaLuE", Expected: &ClusterId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - PrivateCloudName: "{privateCloudName}", - Name: "{clusterName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + PrivateCloudName: "pRiVaTeClOuDvAlUe", + ClusterName: "cLuStErVaLuE", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs/pRiVaTeClOuDvAlUe/cLuStErS/cLuStErVaLuE/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -240,7 +283,7 @@ func TestParseClusterIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -249,14 +292,33 @@ func TestParseClusterIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + if actual.PrivateCloudName != v.Expected.PrivateCloudName { t.Fatalf("Expected %q but got %q for PrivateCloudName", v.Expected.PrivateCloudName, actual.PrivateCloudName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.ClusterName != v.Expected.ClusterName { + t.Fatalf("Expected %q but got %q for ClusterName", 
v.Expected.ClusterName, actual.ClusterName) } + + } +} + +func TestSegmentsForClusterId(t *testing.T) { + segments := ClusterId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ClusterId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) } } diff --git a/internal/services/vmware/sdk/2020-03-20/clusters/id_privatecloud.go b/internal/services/vmware/sdk/2020-03-20/clusters/id_privatecloud.go index 6ed5b1317fbb..0a2a00c61809 100644 --- a/internal/services/vmware/sdk/2020-03-20/clusters/id_privatecloud.go +++ b/internal/services/vmware/sdk/2020-03-20/clusters/id_privatecloud.go @@ -7,102 +7,118 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = PrivateCloudId{} + +// PrivateCloudId is a struct representing the Resource ID for a Private Cloud type PrivateCloudId struct { - SubscriptionId string - ResourceGroup string - Name string + SubscriptionId string + ResourceGroupName string + PrivateCloudName string } -func NewPrivateCloudID(subscriptionId, resourceGroup, name string) PrivateCloudId { +// NewPrivateCloudID returns a new PrivateCloudId struct +func NewPrivateCloudID(subscriptionId string, resourceGroupName string, privateCloudName string) PrivateCloudId { return PrivateCloudId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, - Name: name, + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + PrivateCloudName: privateCloudName, } } -func (id PrivateCloudId) String() string { - segments := []string{ - fmt.Sprintf("Name %q", id.Name), - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Private Cloud", segmentsStr) -} - -func (id PrivateCloudId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.AVS/privateClouds/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.Name) -} - -// ParsePrivateCloudID parses a PrivateCloud ID into an PrivateCloudId struct +// ParsePrivateCloudID parses 'input' into a PrivateCloudId func ParsePrivateCloudID(input string) (*PrivateCloudId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(PrivateCloudId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err - } - - resourceId := PrivateCloudId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") - } + var ok bool + id := PrivateCloudId{} - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.Name, err = id.PopSegment("privateClouds"); err != nil { - return nil, err + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err 
!= nil { - return nil, err + if id.PrivateCloudName, ok = parsed.Parsed["privateCloudName"]; !ok { + return nil, fmt.Errorf("the segment 'privateCloudName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParsePrivateCloudIDInsensitively parses an PrivateCloud ID into an PrivateCloudId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParsePrivateCloudID method should be used instead for validation etc. +// ParsePrivateCloudIDInsensitively parses 'input' case-insensitively into a PrivateCloudId +// note: this method should only be used for API response data and not user input func ParsePrivateCloudIDInsensitively(input string) (*PrivateCloudId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(PrivateCloudId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := PrivateCloudId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + var ok bool + id := PrivateCloudId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.PrivateCloudName, ok = parsed.Parsed["privateCloudName"]; !ok { + return nil, fmt.Errorf("the segment 'privateCloudName' was not found in the resource id %q", input) } - // find the correct casing for the 'privateClouds' segment - privateCloudsKey := "privateClouds" - for key := range id.Path { - if strings.EqualFold(key, privateCloudsKey) { - privateCloudsKey = key - break - } + return &id, nil +} + +// ValidatePrivateCloudID checks that 'input' can be parsed as a Private Cloud ID +func ValidatePrivateCloudID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.Name, err = id.PopSegment(privateCloudsKey); err != nil { - return nil, err + + if _, err := ParsePrivateCloudID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Private Cloud ID +func (id PrivateCloudId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.AVS/privateClouds/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.PrivateCloudName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Private Cloud ID +func (id PrivateCloudId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + 
resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftAVS", "Microsoft.AVS", "Microsoft.AVS"), + resourceids.StaticSegment("staticPrivateClouds", "privateClouds", "privateClouds"), + resourceids.UserSpecifiedSegment("privateCloudName", "privateCloudValue"), } +} - return &resourceId, nil +// String returns a human-readable description of this Private Cloud ID +func (id PrivateCloudId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Private Cloud Name: %q", id.PrivateCloudName), + } + return fmt.Sprintf("Private Cloud (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/vmware/sdk/2020-03-20/clusters/id_privatecloud_test.go b/internal/services/vmware/sdk/2020-03-20/clusters/id_privatecloud_test.go index 3423b1544ec9..a6a0cb5c8a37 100644 --- a/internal/services/vmware/sdk/2020-03-20/clusters/id_privatecloud_test.go +++ b/internal/services/vmware/sdk/2020-03-20/clusters/id_privatecloud_test.go @@ -6,13 +6,29 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = PrivateCloudId{} +var _ resourceids.ResourceId = PrivateCloudId{} -func TestPrivateCloudIDFormatter(t *testing.T) { - actual := NewPrivateCloudID("{subscriptionId}", "{resourceGroupName}", "{privateCloudName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}" +func TestNewPrivateCloudID(t *testing.T) { + id := NewPrivateCloudID("12345678-1234-9876-4563-123456789012", "example-resource-group", "privateCloudValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.PrivateCloudName != "privateCloudValue" { + t.Fatalf("Expected %q but got %q for Segment 'PrivateCloudName'", id.PrivateCloudName, "privateCloudValue") + } +} + +func TestFormatPrivateCloudID(t *testing.T) { + actual := NewPrivateCloudID("12345678-1234-9876-4563-123456789012", "example-resource-group", "privateCloudValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,66 +38,61 @@ func TestParsePrivateCloudID(t *testing.T) { Error bool Expected *PrivateCloudId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI 
+ Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue", Expected: &PrivateCloudId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{privateCloudName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + PrivateCloudName: "privateCloudValue", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.AVS/PRIVATECLOUDS/{PRIVATECLOUDNAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -91,7 +102,7 @@ func TestParsePrivateCloudID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -100,12 +111,15 @@ func TestParsePrivateCloudID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.PrivateCloudName != v.Expected.PrivateCloudName { + t.Fatalf("Expected %q but got %q for PrivateCloudName", v.Expected.PrivateCloudName, actual.PrivateCloudName) } + } } @@ -115,90 +129,110 @@ func TestParsePrivateCloudIDInsensitively(t *testing.T) { Error bool Expected *PrivateCloudId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing 
ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}", - Expected: &PrivateCloudId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{privateCloudName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateclouds/{privateCloudName}", - Expected: &PrivateCloudId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{privateCloudName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/PRIVATECLOUDS/{privateCloudName}", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue", Expected: &PrivateCloudId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{privateCloudName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + 
ResourceGroupName: "example-resource-group", + PrivateCloudName: "privateCloudValue", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/PrIvAtEcLoUdS/{privateCloudName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs/pRiVaTeClOuDvAlUe", Expected: &PrivateCloudId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{privateCloudName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + PrivateCloudName: "pRiVaTeClOuDvAlUe", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs/pRiVaTeClOuDvAlUe/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -208,7 +242,7 @@ func TestParsePrivateCloudIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -217,11 +251,29 @@ func TestParsePrivateCloudIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.PrivateCloudName != v.Expected.PrivateCloudName { + t.Fatalf("Expected %q but got %q for PrivateCloudName", v.Expected.PrivateCloudName, actual.PrivateCloudName) } + + } +} + +func TestSegmentsForPrivateCloudId(t *testing.T) { + segments := PrivateCloudId{}.Segments() + if len(segments) == 0 { + t.Fatalf("PrivateCloudId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) } } diff --git a/internal/services/vmware/sdk/2020-03-20/clusters/predicates.go b/internal/services/vmware/sdk/2020-03-20/clusters/predicates.go index 68ab87c0705b..e542c0c7d56d 100644 --- a/internal/services/vmware/sdk/2020-03-20/clusters/predicates.go +++ b/internal/services/vmware/sdk/2020-03-20/clusters/predicates.go @@ -7,6 +7,7 @@ type ClusterPredicate struct { } func (p ClusterPredicate) Matches(input Cluster) bool { + if p.Id != nil && (input.Id == nil && *p.Id != *input.Id) { return false } diff --git a/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/client.go 
b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/client.go new file mode 100644 index 000000000000..990e055ce033 --- /dev/null +++ b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/client.go @@ -0,0 +1,15 @@ +package hcxenterprisesites + +import "github.com/Azure/go-autorest/autorest" + +type HcxEnterpriseSitesClient struct { + Client autorest.Client + baseUri string +} + +func NewHcxEnterpriseSitesClientWithBaseURI(endpoint string) HcxEnterpriseSitesClient { + return HcxEnterpriseSitesClient{ + Client: autorest.NewClientWithUserAgent(userAgent()), + baseUri: endpoint, + } +} diff --git a/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/constants.go b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/constants.go new file mode 100644 index 000000000000..42bb60ce71ba --- /dev/null +++ b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/constants.go @@ -0,0 +1,37 @@ +package hcxenterprisesites + +import "strings" + +type HcxEnterpriseSiteStatus string + +const ( + HcxEnterpriseSiteStatusAvailable HcxEnterpriseSiteStatus = "Available" + HcxEnterpriseSiteStatusConsumed HcxEnterpriseSiteStatus = "Consumed" + HcxEnterpriseSiteStatusDeactivated HcxEnterpriseSiteStatus = "Deactivated" + HcxEnterpriseSiteStatusDeleted HcxEnterpriseSiteStatus = "Deleted" +) + +func PossibleValuesForHcxEnterpriseSiteStatus() []string { + return []string{ + string(HcxEnterpriseSiteStatusAvailable), + string(HcxEnterpriseSiteStatusConsumed), + string(HcxEnterpriseSiteStatusDeactivated), + string(HcxEnterpriseSiteStatusDeleted), + } +} + +func parseHcxEnterpriseSiteStatus(input string) (*HcxEnterpriseSiteStatus, error) { + vals := map[string]HcxEnterpriseSiteStatus{ + "available": HcxEnterpriseSiteStatusAvailable, + "consumed": HcxEnterpriseSiteStatusConsumed, + "deactivated": HcxEnterpriseSiteStatusDeactivated, + "deleted": HcxEnterpriseSiteStatusDeleted, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := HcxEnterpriseSiteStatus(input) + return &out, nil +} diff --git a/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/id_hcxenterprisesite.go b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/id_hcxenterprisesite.go new file mode 100644 index 000000000000..a4d5ca2d0c11 --- /dev/null +++ b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/id_hcxenterprisesite.go @@ -0,0 +1,137 @@ +package hcxenterprisesites + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = HcxEnterpriseSiteId{} + +// HcxEnterpriseSiteId is a struct representing the Resource ID for a Hcx Enterprise Site +type HcxEnterpriseSiteId struct { + SubscriptionId string + ResourceGroupName string + PrivateCloudName string + HcxEnterpriseSiteName string +} + +// NewHcxEnterpriseSiteID returns a new HcxEnterpriseSiteId struct +func NewHcxEnterpriseSiteID(subscriptionId string, resourceGroupName string, privateCloudName string, hcxEnterpriseSiteName string) HcxEnterpriseSiteId { + return HcxEnterpriseSiteId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + PrivateCloudName: privateCloudName, + HcxEnterpriseSiteName: hcxEnterpriseSiteName, + } +} + +// ParseHcxEnterpriseSiteID parses 'input' into a HcxEnterpriseSiteId +func ParseHcxEnterpriseSiteID(input string) (*HcxEnterpriseSiteId, error) { + parser := resourceids.NewParserFromResourceIdType(HcxEnterpriseSiteId{}) + 
parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := HcxEnterpriseSiteId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.PrivateCloudName, ok = parsed.Parsed["privateCloudName"]; !ok { + return nil, fmt.Errorf("the segment 'privateCloudName' was not found in the resource id %q", input) + } + + if id.HcxEnterpriseSiteName, ok = parsed.Parsed["hcxEnterpriseSiteName"]; !ok { + return nil, fmt.Errorf("the segment 'hcxEnterpriseSiteName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParseHcxEnterpriseSiteIDInsensitively parses 'input' case-insensitively into a HcxEnterpriseSiteId +// note: this method should only be used for API response data and not user input +func ParseHcxEnterpriseSiteIDInsensitively(input string) (*HcxEnterpriseSiteId, error) { + parser := resourceids.NewParserFromResourceIdType(HcxEnterpriseSiteId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := HcxEnterpriseSiteId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.PrivateCloudName, ok = parsed.Parsed["privateCloudName"]; !ok { + return nil, fmt.Errorf("the segment 'privateCloudName' was not found in the resource id %q", input) + } + + if id.HcxEnterpriseSiteName, ok = parsed.Parsed["hcxEnterpriseSiteName"]; !ok { + return nil, fmt.Errorf("the segment 'hcxEnterpriseSiteName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidateHcxEnterpriseSiteID checks that 'input' can be parsed as a Hcx Enterprise Site ID +func ValidateHcxEnterpriseSiteID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseHcxEnterpriseSiteID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Hcx Enterprise Site ID +func (id HcxEnterpriseSiteId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.AVS/privateClouds/%s/hcxEnterpriseSites/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.PrivateCloudName, id.HcxEnterpriseSiteName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Hcx Enterprise Site ID +func (id HcxEnterpriseSiteId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + 
resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftAVS", "Microsoft.AVS", "Microsoft.AVS"), + resourceids.StaticSegment("staticPrivateClouds", "privateClouds", "privateClouds"), + resourceids.UserSpecifiedSegment("privateCloudName", "privateCloudValue"), + resourceids.StaticSegment("staticHcxEnterpriseSites", "hcxEnterpriseSites", "hcxEnterpriseSites"), + resourceids.UserSpecifiedSegment("hcxEnterpriseSiteName", "hcxEnterpriseSiteValue"), + } +} + +// String returns a human-readable description of this Hcx Enterprise Site ID +func (id HcxEnterpriseSiteId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Private Cloud Name: %q", id.PrivateCloudName), + fmt.Sprintf("Hcx Enterprise Site Name: %q", id.HcxEnterpriseSiteName), + } + return fmt.Sprintf("Hcx Enterprise Site (%s)", strings.Join(components, "\n")) +} diff --git a/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/id_hcxenterprisesite_test.go b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/id_hcxenterprisesite_test.go new file mode 100644 index 000000000000..71abc0690f11 --- /dev/null +++ b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/id_hcxenterprisesite_test.go @@ -0,0 +1,324 @@ +package hcxenterprisesites + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = HcxEnterpriseSiteId{} + +func TestNewHcxEnterpriseSiteID(t *testing.T) { + id := NewHcxEnterpriseSiteID("12345678-1234-9876-4563-123456789012", "example-resource-group", "privateCloudValue", "hcxEnterpriseSiteValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.PrivateCloudName != "privateCloudValue" { + t.Fatalf("Expected %q but got %q for Segment 'PrivateCloudName'", id.PrivateCloudName, "privateCloudValue") + } + + if id.HcxEnterpriseSiteName != "hcxEnterpriseSiteValue" { + t.Fatalf("Expected %q but got %q for Segment 'HcxEnterpriseSiteName'", id.HcxEnterpriseSiteName, "hcxEnterpriseSiteValue") + } +} + +func TestFormatHcxEnterpriseSiteID(t *testing.T) { + actual := NewHcxEnterpriseSiteID("12345678-1234-9876-4563-123456789012", "example-resource-group", "privateCloudValue", "hcxEnterpriseSiteValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/hcxEnterpriseSites/hcxEnterpriseSiteValue" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) + } +} + +func TestParseHcxEnterpriseSiteID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *HcxEnterpriseSiteId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete 
URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/hcxEnterpriseSites", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/hcxEnterpriseSites/hcxEnterpriseSiteValue", + Expected: &HcxEnterpriseSiteId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + PrivateCloudName: "privateCloudValue", + HcxEnterpriseSiteName: "hcxEnterpriseSiteValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/hcxEnterpriseSites/hcxEnterpriseSiteValue/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseHcxEnterpriseSiteID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.PrivateCloudName != v.Expected.PrivateCloudName { + t.Fatalf("Expected %q but got %q for PrivateCloudName", v.Expected.PrivateCloudName, actual.PrivateCloudName) + } + + if actual.HcxEnterpriseSiteName != v.Expected.HcxEnterpriseSiteName { + t.Fatalf("Expected %q but got %q for HcxEnterpriseSiteName", v.Expected.HcxEnterpriseSiteName, actual.HcxEnterpriseSiteName) + } + + } +} + +func TestParseHcxEnterpriseSiteIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *HcxEnterpriseSiteId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs/pRiVaTeClOuDvAlUe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/hcxEnterpriseSites", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs/pRiVaTeClOuDvAlUe/hCxEnTeRpRiSeSiTeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/hcxEnterpriseSites/hcxEnterpriseSiteValue", + Expected: &HcxEnterpriseSiteId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + PrivateCloudName: "privateCloudValue", + HcxEnterpriseSiteName: "hcxEnterpriseSiteValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/hcxEnterpriseSites/hcxEnterpriseSiteValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs/pRiVaTeClOuDvAlUe/hCxEnTeRpRiSeSiTeS/hCxEnTeRpRiSeSiTeVaLuE", + Expected: &HcxEnterpriseSiteId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + PrivateCloudName: "pRiVaTeClOuDvAlUe", + HcxEnterpriseSiteName: "hCxEnTeRpRiSeSiTeVaLuE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs/pRiVaTeClOuDvAlUe/hCxEnTeRpRiSeSiTeS/hCxEnTeRpRiSeSiTeVaLuE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseHcxEnterpriseSiteIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.PrivateCloudName != v.Expected.PrivateCloudName { + t.Fatalf("Expected %q but got %q for PrivateCloudName", v.Expected.PrivateCloudName, actual.PrivateCloudName) + } + + if actual.HcxEnterpriseSiteName != v.Expected.HcxEnterpriseSiteName { + t.Fatalf("Expected %q but got %q for HcxEnterpriseSiteName", v.Expected.HcxEnterpriseSiteName, actual.HcxEnterpriseSiteName) + } + + } +} + +func TestSegmentsForHcxEnterpriseSiteId(t *testing.T) { + segments := HcxEnterpriseSiteId{}.Segments() + if len(segments) == 0 { + t.Fatalf("HcxEnterpriseSiteId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/id_privatecloud.go b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/id_privatecloud.go new file mode 100644 index 000000000000..db0de22fd132 --- /dev/null +++ b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/id_privatecloud.go @@ -0,0 +1,124 @@ +package hcxenterprisesites + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = PrivateCloudId{} + +// PrivateCloudId is a struct representing the Resource ID for a Private Cloud +type PrivateCloudId struct { + SubscriptionId string + ResourceGroupName string + PrivateCloudName string +} + +// NewPrivateCloudID returns a new PrivateCloudId struct +func NewPrivateCloudID(subscriptionId string, resourceGroupName string, privateCloudName string) PrivateCloudId { + return PrivateCloudId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + PrivateCloudName: privateCloudName, + } +} + +// ParsePrivateCloudID parses 'input' into a PrivateCloudId +func ParsePrivateCloudID(input string) (*PrivateCloudId, error) { + parser := resourceids.NewParserFromResourceIdType(PrivateCloudId{}) + parsed, 
err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := PrivateCloudId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.PrivateCloudName, ok = parsed.Parsed["privateCloudName"]; !ok { + return nil, fmt.Errorf("the segment 'privateCloudName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParsePrivateCloudIDInsensitively parses 'input' case-insensitively into a PrivateCloudId +// note: this method should only be used for API response data and not user input +func ParsePrivateCloudIDInsensitively(input string) (*PrivateCloudId, error) { + parser := resourceids.NewParserFromResourceIdType(PrivateCloudId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := PrivateCloudId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.PrivateCloudName, ok = parsed.Parsed["privateCloudName"]; !ok { + return nil, fmt.Errorf("the segment 'privateCloudName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidatePrivateCloudID checks that 'input' can be parsed as a Private Cloud ID +func ValidatePrivateCloudID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParsePrivateCloudID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Private Cloud ID +func (id PrivateCloudId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.AVS/privateClouds/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.PrivateCloudName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Private Cloud ID +func (id PrivateCloudId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftAVS", "Microsoft.AVS", "Microsoft.AVS"), + resourceids.StaticSegment("staticPrivateClouds", "privateClouds", "privateClouds"), + resourceids.UserSpecifiedSegment("privateCloudName", "privateCloudValue"), + } +} + +// String returns a human-readable description of this Private Cloud ID +func (id PrivateCloudId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource 
Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Private Cloud Name: %q", id.PrivateCloudName), + } + return fmt.Sprintf("Private Cloud (%s)", strings.Join(components, "\n")) +} diff --git a/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/id_privatecloud_test.go b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/id_privatecloud_test.go new file mode 100644 index 000000000000..02c4081733f8 --- /dev/null +++ b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/id_privatecloud_test.go @@ -0,0 +1,279 @@ +package hcxenterprisesites + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = PrivateCloudId{} + +func TestNewPrivateCloudID(t *testing.T) { + id := NewPrivateCloudID("12345678-1234-9876-4563-123456789012", "example-resource-group", "privateCloudValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.PrivateCloudName != "privateCloudValue" { + t.Fatalf("Expected %q but got %q for Segment 'PrivateCloudName'", id.PrivateCloudName, "privateCloudValue") + } +} + +func TestFormatPrivateCloudID(t *testing.T) { + actual := NewPrivateCloudID("12345678-1234-9876-4563-123456789012", "example-resource-group", "privateCloudValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) + } +} + +func TestParsePrivateCloudID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *PrivateCloudId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue", + Expected: &PrivateCloudId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + PrivateCloudName: "privateCloudValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParsePrivateCloudID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.PrivateCloudName != v.Expected.PrivateCloudName { + t.Fatalf("Expected %q but got %q for PrivateCloudName", v.Expected.PrivateCloudName, actual.PrivateCloudName) + } + + } +} + +func TestParsePrivateCloudIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *PrivateCloudId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs", + Error: true, + }, + { + // Valid URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue", + Expected: &PrivateCloudId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + PrivateCloudName: "privateCloudValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs/pRiVaTeClOuDvAlUe", + Expected: &PrivateCloudId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + PrivateCloudName: "pRiVaTeClOuDvAlUe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs/pRiVaTeClOuDvAlUe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParsePrivateCloudIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.PrivateCloudName != v.Expected.PrivateCloudName { + t.Fatalf("Expected %q but got %q for PrivateCloudName", v.Expected.PrivateCloudName, actual.PrivateCloudName) + } + + } +} + +func TestSegmentsForPrivateCloudId(t *testing.T) { + segments := PrivateCloudId{}.Segments() + if len(segments) == 0 { + t.Fatalf("PrivateCloudId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/method_createorupdate_autorest.go b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/method_createorupdate_autorest.go new file mode 100644 index 000000000000..272b3f156b12 --- /dev/null +++ b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/method_createorupdate_autorest.go @@ -0,0 +1,65 @@ +package hcxenterprisesites + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type CreateOrUpdateResponse struct { + HttpResponse *http.Response + Model *HcxEnterpriseSite +} + +// CreateOrUpdate ... 
+func (c HcxEnterpriseSitesClient) CreateOrUpdate(ctx context.Context, id HcxEnterpriseSiteId, input HcxEnterpriseSite) (result CreateOrUpdateResponse, err error) { + req, err := c.preparerForCreateOrUpdate(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "hcxenterprisesites.HcxEnterpriseSitesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "hcxenterprisesites.HcxEnterpriseSitesClient", "CreateOrUpdate", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForCreateOrUpdate(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "hcxenterprisesites.HcxEnterpriseSitesClient", "CreateOrUpdate", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForCreateOrUpdate prepares the CreateOrUpdate request. +func (c HcxEnterpriseSitesClient) preparerForCreateOrUpdate(ctx context.Context, id HcxEnterpriseSiteId, input HcxEnterpriseSite) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForCreateOrUpdate handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (c HcxEnterpriseSitesClient) responderForCreateOrUpdate(resp *http.Response) (result CreateOrUpdateResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/method_delete_autorest.go b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/method_delete_autorest.go new file mode 100644 index 000000000000..2a137eb7911c --- /dev/null +++ b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/method_delete_autorest.go @@ -0,0 +1,61 @@ +package hcxenterprisesites + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type DeleteResponse struct { + HttpResponse *http.Response +} + +// Delete ... 
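// A usage sketch (illustrative only; `client` and `ctx` are assumed to be set up
// as in the CreateOrUpdate example, and the ID values are placeholders):
//
//	id := NewHcxEnterpriseSiteID("12345678-1234-9876-4563-123456789012", "example-resource-group", "privateCloudValue", "hcxEnterpriseSiteValue")
//	if _, err := client.Delete(ctx, id); err != nil {
//		// handle the error
//	}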
+func (c HcxEnterpriseSitesClient) Delete(ctx context.Context, id HcxEnterpriseSiteId) (result DeleteResponse, err error) { + req, err := c.preparerForDelete(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "hcxenterprisesites.HcxEnterpriseSitesClient", "Delete", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "hcxenterprisesites.HcxEnterpriseSitesClient", "Delete", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForDelete(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "hcxenterprisesites.HcxEnterpriseSitesClient", "Delete", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForDelete prepares the Delete request. +func (c HcxEnterpriseSitesClient) preparerForDelete(ctx context.Context, id HcxEnterpriseSiteId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForDelete handles the response to the Delete request. The method always +// closes the http.Response Body. +func (c HcxEnterpriseSitesClient) responderForDelete(resp *http.Response) (result DeleteResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/method_get_autorest.go b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/method_get_autorest.go new file mode 100644 index 000000000000..b939bcacc13d --- /dev/null +++ b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/method_get_autorest.go @@ -0,0 +1,64 @@ +package hcxenterprisesites + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type GetResponse struct { + HttpResponse *http.Response + Model *HcxEnterpriseSite +} + +// Get ... +func (c HcxEnterpriseSitesClient) Get(ctx context.Context, id HcxEnterpriseSiteId) (result GetResponse, err error) { + req, err := c.preparerForGet(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "hcxenterprisesites.HcxEnterpriseSitesClient", "Get", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "hcxenterprisesites.HcxEnterpriseSitesClient", "Get", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForGet(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "hcxenterprisesites.HcxEnterpriseSitesClient", "Get", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForGet prepares the Get request. 
+func (c HcxEnterpriseSitesClient) preparerForGet(ctx context.Context, id HcxEnterpriseSiteId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForGet handles the response to the Get request. The method always +// closes the http.Response Body. +func (c HcxEnterpriseSitesClient) responderForGet(resp *http.Response) (result GetResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/method_list_autorest.go b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/method_list_autorest.go new file mode 100644 index 000000000000..5aebcec842dd --- /dev/null +++ b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/method_list_autorest.go @@ -0,0 +1,183 @@ +package hcxenterprisesites + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type ListResponse struct { + HttpResponse *http.Response + Model *[]HcxEnterpriseSite + + nextLink *string + nextPageFunc func(ctx context.Context, nextLink string) (ListResponse, error) +} + +type ListCompleteResult struct { + Items []HcxEnterpriseSite +} + +func (r ListResponse) HasMore() bool { + return r.nextLink != nil +} + +func (r ListResponse) LoadMore(ctx context.Context) (resp ListResponse, err error) { + if !r.HasMore() { + err = fmt.Errorf("no more pages returned") + return + } + return r.nextPageFunc(ctx, *r.nextLink) +} + +// List ... 
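// A paging sketch (illustrative only; `client` is assumed to be an
// HcxEnterpriseSitesClient built with NewHcxEnterpriseSitesClientWithBaseURI and
// given an Authorizer, and the ID values are the placeholders used in the tests):
//
//	ctx := context.TODO()
//	id := NewPrivateCloudID("12345678-1234-9876-4563-123456789012", "example-resource-group", "privateCloudValue")
//	all, err := client.ListComplete(ctx, id)
//	if err != nil {
//		// handle the error
//	}
//	for _, site := range all.Items {
//		_ = site // each item is an HcxEnterpriseSite
//	}
//
// ListComplete (defined below) follows nextLink pages via LoadMore, so callers
// work with the flattened Items slice instead of driving pagination themselves.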
+func (c HcxEnterpriseSitesClient) List(ctx context.Context, id PrivateCloudId) (resp ListResponse, err error) { + req, err := c.preparerForList(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "hcxenterprisesites.HcxEnterpriseSitesClient", "List", nil, "Failure preparing request") + return + } + + resp.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "hcxenterprisesites.HcxEnterpriseSitesClient", "List", resp.HttpResponse, "Failure sending request") + return + } + + resp, err = c.responderForList(resp.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "hcxenterprisesites.HcxEnterpriseSitesClient", "List", resp.HttpResponse, "Failure responding to request") + return + } + return +} + +// ListComplete retrieves all of the results into a single object +func (c HcxEnterpriseSitesClient) ListComplete(ctx context.Context, id PrivateCloudId) (ListCompleteResult, error) { + return c.ListCompleteMatchingPredicate(ctx, id, HcxEnterpriseSitePredicate{}) +} + +// ListCompleteMatchingPredicate retrieves all of the results and then applied the predicate +func (c HcxEnterpriseSitesClient) ListCompleteMatchingPredicate(ctx context.Context, id PrivateCloudId, predicate HcxEnterpriseSitePredicate) (resp ListCompleteResult, err error) { + items := make([]HcxEnterpriseSite, 0) + + page, err := c.List(ctx, id) + if err != nil { + err = fmt.Errorf("loading the initial page: %+v", err) + return + } + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + for page.HasMore() { + page, err = page.LoadMore(ctx) + if err != nil { + err = fmt.Errorf("loading the next page: %+v", err) + return + } + + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + } + + out := ListCompleteResult{ + Items: items, + } + return out, nil +} + +// preparerForList prepares the List request. +func (c HcxEnterpriseSitesClient) preparerForList(ctx context.Context, id PrivateCloudId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/hcxEnterpriseSites", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// preparerForListWithNextLink prepares the List request with the given nextLink token. +func (c HcxEnterpriseSitesClient) preparerForListWithNextLink(ctx context.Context, nextLink string) (*http.Request, error) { + uri, err := url.Parse(nextLink) + if err != nil { + return nil, fmt.Errorf("parsing nextLink %q: %+v", nextLink, err) + } + queryParameters := map[string]interface{}{} + for k, v := range uri.Query() { + if len(v) == 0 { + continue + } + val := v[0] + val = autorest.Encode("query", val) + queryParameters[k] = val + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(uri.Path), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForList handles the response to the List request. The method always +// closes the http.Response Body. 
+func (c HcxEnterpriseSitesClient) responderForList(resp *http.Response) (result ListResponse, err error) { + type page struct { + Values []HcxEnterpriseSite `json:"value"` + NextLink *string `json:"nextLink"` + } + var respObj page + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&respObj), + autorest.ByClosing()) + result.HttpResponse = resp + result.Model = &respObj.Values + result.nextLink = respObj.NextLink + if respObj.NextLink != nil { + result.nextPageFunc = func(ctx context.Context, nextLink string) (result ListResponse, err error) { + req, err := c.preparerForListWithNextLink(ctx, nextLink) + if err != nil { + err = autorest.NewErrorWithError(err, "hcxenterprisesites.HcxEnterpriseSitesClient", "List", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "hcxenterprisesites.HcxEnterpriseSitesClient", "List", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForList(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "hcxenterprisesites.HcxEnterpriseSitesClient", "List", result.HttpResponse, "Failure responding to request") + return + } + + return + } + } + return +} diff --git a/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/model_hcxenterprisesite.go b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/model_hcxenterprisesite.go new file mode 100644 index 000000000000..51ab5653b201 --- /dev/null +++ b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/model_hcxenterprisesite.go @@ -0,0 +1,8 @@ +package hcxenterprisesites + +type HcxEnterpriseSite struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *HcxEnterpriseSiteProperties `json:"properties,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/model_hcxenterprisesiteproperties.go b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/model_hcxenterprisesiteproperties.go new file mode 100644 index 000000000000..f18b4eb9c34b --- /dev/null +++ b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/model_hcxenterprisesiteproperties.go @@ -0,0 +1,6 @@ +package hcxenterprisesites + +type HcxEnterpriseSiteProperties struct { + ActivationKey *string `json:"activationKey,omitempty"` + Status *HcxEnterpriseSiteStatus `json:"status,omitempty"` +} diff --git a/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/predicates.go b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/predicates.go new file mode 100644 index 000000000000..fb7d5edf1db5 --- /dev/null +++ b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/predicates.go @@ -0,0 +1,24 @@ +package hcxenterprisesites + +type HcxEnterpriseSitePredicate struct { + Id *string + Name *string + Type *string +} + +func (p HcxEnterpriseSitePredicate) Matches(input HcxEnterpriseSite) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/version.go b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/version.go new file mode 100644
index 000000000000..b2fc96749ca6 --- /dev/null +++ b/internal/services/vmware/sdk/2020-03-20/hcxenterprisesites/version.go @@ -0,0 +1,9 @@ +package hcxenterprisesites + +import "fmt" + +const defaultApiVersion = "2020-03-20" + +func userAgent() string { + return fmt.Sprintf("pandora/hcxenterprisesites/%s", defaultApiVersion) +} diff --git a/internal/services/vmware/sdk/2020-03-20/locations/client.go b/internal/services/vmware/sdk/2020-03-20/locations/client.go new file mode 100644 index 000000000000..e3648c22a0bb --- /dev/null +++ b/internal/services/vmware/sdk/2020-03-20/locations/client.go @@ -0,0 +1,15 @@ +package locations + +import "github.com/Azure/go-autorest/autorest" + +type LocationsClient struct { + Client autorest.Client + baseUri string +} + +func NewLocationsClientWithBaseURI(endpoint string) LocationsClient { + return LocationsClient{ + Client: autorest.NewClientWithUserAgent(userAgent()), + baseUri: endpoint, + } +} diff --git a/internal/services/vmware/sdk/2020-03-20/locations/constants.go b/internal/services/vmware/sdk/2020-03-20/locations/constants.go new file mode 100644 index 000000000000..46930b0bebab --- /dev/null +++ b/internal/services/vmware/sdk/2020-03-20/locations/constants.go @@ -0,0 +1,62 @@ +package locations + +import "strings" + +type QuotaEnabled string + +const ( + QuotaEnabledDisabled QuotaEnabled = "Disabled" + QuotaEnabledEnabled QuotaEnabled = "Enabled" +) + +func PossibleValuesForQuotaEnabled() []string { + return []string{ + string(QuotaEnabledDisabled), + string(QuotaEnabledEnabled), + } +} + +func parseQuotaEnabled(input string) (*QuotaEnabled, error) { + vals := map[string]QuotaEnabled{ + "disabled": QuotaEnabledDisabled, + "enabled": QuotaEnabledEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := QuotaEnabled(input) + return &out, nil +} + +type TrialStatus string + +const ( + TrialStatusTrialAvailable TrialStatus = "TrialAvailable" + TrialStatusTrialDisabled TrialStatus = "TrialDisabled" + TrialStatusTrialUsed TrialStatus = "TrialUsed" +) + +func PossibleValuesForTrialStatus() []string { + return []string{ + string(TrialStatusTrialAvailable), + string(TrialStatusTrialDisabled), + string(TrialStatusTrialUsed), + } +} + +func parseTrialStatus(input string) (*TrialStatus, error) { + vals := map[string]TrialStatus{ + "trialavailable": TrialStatusTrialAvailable, + "trialdisabled": TrialStatusTrialDisabled, + "trialused": TrialStatusTrialUsed, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := TrialStatus(input) + return &out, nil +} diff --git a/internal/services/vmware/sdk/2020-03-20/locations/id_location.go b/internal/services/vmware/sdk/2020-03-20/locations/id_location.go new file mode 100644 index 000000000000..ddc2c7cfcd52 --- /dev/null +++ b/internal/services/vmware/sdk/2020-03-20/locations/id_location.go @@ -0,0 +1,111 @@ +package locations + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = LocationId{} + +// LocationId is a struct representing the Resource ID for a Location +type LocationId struct { + SubscriptionId string + Location string +} + +// NewLocationID returns a new LocationId struct +func NewLocationID(subscriptionId string, location string) LocationId { + return LocationId{ + SubscriptionId: subscriptionId, + Location: location, + } +} 
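// An illustrative round-trip of the helpers in this file (the values are the
// same placeholders the tests use; this is a sketch, not generated code):
//
//	id := NewLocationID("12345678-1234-9876-4563-123456789012", "locationValue")
//	raw := id.ID()
//	// raw == "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.AVS/locations/locationValue"
//	parsed, err := ParseLocationID(raw)
//	if err != nil {
//		// handle the error
//	}
//	_ = parsed.Location // "locationValue"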
+ +// ParseLocationID parses 'input' into a LocationId +func ParseLocationID(input string) (*LocationId, error) { + parser := resourceids.NewParserFromResourceIdType(LocationId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := LocationId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.Location, ok = parsed.Parsed["location"]; !ok { + return nil, fmt.Errorf("the segment 'location' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParseLocationIDInsensitively parses 'input' case-insensitively into a LocationId +// note: this method should only be used for API response data and not user input +func ParseLocationIDInsensitively(input string) (*LocationId, error) { + parser := resourceids.NewParserFromResourceIdType(LocationId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := LocationId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.Location, ok = parsed.Parsed["location"]; !ok { + return nil, fmt.Errorf("the segment 'location' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidateLocationID checks that 'input' can be parsed as a Location ID +func ValidateLocationID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseLocationID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Location ID +func (id LocationId) ID() string { + fmtString := "/subscriptions/%s/providers/Microsoft.AVS/locations/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.Location) +} + +// Segments returns a slice of Resource ID Segments which comprise this Location ID +func (id LocationId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftAVS", "Microsoft.AVS", "Microsoft.AVS"), + resourceids.StaticSegment("staticLocations", "locations", "locations"), + resourceids.UserSpecifiedSegment("location", "locationValue"), + } +} + +// String returns a human-readable description of this Location ID +func (id LocationId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Location: %q", id.Location), + } + return fmt.Sprintf("Location (%s)", strings.Join(components, "\n")) +} diff --git a/internal/services/vmware/sdk/2020-03-20/locations/id_location_test.go b/internal/services/vmware/sdk/2020-03-20/locations/id_location_test.go new file mode 100644 index 000000000000..3029217e324e --- /dev/null +++ b/internal/services/vmware/sdk/2020-03-20/locations/id_location_test.go @@ -0,0 +1,234 @@ +package locations + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ 
resourceids.ResourceId = LocationId{} + +func TestNewLocationID(t *testing.T) { + id := NewLocationID("12345678-1234-9876-4563-123456789012", "locationValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.Location != "locationValue" { + t.Fatalf("Expected %q but got %q for Segment 'Location'", id.Location, "locationValue") + } +} + +func TestFormatLocationID(t *testing.T) { + actual := NewLocationID("12345678-1234-9876-4563-123456789012", "locationValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.AVS/locations/locationValue" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) + } +} + +func TestParseLocationID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *LocationId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.AVS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.AVS/locations", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.AVS/locations/locationValue", + Expected: &LocationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + Location: "locationValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.AVS/locations/locationValue/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseLocationID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.Location != v.Expected.Location { + t.Fatalf("Expected %q but got %q for Location", v.Expected.Location, actual.Location) + } + + } +} + +func TestParseLocationIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *LocationId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs", + 
Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.AVS", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.aVs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.AVS/locations", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.aVs/lOcAtIoNs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.AVS/locations/locationValue", + Expected: &LocationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + Location: "locationValue", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.AVS/locations/locationValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.aVs/lOcAtIoNs/lOcAtIoNvAlUe", + Expected: &LocationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + Location: "lOcAtIoNvAlUe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.aVs/lOcAtIoNs/lOcAtIoNvAlUe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseLocationIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.Location != v.Expected.Location { + t.Fatalf("Expected %q but got %q for Location", v.Expected.Location, actual.Location) + } + + } +} + +func TestSegmentsForLocationId(t *testing.T) { + segments := LocationId{}.Segments() + if len(segments) == 0 { + t.Fatalf("LocationId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/internal/services/vmware/sdk/2020-03-20/locations/method_checkquotaavailability_autorest.go b/internal/services/vmware/sdk/2020-03-20/locations/method_checkquotaavailability_autorest.go new file mode 100644 index 000000000000..3237df800c9e --- /dev/null +++ b/internal/services/vmware/sdk/2020-03-20/locations/method_checkquotaavailability_autorest.go @@ -0,0 +1,65 @@ +package locations + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type CheckQuotaAvailabilityResponse struct { + HttpResponse *http.Response + Model *Quota +} + +// CheckQuotaAvailability ... 
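+// It issues a POST against the Location ID's checkQuotaAvailability endpoint and unmarshals the
+// JSON response body into the Quota model. A minimal usage sketch (illustrative values only; it
+// assumes a configured LocationsClient named `client` and a context `ctx` are already in scope):
+//
+//	id := NewLocationID("12345678-1234-9876-4563-123456789012", "westus2")
+//	resp, err := client.CheckQuotaAvailability(ctx, id)
+//	if err == nil && resp.Model != nil && resp.Model.HostsRemaining != nil {
+//		for sku, remaining := range *resp.Model.HostsRemaining {
+//			fmt.Printf("%d hosts remaining for %q\n", remaining, sku)
+//		}
+//	}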
+func (c LocationsClient) CheckQuotaAvailability(ctx context.Context, id LocationId) (result CheckQuotaAvailabilityResponse, err error) { + req, err := c.preparerForCheckQuotaAvailability(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "locations.LocationsClient", "CheckQuotaAvailability", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "locations.LocationsClient", "CheckQuotaAvailability", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForCheckQuotaAvailability(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "locations.LocationsClient", "CheckQuotaAvailability", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForCheckQuotaAvailability prepares the CheckQuotaAvailability request. +func (c LocationsClient) preparerForCheckQuotaAvailability(ctx context.Context, id LocationId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/checkQuotaAvailability", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForCheckQuotaAvailability handles the response to the CheckQuotaAvailability request. The method always +// closes the http.Response Body. +func (c LocationsClient) responderForCheckQuotaAvailability(resp *http.Response) (result CheckQuotaAvailabilityResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/internal/services/vmware/sdk/2020-03-20/locations/method_checktrialavailability_autorest.go b/internal/services/vmware/sdk/2020-03-20/locations/method_checktrialavailability_autorest.go new file mode 100644 index 000000000000..b0d3ce07926d --- /dev/null +++ b/internal/services/vmware/sdk/2020-03-20/locations/method_checktrialavailability_autorest.go @@ -0,0 +1,65 @@ +package locations + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type CheckTrialAvailabilityResponse struct { + HttpResponse *http.Response + Model *Trial +} + +// CheckTrialAvailability ... 
+func (c LocationsClient) CheckTrialAvailability(ctx context.Context, id LocationId) (result CheckTrialAvailabilityResponse, err error) { + req, err := c.preparerForCheckTrialAvailability(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "locations.LocationsClient", "CheckTrialAvailability", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "locations.LocationsClient", "CheckTrialAvailability", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForCheckTrialAvailability(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "locations.LocationsClient", "CheckTrialAvailability", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForCheckTrialAvailability prepares the CheckTrialAvailability request. +func (c LocationsClient) preparerForCheckTrialAvailability(ctx context.Context, id LocationId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/checkTrialAvailability", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForCheckTrialAvailability handles the response to the CheckTrialAvailability request. The method always +// closes the http.Response Body. +func (c LocationsClient) responderForCheckTrialAvailability(resp *http.Response) (result CheckTrialAvailabilityResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/internal/services/vmware/sdk/2020-03-20/locations/model_quota.go b/internal/services/vmware/sdk/2020-03-20/locations/model_quota.go new file mode 100644 index 000000000000..df5d2c6ebf40 --- /dev/null +++ b/internal/services/vmware/sdk/2020-03-20/locations/model_quota.go @@ -0,0 +1,6 @@ +package locations + +type Quota struct { + HostsRemaining *map[string]int64 `json:"hostsRemaining,omitempty"` + QuotaEnabled *QuotaEnabled `json:"quotaEnabled,omitempty"` +} diff --git a/internal/services/vmware/sdk/2020-03-20/locations/model_trial.go b/internal/services/vmware/sdk/2020-03-20/locations/model_trial.go new file mode 100644 index 000000000000..fae05bad5481 --- /dev/null +++ b/internal/services/vmware/sdk/2020-03-20/locations/model_trial.go @@ -0,0 +1,6 @@ +package locations + +type Trial struct { + AvailableHosts *int64 `json:"availableHosts,omitempty"` + Status *TrialStatus `json:"status,omitempty"` +} diff --git a/internal/services/vmware/sdk/2020-03-20/locations/version.go b/internal/services/vmware/sdk/2020-03-20/locations/version.go new file mode 100644 index 000000000000..39ca89a498a7 --- /dev/null +++ b/internal/services/vmware/sdk/2020-03-20/locations/version.go @@ -0,0 +1,9 @@ +package locations + +import "fmt" + +const defaultApiVersion = "2020-03-20" + +func userAgent() string { + return fmt.Sprintf("pandora/locations/%s", defaultApiVersion) +} diff --git a/internal/services/vmware/sdk/2020-03-20/privateclouds/constants.go b/internal/services/vmware/sdk/2020-03-20/privateclouds/constants.go index 
74c0df4f5a6c..357739d656ec 100644 --- a/internal/services/vmware/sdk/2020-03-20/privateclouds/constants.go +++ b/internal/services/vmware/sdk/2020-03-20/privateclouds/constants.go @@ -1,5 +1,7 @@ package privateclouds +import "strings" + type ClusterProvisioningState string const ( @@ -10,6 +12,33 @@ const ( ClusterProvisioningStateUpdating ClusterProvisioningState = "Updating" ) +func PossibleValuesForClusterProvisioningState() []string { + return []string{ + string(ClusterProvisioningStateCancelled), + string(ClusterProvisioningStateDeleting), + string(ClusterProvisioningStateFailed), + string(ClusterProvisioningStateSucceeded), + string(ClusterProvisioningStateUpdating), + } +} + +func parseClusterProvisioningState(input string) (*ClusterProvisioningState, error) { + vals := map[string]ClusterProvisioningState{ + "cancelled": ClusterProvisioningStateCancelled, + "deleting": ClusterProvisioningStateDeleting, + "failed": ClusterProvisioningStateFailed, + "succeeded": ClusterProvisioningStateSucceeded, + "updating": ClusterProvisioningStateUpdating, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ClusterProvisioningState(input) + return &out, nil +} + type InternetEnum string const ( @@ -17,6 +46,27 @@ const ( InternetEnumEnabled InternetEnum = "Enabled" ) +func PossibleValuesForInternetEnum() []string { + return []string{ + string(InternetEnumDisabled), + string(InternetEnumEnabled), + } +} + +func parseInternetEnum(input string) (*InternetEnum, error) { + vals := map[string]InternetEnum{ + "disabled": InternetEnumDisabled, + "enabled": InternetEnumEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := InternetEnum(input) + return &out, nil +} + type PrivateCloudProvisioningState string const ( @@ -29,9 +79,61 @@ const ( PrivateCloudProvisioningStateUpdating PrivateCloudProvisioningState = "Updating" ) +func PossibleValuesForPrivateCloudProvisioningState() []string { + return []string{ + string(PrivateCloudProvisioningStateBuilding), + string(PrivateCloudProvisioningStateCancelled), + string(PrivateCloudProvisioningStateDeleting), + string(PrivateCloudProvisioningStateFailed), + string(PrivateCloudProvisioningStatePending), + string(PrivateCloudProvisioningStateSucceeded), + string(PrivateCloudProvisioningStateUpdating), + } +} + +func parsePrivateCloudProvisioningState(input string) (*PrivateCloudProvisioningState, error) { + vals := map[string]PrivateCloudProvisioningState{ + "building": PrivateCloudProvisioningStateBuilding, + "cancelled": PrivateCloudProvisioningStateCancelled, + "deleting": PrivateCloudProvisioningStateDeleting, + "failed": PrivateCloudProvisioningStateFailed, + "pending": PrivateCloudProvisioningStatePending, + "succeeded": PrivateCloudProvisioningStateSucceeded, + "updating": PrivateCloudProvisioningStateUpdating, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := PrivateCloudProvisioningState(input) + return &out, nil +} + type SslEnum string const ( SslEnumDisabled SslEnum = "Disabled" SslEnumEnabled SslEnum = "Enabled" ) + +func PossibleValuesForSslEnum() []string { + return []string{ + string(SslEnumDisabled), + string(SslEnumEnabled), + } +} + +func parseSslEnum(input string) (*SslEnum, error) { + vals := map[string]SslEnum{ + "disabled": SslEnumDisabled, + 
"enabled": SslEnumEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SslEnum(input) + return &out, nil +} diff --git a/internal/services/vmware/sdk/2020-03-20/privateclouds/id_privatecloud.go b/internal/services/vmware/sdk/2020-03-20/privateclouds/id_privatecloud.go index 6d83427b27a3..76867727cbf9 100644 --- a/internal/services/vmware/sdk/2020-03-20/privateclouds/id_privatecloud.go +++ b/internal/services/vmware/sdk/2020-03-20/privateclouds/id_privatecloud.go @@ -7,102 +7,118 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = PrivateCloudId{} + +// PrivateCloudId is a struct representing the Resource ID for a Private Cloud type PrivateCloudId struct { - SubscriptionId string - ResourceGroup string - Name string + SubscriptionId string + ResourceGroupName string + PrivateCloudName string } -func NewPrivateCloudID(subscriptionId, resourceGroup, name string) PrivateCloudId { +// NewPrivateCloudID returns a new PrivateCloudId struct +func NewPrivateCloudID(subscriptionId string, resourceGroupName string, privateCloudName string) PrivateCloudId { return PrivateCloudId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, - Name: name, + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + PrivateCloudName: privateCloudName, } } -func (id PrivateCloudId) String() string { - segments := []string{ - fmt.Sprintf("Name %q", id.Name), - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Private Cloud", segmentsStr) -} - -func (id PrivateCloudId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.AVS/privateClouds/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.Name) -} - -// ParsePrivateCloudID parses a PrivateCloud ID into an PrivateCloudId struct +// ParsePrivateCloudID parses 'input' into a PrivateCloudId func ParsePrivateCloudID(input string) (*PrivateCloudId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(PrivateCloudId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err - } - - resourceId := PrivateCloudId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") - } + var ok bool + id := PrivateCloudId{} - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.Name, err = id.PopSegment("privateClouds"); err != nil { - return nil, err + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.PrivateCloudName, ok = parsed.Parsed["privateCloudName"]; !ok { + return nil, fmt.Errorf("the segment 'privateCloudName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// 
ParsePrivateCloudIDInsensitively parses an PrivateCloud ID into an PrivateCloudId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParsePrivateCloudID method should be used instead for validation etc. +// ParsePrivateCloudIDInsensitively parses 'input' case-insensitively into a PrivateCloudId +// note: this method should only be used for API response data and not user input func ParsePrivateCloudIDInsensitively(input string) (*PrivateCloudId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(PrivateCloudId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := PrivateCloudId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + var ok bool + id := PrivateCloudId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.PrivateCloudName, ok = parsed.Parsed["privateCloudName"]; !ok { + return nil, fmt.Errorf("the segment 'privateCloudName' was not found in the resource id %q", input) } - // find the correct casing for the 'privateClouds' segment - privateCloudsKey := "privateClouds" - for key := range id.Path { - if strings.EqualFold(key, privateCloudsKey) { - privateCloudsKey = key - break - } + return &id, nil +} + +// ValidatePrivateCloudID checks that 'input' can be parsed as a Private Cloud ID +func ValidatePrivateCloudID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.Name, err = id.PopSegment(privateCloudsKey); err != nil { - return nil, err + + if _, err := ParsePrivateCloudID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Private Cloud ID +func (id PrivateCloudId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.AVS/privateClouds/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.PrivateCloudName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Private Cloud ID +func (id PrivateCloudId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftAVS", "Microsoft.AVS", "Microsoft.AVS"), + resourceids.StaticSegment("staticPrivateClouds", "privateClouds", "privateClouds"), + 
resourceids.UserSpecifiedSegment("privateCloudName", "privateCloudValue"), } +} - return &resourceId, nil +// String returns a human-readable description of this Private Cloud ID +func (id PrivateCloudId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Private Cloud Name: %q", id.PrivateCloudName), + } + return fmt.Sprintf("Private Cloud (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/vmware/sdk/2020-03-20/privateclouds/id_privatecloud_test.go b/internal/services/vmware/sdk/2020-03-20/privateclouds/id_privatecloud_test.go index 55dfebc7895b..79845f32c84a 100644 --- a/internal/services/vmware/sdk/2020-03-20/privateclouds/id_privatecloud_test.go +++ b/internal/services/vmware/sdk/2020-03-20/privateclouds/id_privatecloud_test.go @@ -6,13 +6,29 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = PrivateCloudId{} +var _ resourceids.ResourceId = PrivateCloudId{} -func TestPrivateCloudIDFormatter(t *testing.T) { - actual := NewPrivateCloudID("{subscriptionId}", "{resourceGroupName}", "{privateCloudName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}" +func TestNewPrivateCloudID(t *testing.T) { + id := NewPrivateCloudID("12345678-1234-9876-4563-123456789012", "example-resource-group", "privateCloudValue") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.PrivateCloudName != "privateCloudValue" { + t.Fatalf("Expected %q but got %q for Segment 'PrivateCloudName'", id.PrivateCloudName, "privateCloudValue") + } +} + +func TestFormatPrivateCloudID(t *testing.T) { + actual := NewPrivateCloudID("12345678-1234-9876-4563-123456789012", "example-resource-group", "privateCloudValue").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,66 +38,61 @@ func TestParsePrivateCloudID(t *testing.T) { Error bool Expected *PrivateCloudId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Error: true, }, - { - // missing Name - Input: 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue", Expected: &PrivateCloudId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{privateCloudName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + PrivateCloudName: "privateCloudValue", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.AVS/PRIVATECLOUDS/{PRIVATECLOUDNAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -91,7 +102,7 @@ func TestParsePrivateCloudID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -100,12 +111,15 @@ func TestParsePrivateCloudID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.PrivateCloudName != v.Expected.PrivateCloudName { + t.Fatalf("Expected %q but got %q for PrivateCloudName", v.Expected.PrivateCloudName, actual.PrivateCloudName) } + } } @@ -115,90 +129,110 @@ func TestParsePrivateCloudIDInsensitively(t *testing.T) { Error bool Expected *PrivateCloudId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, 
}, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for Name - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}", - Expected: &PrivateCloudId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{privateCloudName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateclouds/{privateCloudName}", - Expected: &PrivateCloudId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{privateCloudName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/PRIVATECLOUDS/{privateCloudName}", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue", Expected: &PrivateCloudId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{privateCloudName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + PrivateCloudName: "privateCloudValue", }, }, - { - // mixed-cased segment names - Input: 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/PrIvAtEcLoUdS/{privateCloudName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.AVS/privateClouds/privateCloudValue/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs/pRiVaTeClOuDvAlUe", Expected: &PrivateCloudId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - Name: "{privateCloudName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + PrivateCloudName: "pRiVaTeClOuDvAlUe", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.aVs/pRiVaTeClOuDs/pRiVaTeClOuDvAlUe/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -208,7 +242,7 @@ func TestParsePrivateCloudIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -217,11 +251,29 @@ func TestParsePrivateCloudIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + + if actual.PrivateCloudName != v.Expected.PrivateCloudName { + t.Fatalf("Expected %q but got %q for PrivateCloudName", v.Expected.PrivateCloudName, actual.PrivateCloudName) } + + } +} + +func TestSegmentsForPrivateCloudId(t *testing.T) { + segments := PrivateCloudId{}.Segments() + if len(segments) == 0 { + t.Fatalf("PrivateCloudId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) } } diff --git a/internal/services/vmware/sdk/2020-03-20/privateclouds/id_resourcegroup.go b/internal/services/vmware/sdk/2020-03-20/privateclouds/id_resourcegroup.go index 265516c85c1d..e77f90e6b46c 100644 --- a/internal/services/vmware/sdk/2020-03-20/privateclouds/id_resourcegroup.go +++ b/internal/services/vmware/sdk/2020-03-20/privateclouds/id_resourcegroup.go @@ -7,83 +7,103 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = ResourceGroupId{} + +// ResourceGroupId is a struct representing the Resource ID for a Resource Group type ResourceGroupId struct { - SubscriptionId string - ResourceGroup string + SubscriptionId string + ResourceGroupName string } -func 
NewResourceGroupID(subscriptionId, resourceGroup string) ResourceGroupId { +// NewResourceGroupID returns a new ResourceGroupId struct +func NewResourceGroupID(subscriptionId string, resourceGroupName string) ResourceGroupId { return ResourceGroupId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, } } -func (id ResourceGroupId) String() string { - segments := []string{ - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Resource Group", segmentsStr) -} - -func (id ResourceGroupId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup) -} - -// ParseResourceGroupID parses a ResourceGroup ID into an ResourceGroupId struct +// ParseResourceGroupID parses 'input' into a ResourceGroupId func ParseResourceGroupID(input string) (*ResourceGroupId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(ResourceGroupId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err - } - - resourceId := ResourceGroupId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") - } + var ok bool + id := ResourceGroupId{} - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - return &resourceId, nil + return &id, nil } -// ParseResourceGroupIDInsensitively parses an ResourceGroup ID into an ResourceGroupId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseResourceGroupID method should be used instead for validation etc. 
+// ParseResourceGroupIDInsensitively parses 'input' case-insensitively into a ResourceGroupId +// note: this method should only be used for API response data and not user input func ParseResourceGroupIDInsensitively(input string) (*ResourceGroupId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(ResourceGroupId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := ResourceGroupId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - resourceId := ResourceGroupId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + return &id, nil +} + +// ValidateResourceGroupID checks that 'input' can be parsed as a Resource Group ID +func ValidateResourceGroupID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + if _, err := ParseResourceGroupID(v); err != nil { + errors = append(errors, err) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + return +} + +// ID returns the formatted Resource Group ID +func (id ResourceGroupId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Resource Group ID +func (id ResourceGroupId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), } +} - return &resourceId, nil +// String returns a human-readable description of this Resource Group ID +func (id ResourceGroupId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + } + return fmt.Sprintf("Resource Group (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/vmware/sdk/2020-03-20/privateclouds/id_resourcegroup_test.go b/internal/services/vmware/sdk/2020-03-20/privateclouds/id_resourcegroup_test.go index 0d5d5ca6dd7c..40d9830a8118 100644 --- a/internal/services/vmware/sdk/2020-03-20/privateclouds/id_resourcegroup_test.go +++ b/internal/services/vmware/sdk/2020-03-20/privateclouds/id_resourcegroup_test.go @@ -6,13 +6,25 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = ResourceGroupId{} +var _ resourceids.ResourceId = ResourceGroupId{} -func TestResourceGroupIDFormatter(t *testing.T) { - actual := NewResourceGroupID("{subscriptionId}", 
"{resourceGroupName}").ID() - expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}" +func TestNewResourceGroupID(t *testing.T) { + id := NewResourceGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } +} + +func TestFormatResourceGroupID(t *testing.T) { + actual := NewResourceGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) + t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,53 +34,40 @@ func TestParseResourceGroupID(t *testing.T) { Error bool Expected *ResourceGroupId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", - Error: true, - }, - - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -78,7 +77,7 @@ func TestParseResourceGroupID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -87,9 +86,11 @@ func TestParseResourceGroupID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + } } @@ -99,74 +100,68 @@ func TestParseResourceGroupIDInsensitively(t *testing.T) { Error bool Expected *ResourceGroupId }{ - { - // empty + // Incomplete URI 
Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // missing ResourceGroup - Input: "/subscriptions/{subscriptionId}/", + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // missing value for ResourceGroup - Input: "/subscriptions/{subscriptionId}/resourceGroups/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", - Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - }, + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", - Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", - }, + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", Expected: &ResourceGroupId{ - SubscriptionId: "{subscriptionId}", - ResourceGroup: "{resourceGroupName}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", }, }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/extra", + Error: true, + }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -176,7 +171,7 @@ func TestParseResourceGroupIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -185,8 +180,25 @@ func TestParseResourceGroupIDInsensitively(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + + if actual.ResourceGroupName != v.Expected.ResourceGroupName 
{ + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) } + + } +} + +func TestSegmentsForResourceGroupId(t *testing.T) { + segments := ResourceGroupId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ResourceGroupId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) } } diff --git a/internal/services/vmware/sdk/2020-03-20/privateclouds/id_subscription.go b/internal/services/vmware/sdk/2020-03-20/privateclouds/id_subscription.go index 73ac32424cea..4b724a528c23 100644 --- a/internal/services/vmware/sdk/2020-03-20/privateclouds/id_subscription.go +++ b/internal/services/vmware/sdk/2020-03-20/privateclouds/id_subscription.go @@ -7,69 +7,90 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) +var _ resourceids.ResourceId = SubscriptionId{} + +// SubscriptionId is a struct representing the Resource ID for a Subscription type SubscriptionId struct { SubscriptionId string } +// NewSubscriptionID returns a new SubscriptionId struct func NewSubscriptionID(subscriptionId string) SubscriptionId { return SubscriptionId{ SubscriptionId: subscriptionId, } } -func (id SubscriptionId) String() string { - segments := []string{} - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Subscription", segmentsStr) -} - -func (id SubscriptionId) ID() string { - fmtString := "/subscriptions/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId) -} - -// ParseSubscriptionID parses a Subscription ID into an SubscriptionId struct +// ParseSubscriptionID parses 'input' into a SubscriptionId func ParseSubscriptionID(input string) (*SubscriptionId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(SubscriptionId{}) + parsed, err := parser.Parse(input, false) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := SubscriptionId{ - SubscriptionId: id.SubscriptionID, - } + var ok bool + id := SubscriptionId{} - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err - } - - return &resourceId, nil + return &id, nil } -// ParseSubscriptionIDInsensitively parses an Subscription ID into an SubscriptionId struct, insensitively -// This should only be used to parse an ID for rewriting to a consistent casing, -// the ParseSubscriptionID method should be used instead for validation etc. 
+// ParseSubscriptionIDInsensitively parses 'input' case-insensitively into a SubscriptionId +// note: this method should only be used for API response data and not user input func ParseSubscriptionIDInsensitively(input string) (*SubscriptionId, error) { - id, err := resourceids.ParseAzureResourceID(input) + parser := resourceids.NewParserFromResourceIdType(SubscriptionId{}) + parsed, err := parser.Parse(input, true) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing %q: %+v", input, err) } - resourceId := SubscriptionId{ - SubscriptionId: id.SubscriptionID, + var ok bool + id := SubscriptionId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) } - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + return &id, nil +} + +// ValidateSubscriptionID checks that 'input' can be parsed as a Subscription ID +func ValidateSubscriptionID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return } - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err + if _, err := ParseSubscriptionID(v); err != nil { + errors = append(errors, err) } - return &resourceId, nil + return +} + +// ID returns the formatted Subscription ID +func (id SubscriptionId) ID() string { + fmtString := "/subscriptions/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId) +} + +// Segments returns a slice of Resource ID Segments which comprise this Subscription ID +func (id SubscriptionId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + } +} + +// String returns a human-readable description of this Subscription ID +func (id SubscriptionId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + } + return fmt.Sprintf("Subscription (%s)", strings.Join(components, "\n")) } diff --git a/internal/services/vmware/sdk/2020-03-20/privateclouds/id_subscription_test.go b/internal/services/vmware/sdk/2020-03-20/privateclouds/id_subscription_test.go index 3db1c1c0f8d0..b1c106ebe639 100644 --- a/internal/services/vmware/sdk/2020-03-20/privateclouds/id_subscription_test.go +++ b/internal/services/vmware/sdk/2020-03-20/privateclouds/id_subscription_test.go @@ -6,13 +6,21 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" ) -var _ resourceids.Id = SubscriptionId{} +var _ resourceids.ResourceId = SubscriptionId{} -func TestSubscriptionIDFormatter(t *testing.T) { - actual := NewSubscriptionID("{subscriptionId}").ID() - expected := "/subscriptions/{subscriptionId}" +func TestNewSubscriptionID(t *testing.T) { + id := NewSubscriptionID("12345678-1234-9876-4563-123456789012") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } +} + +func TestFormatSubscriptionID(t *testing.T) { + actual := NewSubscriptionID("12345678-1234-9876-4563-123456789012").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012" if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) 
+ t.Fatalf("Expected the Formatted ID to be %q but got %q", actual, expected) } } @@ -22,40 +30,29 @@ func TestParseSubscriptionID(t *testing.T) { Error bool Expected *SubscriptionId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - - { - // missing SubscriptionId - Input: "/", - Error: true, - }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", }, }, - { - // upper-cased - Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}", + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/extra", Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -65,7 +62,7 @@ func TestParseSubscriptionID(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -74,6 +71,7 @@ func TestParseSubscriptionID(t *testing.T) { if actual.SubscriptionId != v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } + } } @@ -83,58 +81,46 @@ func TestParseSubscriptionIDInsensitively(t *testing.T) { Error bool Expected *SubscriptionId }{ - { - // empty + // Incomplete URI Input: "", Error: true, }, - { - // missing SubscriptionId - Input: "/", + // Incomplete URI + Input: "/subscriptions", Error: true, }, - { - // missing value for SubscriptionId - Input: "/subscriptions/", + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", Error: true, }, - { - // valid - Input: "/subscriptions/{subscriptionId}", + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", }, }, - { - // lower-cased segment names - Input: "/subscriptions/{subscriptionId}", - Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", - }, + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/extra", + Error: true, }, - { - // upper-cased segment names - Input: "/subscriptions/{subscriptionId}", + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", + SubscriptionId: "12345678-1234-9876-4563-123456789012", }, }, - { - // mixed-cased segment names - Input: "/subscriptions/{subscriptionId}", - Expected: &SubscriptionId{ - SubscriptionId: "{subscriptionId}", - }, + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/extra", + Error: true, }, } - for _, v := range testData { t.Logf("[DEBUG] Testing %q", v.Input) @@ -144,7 +130,7 @@ func TestParseSubscriptionIDInsensitively(t *testing.T) { continue } - t.Fatalf("Expect a value but got an error: %s", err) + t.Fatalf("Expect a value but got an error: %+v", err) } if v.Error { t.Fatal("Expect an error but didn't get one") @@ -153,5 +139,21 @@ func TestParseSubscriptionIDInsensitively(t *testing.T) { if actual.SubscriptionId 
!= v.Expected.SubscriptionId { t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) } + + } +} + +func TestSegmentsForSubscriptionId(t *testing.T) { + segments := SubscriptionId{}.Segments() + if len(segments) == 0 { + t.Fatalf("SubscriptionId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) } } diff --git a/internal/services/vmware/sdk/2020-03-20/privateclouds/predicates.go b/internal/services/vmware/sdk/2020-03-20/privateclouds/predicates.go index 661db72eba43..e605fb11783b 100644 --- a/internal/services/vmware/sdk/2020-03-20/privateclouds/predicates.go +++ b/internal/services/vmware/sdk/2020-03-20/privateclouds/predicates.go @@ -8,6 +8,7 @@ type PrivateCloudPredicate struct { } func (p PrivateCloudPredicate) Matches(input PrivateCloud) bool { + if p.Id != nil && (input.Id == nil && *p.Id != *input.Id) { return false } diff --git a/internal/services/vmware/vmware_cluster_resource.go b/internal/services/vmware/vmware_cluster_resource.go index f72917a5800b..438260609655 100644 --- a/internal/services/vmware/vmware_cluster_resource.go +++ b/internal/services/vmware/vmware_cluster_resource.go @@ -96,7 +96,7 @@ func resourceVmwareClusterCreate(d *pluginsdk.ResourceData, meta interface{}) er return err } - id := clusters.NewClusterID(subscriptionId, privateCloudId.ResourceGroup, privateCloudId.Name, name) + id := clusters.NewClusterID(subscriptionId, privateCloudId.ResourceGroupName, privateCloudId.PrivateCloudName, name) existing, err := client.Get(ctx, id) if err != nil { if !response.WasNotFound(existing.HttpResponse) { @@ -144,8 +144,8 @@ func resourceVmwareClusterRead(d *pluginsdk.ResourceData, meta interface{}) erro return fmt.Errorf("retrieving %s: %+v", *id, err) } - d.Set("name", id.Name) - d.Set("vmware_cloud_id", privateclouds.NewPrivateCloudID(id.SubscriptionId, id.ResourceGroup, id.PrivateCloudName).ID()) + d.Set("name", id.ClusterName) + d.Set("vmware_cloud_id", privateclouds.NewPrivateCloudID(id.SubscriptionId, id.ResourceGroupName, id.PrivateCloudName).ID()) if model := resp.Model; model != nil { d.Set("cluster_node_count", model.Properties.ClusterSize) diff --git a/internal/services/vmware/vmware_express_route_authorization_resource.go b/internal/services/vmware/vmware_express_route_authorization_resource.go index 5cb6f2630169..d1f2d985e988 100644 --- a/internal/services/vmware/vmware_express_route_authorization_resource.go +++ b/internal/services/vmware/vmware_express_route_authorization_resource.go @@ -74,7 +74,7 @@ func resourceVmwareExpressRouteAuthorizationCreate(d *pluginsdk.ResourceData, me return err } - id := authorizations.NewAuthorizationID(subscriptionId, privateCloudId.ResourceGroup, privateCloudId.Name, name) + id := authorizations.NewAuthorizationID(subscriptionId, privateCloudId.ResourceGroupName, privateCloudId.PrivateCloudName, name) existing, err := client.Get(ctx, id) if err != nil { if !response.WasNotFound(existing.HttpResponse) { @@ -117,8 +117,8 @@ func resourceVmwareExpressRouteAuthorizationRead(d *pluginsdk.ResourceData, meta return fmt.Errorf("retrieving %q: %+v", *id, err) } - d.Set("name", id.Name) - d.Set("private_cloud_id", privateclouds.NewPrivateCloudID(subscriptionId, id.ResourceGroup, id.PrivateCloudName).ID()) + 
d.Set("name", id.AuthorizationName) + d.Set("private_cloud_id", privateclouds.NewPrivateCloudID(subscriptionId, id.ResourceGroupName, id.PrivateCloudName).ID()) if model := resp.Model; model != nil { if props := model.Properties; props != nil { diff --git a/internal/services/vmware/vmware_private_cloud_data_source.go b/internal/services/vmware/vmware_private_cloud_data_source.go index b6bbc656f3b8..ef09d278a100 100644 --- a/internal/services/vmware/vmware_private_cloud_data_source.go +++ b/internal/services/vmware/vmware_private_cloud_data_source.go @@ -165,8 +165,8 @@ func dataSourceVmwarePrivateCloudRead(d *pluginsdk.ResourceData, meta interface{ } d.SetId(id.ID()) - d.Set("name", id.Name) - d.Set("resource_group_name", id.ResourceGroup) + d.Set("name", id.PrivateCloudName) + d.Set("resource_group_name", id.ResourceGroupName) if model := resp.Model; model != nil { d.Set("location", location.Normalize(model.Location)) diff --git a/internal/services/vmware/vmware_private_cloud_resource.go b/internal/services/vmware/vmware_private_cloud_resource.go index 22cf4ea8cce0..fafe9cef0f84 100644 --- a/internal/services/vmware/vmware_private_cloud_resource.go +++ b/internal/services/vmware/vmware_private_cloud_resource.go @@ -257,8 +257,8 @@ func resourceVmwarePrivateCloudRead(d *pluginsdk.ResourceData, meta interface{}) return fmt.Errorf("retrieving %s: %+v", *id, err) } - d.Set("name", id.Name) - d.Set("resource_group_name", id.ResourceGroup) + d.Set("name", id.PrivateCloudName) + d.Set("resource_group_name", id.ResourceGroupName) if model := resp.Model; model != nil { d.Set("location", location.Normalize(model.Location)) diff --git a/internal/services/web/app_service_hybrid_connection_resource.go b/internal/services/web/app_service_hybrid_connection_resource.go index f628270e503d..d80658d03f80 100644 --- a/internal/services/web/app_service_hybrid_connection_resource.go +++ b/internal/services/web/app_service_hybrid_connection_resource.go @@ -120,7 +120,7 @@ func resourceAppServiceHybridConnectionCreateUpdate(d *pluginsdk.ResourceData, m return fmt.Errorf("parsing relay ID %q: %s", relayArmURI, err) } namespaceName := relayId.NamespaceName - relayName := relayId.Name + relayName := relayId.HybridConnectionName if d.IsNewResource() { existing, err := client.GetHybridConnection(ctx, resourceGroup, name, namespaceName, relayName) @@ -257,5 +257,5 @@ func findRelayNamespace(ctx context.Context, client *namespaces.NamespacesClient if err != nil { return nil, fmt.Errorf("relay Namespace id not valid: %+v", err) } - return &id.ResourceGroup, nil + return &id.ResourceGroupName, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/CHANGELOG.md deleted file mode 100644 index e66613099e84..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/CHANGELOG.md +++ /dev/null @@ -1,561 +0,0 @@ -# Change History - -## Breaking Changes - -### Removed Constants - -1. AccessLevel.None -1. AccessLevel.Read -1. AccessLevel.Write -1. AggregatedReplicationState.Completed -1. AggregatedReplicationState.Failed -1. AggregatedReplicationState.InProgress -1. AggregatedReplicationState.Unknown -1. AvailabilitySetSkuTypes.Aligned -1. AvailabilitySetSkuTypes.Classic -1. ComponentNames.MicrosoftWindowsShellSetup -1. DiffDiskOptions.Local -1. DiffDiskPlacement.CacheDisk -1. DiffDiskPlacement.ResourceDisk -1. DiskCreateOption.Attach -1. 
DiskCreateOption.Copy -1. DiskCreateOption.Empty -1. DiskCreateOption.FromImage -1. DiskCreateOption.Import -1. DiskCreateOption.Restore -1. DiskCreateOption.Upload -1. DiskDetachOptionTypes.ForceDetach -1. DiskEncryptionSetType.EncryptionAtRestWithCustomerKey -1. DiskEncryptionSetType.EncryptionAtRestWithPlatformAndCustomerKeys -1. DiskSecurityTypes.TrustedLaunch -1. DiskState.ActiveSAS -1. DiskState.ActiveUpload -1. DiskState.Attached -1. DiskState.ReadyToUpload -1. DiskState.Reserved -1. DiskState.Unattached -1. DiskStorageAccountTypes.PremiumLRS -1. DiskStorageAccountTypes.PremiumZRS -1. DiskStorageAccountTypes.StandardLRS -1. DiskStorageAccountTypes.StandardSSDLRS -1. DiskStorageAccountTypes.StandardSSDZRS -1. DiskStorageAccountTypes.UltraSSDLRS -1. ExtendedLocationTypes.EdgeZone -1. HyperVGeneration.V1 -1. HyperVGeneration.V2 -1. IPVersion.IPv4 -1. IPVersion.IPv6 -1. InstanceViewTypes.InstanceView -1. IntervalInMins.FiveMins -1. IntervalInMins.SixtyMins -1. IntervalInMins.ThirtyMins -1. IntervalInMins.ThreeMins -1. LinuxVMGuestPatchMode.AutomaticByPlatform -1. LinuxVMGuestPatchMode.ImageDefault -1. NetworkAccessPolicy.AllowAll -1. NetworkAccessPolicy.AllowPrivate -1. NetworkAccessPolicy.DenyAll -1. OperatingSystemStateTypes.Generalized -1. OperatingSystemStateTypes.Specialized -1. OperatingSystemTypes.Linux -1. OperatingSystemTypes.Windows -1. OrchestrationMode.Flexible -1. OrchestrationMode.Uniform -1. OrchestrationServiceNames.AutomaticRepairs -1. OrchestrationServiceState.NotRunning -1. OrchestrationServiceState.Running -1. OrchestrationServiceState.Suspended -1. OrchestrationServiceStateAction.Resume -1. OrchestrationServiceStateAction.Suspend -1. PassNames.OobeSystem -1. PrivateEndpointServiceConnectionStatus.Approved -1. PrivateEndpointServiceConnectionStatus.Pending -1. PrivateEndpointServiceConnectionStatus.Rejected -1. ProtocolTypes.HTTP -1. ProtocolTypes.HTTPS -1. ProximityPlacementGroupType.Standard -1. ProximityPlacementGroupType.Ultra -1. ResourceSkuRestrictionsReasonCode.NotAvailableForSubscription -1. ResourceSkuRestrictionsReasonCode.QuotaID -1. ResourceSkuRestrictionsType.Location -1. ResourceSkuRestrictionsType.Zone -1. RollingUpgradeActionType.Cancel -1. RollingUpgradeActionType.Start -1. SettingNames.AutoLogon -1. SettingNames.FirstLogonCommands -1. StatusLevelTypes.Error -1. StatusLevelTypes.Info -1. StatusLevelTypes.Warning -1. UpgradeMode.Automatic -1. UpgradeMode.Manual -1. UpgradeMode.Rolling -1. VMGuestPatchClassificationLinux.Critical -1. VMGuestPatchClassificationLinux.Other -1. VMGuestPatchClassificationLinux.Security -1. VMGuestPatchRebootSetting.Always -1. VMGuestPatchRebootSetting.IfRequired -1. VMGuestPatchRebootSetting.Never -1. VirtualMachineEvictionPolicyTypes.Deallocate -1. VirtualMachineEvictionPolicyTypes.Delete -1. VirtualMachinePriorityTypes.Low -1. VirtualMachinePriorityTypes.Regular -1. VirtualMachinePriorityTypes.Spot -1. VirtualMachineScaleSetScaleInRules.Default -1. VirtualMachineScaleSetScaleInRules.NewestVM -1. VirtualMachineScaleSetScaleInRules.OldestVM -1. VirtualMachineSizeTypes.BasicA0 -1. VirtualMachineSizeTypes.BasicA1 -1. VirtualMachineSizeTypes.BasicA2 -1. VirtualMachineSizeTypes.BasicA3 -1. VirtualMachineSizeTypes.BasicA4 -1. VirtualMachineSizeTypes.StandardA0 -1. VirtualMachineSizeTypes.StandardA1 -1. VirtualMachineSizeTypes.StandardA10 -1. VirtualMachineSizeTypes.StandardA11 -1. VirtualMachineSizeTypes.StandardA1V2 -1. VirtualMachineSizeTypes.StandardA2 -1. VirtualMachineSizeTypes.StandardA2V2 -1. 
VirtualMachineSizeTypes.StandardA2mV2 -1. VirtualMachineSizeTypes.StandardA3 -1. VirtualMachineSizeTypes.StandardA4 -1. VirtualMachineSizeTypes.StandardA4V2 -1. VirtualMachineSizeTypes.StandardA4mV2 -1. VirtualMachineSizeTypes.StandardA5 -1. VirtualMachineSizeTypes.StandardA6 -1. VirtualMachineSizeTypes.StandardA7 -1. VirtualMachineSizeTypes.StandardA8 -1. VirtualMachineSizeTypes.StandardA8V2 -1. VirtualMachineSizeTypes.StandardA8mV2 -1. VirtualMachineSizeTypes.StandardA9 -1. VirtualMachineSizeTypes.StandardB1ms -1. VirtualMachineSizeTypes.StandardB1s -1. VirtualMachineSizeTypes.StandardB2ms -1. VirtualMachineSizeTypes.StandardB2s -1. VirtualMachineSizeTypes.StandardB4ms -1. VirtualMachineSizeTypes.StandardB8ms -1. VirtualMachineSizeTypes.StandardD1 -1. VirtualMachineSizeTypes.StandardD11 -1. VirtualMachineSizeTypes.StandardD11V2 -1. VirtualMachineSizeTypes.StandardD12 -1. VirtualMachineSizeTypes.StandardD12V2 -1. VirtualMachineSizeTypes.StandardD13 -1. VirtualMachineSizeTypes.StandardD13V2 -1. VirtualMachineSizeTypes.StandardD14 -1. VirtualMachineSizeTypes.StandardD14V2 -1. VirtualMachineSizeTypes.StandardD15V2 -1. VirtualMachineSizeTypes.StandardD16V3 -1. VirtualMachineSizeTypes.StandardD16sV3 -1. VirtualMachineSizeTypes.StandardD1V2 -1. VirtualMachineSizeTypes.StandardD2 -1. VirtualMachineSizeTypes.StandardD2V2 -1. VirtualMachineSizeTypes.StandardD2V3 -1. VirtualMachineSizeTypes.StandardD2sV3 -1. VirtualMachineSizeTypes.StandardD3 -1. VirtualMachineSizeTypes.StandardD32V3 -1. VirtualMachineSizeTypes.StandardD32sV3 -1. VirtualMachineSizeTypes.StandardD3V2 -1. VirtualMachineSizeTypes.StandardD4 -1. VirtualMachineSizeTypes.StandardD4V2 -1. VirtualMachineSizeTypes.StandardD4V3 -1. VirtualMachineSizeTypes.StandardD4sV3 -1. VirtualMachineSizeTypes.StandardD5V2 -1. VirtualMachineSizeTypes.StandardD64V3 -1. VirtualMachineSizeTypes.StandardD64sV3 -1. VirtualMachineSizeTypes.StandardD8V3 -1. VirtualMachineSizeTypes.StandardD8sV3 -1. VirtualMachineSizeTypes.StandardDS1 -1. VirtualMachineSizeTypes.StandardDS11 -1. VirtualMachineSizeTypes.StandardDS11V2 -1. VirtualMachineSizeTypes.StandardDS12 -1. VirtualMachineSizeTypes.StandardDS12V2 -1. VirtualMachineSizeTypes.StandardDS13 -1. VirtualMachineSizeTypes.StandardDS132V2 -1. VirtualMachineSizeTypes.StandardDS134V2 -1. VirtualMachineSizeTypes.StandardDS13V2 -1. VirtualMachineSizeTypes.StandardDS14 -1. VirtualMachineSizeTypes.StandardDS144V2 -1. VirtualMachineSizeTypes.StandardDS148V2 -1. VirtualMachineSizeTypes.StandardDS14V2 -1. VirtualMachineSizeTypes.StandardDS15V2 -1. VirtualMachineSizeTypes.StandardDS1V2 -1. VirtualMachineSizeTypes.StandardDS2 -1. VirtualMachineSizeTypes.StandardDS2V2 -1. VirtualMachineSizeTypes.StandardDS3 -1. VirtualMachineSizeTypes.StandardDS3V2 -1. VirtualMachineSizeTypes.StandardDS4 -1. VirtualMachineSizeTypes.StandardDS4V2 -1. VirtualMachineSizeTypes.StandardDS5V2 -1. VirtualMachineSizeTypes.StandardE16V3 -1. VirtualMachineSizeTypes.StandardE16sV3 -1. VirtualMachineSizeTypes.StandardE2V3 -1. VirtualMachineSizeTypes.StandardE2sV3 -1. VirtualMachineSizeTypes.StandardE3216V3 -1. VirtualMachineSizeTypes.StandardE328sV3 -1. VirtualMachineSizeTypes.StandardE32V3 -1. VirtualMachineSizeTypes.StandardE32sV3 -1. VirtualMachineSizeTypes.StandardE4V3 -1. VirtualMachineSizeTypes.StandardE4sV3 -1. VirtualMachineSizeTypes.StandardE6416sV3 -1. VirtualMachineSizeTypes.StandardE6432sV3 -1. VirtualMachineSizeTypes.StandardE64V3 -1. VirtualMachineSizeTypes.StandardE64sV3 -1. VirtualMachineSizeTypes.StandardE8V3 -1. 
VirtualMachineSizeTypes.StandardE8sV3 -1. VirtualMachineSizeTypes.StandardF1 -1. VirtualMachineSizeTypes.StandardF16 -1. VirtualMachineSizeTypes.StandardF16s -1. VirtualMachineSizeTypes.StandardF16sV2 -1. VirtualMachineSizeTypes.StandardF1s -1. VirtualMachineSizeTypes.StandardF2 -1. VirtualMachineSizeTypes.StandardF2s -1. VirtualMachineSizeTypes.StandardF2sV2 -1. VirtualMachineSizeTypes.StandardF32sV2 -1. VirtualMachineSizeTypes.StandardF4 -1. VirtualMachineSizeTypes.StandardF4s -1. VirtualMachineSizeTypes.StandardF4sV2 -1. VirtualMachineSizeTypes.StandardF64sV2 -1. VirtualMachineSizeTypes.StandardF72sV2 -1. VirtualMachineSizeTypes.StandardF8 -1. VirtualMachineSizeTypes.StandardF8s -1. VirtualMachineSizeTypes.StandardF8sV2 -1. VirtualMachineSizeTypes.StandardG1 -1. VirtualMachineSizeTypes.StandardG2 -1. VirtualMachineSizeTypes.StandardG3 -1. VirtualMachineSizeTypes.StandardG4 -1. VirtualMachineSizeTypes.StandardG5 -1. VirtualMachineSizeTypes.StandardGS1 -1. VirtualMachineSizeTypes.StandardGS2 -1. VirtualMachineSizeTypes.StandardGS3 -1. VirtualMachineSizeTypes.StandardGS4 -1. VirtualMachineSizeTypes.StandardGS44 -1. VirtualMachineSizeTypes.StandardGS48 -1. VirtualMachineSizeTypes.StandardGS5 -1. VirtualMachineSizeTypes.StandardGS516 -1. VirtualMachineSizeTypes.StandardGS58 -1. VirtualMachineSizeTypes.StandardH16 -1. VirtualMachineSizeTypes.StandardH16m -1. VirtualMachineSizeTypes.StandardH16mr -1. VirtualMachineSizeTypes.StandardH16r -1. VirtualMachineSizeTypes.StandardH8 -1. VirtualMachineSizeTypes.StandardH8m -1. VirtualMachineSizeTypes.StandardL16s -1. VirtualMachineSizeTypes.StandardL32s -1. VirtualMachineSizeTypes.StandardL4s -1. VirtualMachineSizeTypes.StandardL8s -1. VirtualMachineSizeTypes.StandardM12832ms -1. VirtualMachineSizeTypes.StandardM12864ms -1. VirtualMachineSizeTypes.StandardM128ms -1. VirtualMachineSizeTypes.StandardM128s -1. VirtualMachineSizeTypes.StandardM6416ms -1. VirtualMachineSizeTypes.StandardM6432ms -1. VirtualMachineSizeTypes.StandardM64ms -1. VirtualMachineSizeTypes.StandardM64s -1. VirtualMachineSizeTypes.StandardNC12 -1. VirtualMachineSizeTypes.StandardNC12sV2 -1. VirtualMachineSizeTypes.StandardNC12sV3 -1. VirtualMachineSizeTypes.StandardNC24 -1. VirtualMachineSizeTypes.StandardNC24r -1. VirtualMachineSizeTypes.StandardNC24rsV2 -1. VirtualMachineSizeTypes.StandardNC24rsV3 -1. VirtualMachineSizeTypes.StandardNC24sV2 -1. VirtualMachineSizeTypes.StandardNC24sV3 -1. VirtualMachineSizeTypes.StandardNC6 -1. VirtualMachineSizeTypes.StandardNC6sV2 -1. VirtualMachineSizeTypes.StandardNC6sV3 -1. VirtualMachineSizeTypes.StandardND12s -1. VirtualMachineSizeTypes.StandardND24rs -1. VirtualMachineSizeTypes.StandardND24s -1. VirtualMachineSizeTypes.StandardND6s -1. VirtualMachineSizeTypes.StandardNV12 -1. VirtualMachineSizeTypes.StandardNV24 -1. VirtualMachineSizeTypes.StandardNV6 - -## Additive Changes - -### New Constants - -1. AccessLevel.AccessLevelNone -1. AccessLevel.AccessLevelRead -1. AccessLevel.AccessLevelWrite -1. AggregatedReplicationState.AggregatedReplicationStateCompleted -1. AggregatedReplicationState.AggregatedReplicationStateFailed -1. AggregatedReplicationState.AggregatedReplicationStateInProgress -1. AggregatedReplicationState.AggregatedReplicationStateUnknown -1. AvailabilitySetSkuTypes.AvailabilitySetSkuTypesAligned -1. AvailabilitySetSkuTypes.AvailabilitySetSkuTypesClassic -1. ComponentNames.ComponentNamesMicrosoftWindowsShellSetup -1. DiffDiskOptions.DiffDiskOptionsLocal -1. DiffDiskPlacement.DiffDiskPlacementCacheDisk -1. 
DiffDiskPlacement.DiffDiskPlacementResourceDisk -1. DiskCreateOption.DiskCreateOptionAttach -1. DiskCreateOption.DiskCreateOptionCopy -1. DiskCreateOption.DiskCreateOptionEmpty -1. DiskCreateOption.DiskCreateOptionFromImage -1. DiskCreateOption.DiskCreateOptionImport -1. DiskCreateOption.DiskCreateOptionRestore -1. DiskCreateOption.DiskCreateOptionUpload -1. DiskDetachOptionTypes.DiskDetachOptionTypesForceDetach -1. DiskEncryptionSetType.DiskEncryptionSetTypeEncryptionAtRestWithCustomerKey -1. DiskEncryptionSetType.DiskEncryptionSetTypeEncryptionAtRestWithPlatformAndCustomerKeys -1. DiskSecurityTypes.DiskSecurityTypesTrustedLaunch -1. DiskState.DiskStateActiveSAS -1. DiskState.DiskStateActiveUpload -1. DiskState.DiskStateAttached -1. DiskState.DiskStateReadyToUpload -1. DiskState.DiskStateReserved -1. DiskState.DiskStateUnattached -1. DiskStorageAccountTypes.DiskStorageAccountTypesPremiumLRS -1. DiskStorageAccountTypes.DiskStorageAccountTypesPremiumZRS -1. DiskStorageAccountTypes.DiskStorageAccountTypesStandardLRS -1. DiskStorageAccountTypes.DiskStorageAccountTypesStandardSSDLRS -1. DiskStorageAccountTypes.DiskStorageAccountTypesStandardSSDZRS -1. DiskStorageAccountTypes.DiskStorageAccountTypesUltraSSDLRS -1. ExtendedLocationTypes.ExtendedLocationTypesEdgeZone -1. HyperVGeneration.HyperVGenerationV1 -1. HyperVGeneration.HyperVGenerationV2 -1. IPVersion.IPVersionIPv4 -1. IPVersion.IPVersionIPv6 -1. InstanceViewTypes.InstanceViewTypesInstanceView -1. IntervalInMins.IntervalInMinsFiveMins -1. IntervalInMins.IntervalInMinsSixtyMins -1. IntervalInMins.IntervalInMinsThirtyMins -1. IntervalInMins.IntervalInMinsThreeMins -1. LinuxVMGuestPatchMode.LinuxVMGuestPatchModeAutomaticByPlatform -1. LinuxVMGuestPatchMode.LinuxVMGuestPatchModeImageDefault -1. NetworkAccessPolicy.NetworkAccessPolicyAllowAll -1. NetworkAccessPolicy.NetworkAccessPolicyAllowPrivate -1. NetworkAccessPolicy.NetworkAccessPolicyDenyAll -1. OperatingSystemStateTypes.OperatingSystemStateTypesGeneralized -1. OperatingSystemStateTypes.OperatingSystemStateTypesSpecialized -1. OperatingSystemTypes.OperatingSystemTypesLinux -1. OperatingSystemTypes.OperatingSystemTypesWindows -1. OrchestrationMode.OrchestrationModeFlexible -1. OrchestrationMode.OrchestrationModeUniform -1. OrchestrationServiceNames.OrchestrationServiceNamesAutomaticRepairs -1. OrchestrationServiceState.OrchestrationServiceStateNotRunning -1. OrchestrationServiceState.OrchestrationServiceStateRunning -1. OrchestrationServiceState.OrchestrationServiceStateSuspended -1. OrchestrationServiceStateAction.OrchestrationServiceStateActionResume -1. OrchestrationServiceStateAction.OrchestrationServiceStateActionSuspend -1. PassNames.PassNamesOobeSystem -1. PrivateEndpointServiceConnectionStatus.PrivateEndpointServiceConnectionStatusApproved -1. PrivateEndpointServiceConnectionStatus.PrivateEndpointServiceConnectionStatusPending -1. PrivateEndpointServiceConnectionStatus.PrivateEndpointServiceConnectionStatusRejected -1. ProtocolTypes.ProtocolTypesHTTP -1. ProtocolTypes.ProtocolTypesHTTPS -1. ProximityPlacementGroupType.ProximityPlacementGroupTypeStandard -1. ProximityPlacementGroupType.ProximityPlacementGroupTypeUltra -1. ResourceSkuRestrictionsReasonCode.ResourceSkuRestrictionsReasonCodeNotAvailableForSubscription -1. ResourceSkuRestrictionsReasonCode.ResourceSkuRestrictionsReasonCodeQuotaID -1. ResourceSkuRestrictionsType.ResourceSkuRestrictionsTypeLocation -1. ResourceSkuRestrictionsType.ResourceSkuRestrictionsTypeZone -1. 
RollingUpgradeActionType.RollingUpgradeActionTypeCancel -1. RollingUpgradeActionType.RollingUpgradeActionTypeStart -1. SettingNames.SettingNamesAutoLogon -1. SettingNames.SettingNamesFirstLogonCommands -1. StatusLevelTypes.StatusLevelTypesError -1. StatusLevelTypes.StatusLevelTypesInfo -1. StatusLevelTypes.StatusLevelTypesWarning -1. UpgradeMode.UpgradeModeAutomatic -1. UpgradeMode.UpgradeModeManual -1. UpgradeMode.UpgradeModeRolling -1. VMGuestPatchClassificationLinux.VMGuestPatchClassificationLinuxCritical -1. VMGuestPatchClassificationLinux.VMGuestPatchClassificationLinuxOther -1. VMGuestPatchClassificationLinux.VMGuestPatchClassificationLinuxSecurity -1. VMGuestPatchRebootSetting.VMGuestPatchRebootSettingAlways -1. VMGuestPatchRebootSetting.VMGuestPatchRebootSettingIfRequired -1. VMGuestPatchRebootSetting.VMGuestPatchRebootSettingNever -1. VirtualMachineEvictionPolicyTypes.VirtualMachineEvictionPolicyTypesDeallocate -1. VirtualMachineEvictionPolicyTypes.VirtualMachineEvictionPolicyTypesDelete -1. VirtualMachinePriorityTypes.VirtualMachinePriorityTypesLow -1. VirtualMachinePriorityTypes.VirtualMachinePriorityTypesRegular -1. VirtualMachinePriorityTypes.VirtualMachinePriorityTypesSpot -1. VirtualMachineScaleSetScaleInRules.VirtualMachineScaleSetScaleInRulesDefault -1. VirtualMachineScaleSetScaleInRules.VirtualMachineScaleSetScaleInRulesNewestVM -1. VirtualMachineScaleSetScaleInRules.VirtualMachineScaleSetScaleInRulesOldestVM -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesBasicA0 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesBasicA1 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesBasicA2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesBasicA3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesBasicA4 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardA0 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardA1 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardA10 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardA11 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardA1V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardA2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardA2V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardA2mV2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardA3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardA4 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardA4V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardA4mV2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardA5 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardA6 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardA7 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardA8 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardA8V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardA8mV2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardA9 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardB1ms -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardB1s -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardB2ms -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardB2s -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardB4ms -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardB8ms -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD1 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD11 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD11V2 -1. 
VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD12 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD12V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD13 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD13V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD14 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD14V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD15V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD16V3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD16sV3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD1V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD2V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD2V3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD2sV3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD32V3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD32sV3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD3V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD4 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD4V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD4V3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD4sV3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD5V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD64V3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD64sV3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD8V3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardD8sV3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardDS1 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardDS11 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardDS11V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardDS12 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardDS12V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardDS13 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardDS132V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardDS134V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardDS13V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardDS14 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardDS144V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardDS148V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardDS14V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardDS15V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardDS1V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardDS2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardDS2V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardDS3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardDS3V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardDS4 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardDS4V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardDS5V2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardE16V3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardE16sV3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardE2V3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardE2sV3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardE3216V3 -1. 
VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardE328sV3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardE32V3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardE32sV3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardE4V3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardE4sV3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardE6416sV3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardE6432sV3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardE64V3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardE64sV3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardE8V3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardE8sV3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardF1 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardF16 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardF16s -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardF16sV2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardF1s -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardF2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardF2s -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardF2sV2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardF32sV2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardF4 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardF4s -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardF4sV2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardF64sV2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardF72sV2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardF8 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardF8s -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardF8sV2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardG1 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardG2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardG3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardG4 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardG5 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardGS1 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardGS2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardGS3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardGS4 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardGS44 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardGS48 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardGS5 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardGS516 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardGS58 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardH16 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardH16m -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardH16mr -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardH16r -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardH8 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardH8m -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardL16s -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardL32s -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardL4s -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardL8s -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardM12832ms -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardM12864ms -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardM128ms -1. 
VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardM128s -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardM6416ms -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardM6432ms -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardM64ms -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardM64s -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardNC12 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardNC12sV2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardNC12sV3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardNC24 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardNC24r -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardNC24rsV2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardNC24rsV3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardNC24sV2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardNC24sV3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardNC6 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardNC6sV2 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardNC6sV3 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardND12s -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardND24rs -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardND24s -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardND6s -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardNV12 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardNV24 -1. VirtualMachineSizeTypes.VirtualMachineSizeTypesStandardNV6 - -### New Funcs - -1. *DiskRestorePointGrantAccessFuture.UnmarshalJSON([]byte) error -1. *DiskRestorePointRevokeAccessFuture.UnmarshalJSON([]byte) error -1. DiskRestorePointClient.GrantAccess(context.Context, string, string, string, string, GrantAccessData) (DiskRestorePointGrantAccessFuture, error) -1. DiskRestorePointClient.GrantAccessPreparer(context.Context, string, string, string, string, GrantAccessData) (*http.Request, error) -1. DiskRestorePointClient.GrantAccessResponder(*http.Response) (AccessURI, error) -1. DiskRestorePointClient.GrantAccessSender(*http.Request) (DiskRestorePointGrantAccessFuture, error) -1. DiskRestorePointClient.RevokeAccess(context.Context, string, string, string, string) (DiskRestorePointRevokeAccessFuture, error) -1. DiskRestorePointClient.RevokeAccessPreparer(context.Context, string, string, string, string) (*http.Request, error) -1. DiskRestorePointClient.RevokeAccessResponder(*http.Response) (autorest.Response, error) -1. DiskRestorePointClient.RevokeAccessSender(*http.Request) (DiskRestorePointRevokeAccessFuture, error) - -### Struct Changes - -#### New Structs - -1. DiskRestorePointGrantAccessFuture -1. 
DiskRestorePointRevokeAccessFuture diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/CHANGELOG.md similarity index 100% rename from vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/CHANGELOG.md rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/CHANGELOG.md diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/_meta.json similarity index 80% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/_meta.json rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/_meta.json index 6dad784013bb..4e37b0c3a835 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/_meta.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/_meta.json @@ -1,10 +1,10 @@ { - "commit": "af463c3f9502d353b8a009685177f13335adb8cd", + "commit": "bd64220293a403f70ae8beebd56fb86951007acf", "readme": "/_/azure-rest-api-specs/specification/compute/resource-manager/readme.md", - "tag": "package-2020-12-01", + "tag": "package-2021-07-01", "use": "@microsoft.azure/autorest.go@2.1.187", "repository_url": "https://github.com/Azure/azure-rest-api-specs.git", - "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-2020-12-01 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix /_/azure-rest-api-specs/specification/compute/resource-manager/readme.md", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-2021-07-01 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix /_/azure-rest-api-specs/specification/compute/resource-manager/readme.md", "additional_properties": { "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/availabilitysets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/availabilitysets.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/availabilitysets.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/availabilitysets.go index 07422d5ab10a..7953c6555084 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/availabilitysets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/availabilitysets.go @@ -77,7 +77,7 @@ func (client AvailabilitySetsClient) CreateOrUpdatePreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -155,7 +155,7 @@ func (client AvailabilitySetsClient) DeletePreparer(ctx context.Context, resourc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion 
= "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -230,7 +230,7 @@ func (client AvailabilitySetsClient) GetPreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -309,7 +309,7 @@ func (client AvailabilitySetsClient) ListPreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -423,7 +423,7 @@ func (client AvailabilitySetsClient) ListAvailableSizesPreparer(ctx context.Cont "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -501,7 +501,7 @@ func (client AvailabilitySetsClient) ListBySubscriptionPreparer(ctx context.Cont "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -618,7 +618,7 @@ func (client AvailabilitySetsClient) UpdatePreparer(ctx context.Context, resourc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/capacityreservationgroups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/capacityreservationgroups.go new file mode 100644 index 000000000000..a525a29711a9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/capacityreservationgroups.go @@ -0,0 +1,596 @@ +package compute + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// CapacityReservationGroupsClient is the compute Client +type CapacityReservationGroupsClient struct { + BaseClient +} + +// NewCapacityReservationGroupsClient creates an instance of the CapacityReservationGroupsClient client. +func NewCapacityReservationGroupsClient(subscriptionID string) CapacityReservationGroupsClient { + return NewCapacityReservationGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewCapacityReservationGroupsClientWithBaseURI creates an instance of the CapacityReservationGroupsClient client +// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign +// clouds, Azure stack). 
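// ---------------------------------------------------------------------------
// Editor's aside (not part of the vendored file): a short sketch of the two
// constructors documented above. The subscription ID is a placeholder, and a
// real caller would still need to attach an autorest Authorizer before use.
// ---------------------------------------------------------------------------
package main

import (
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest/azure"
)

func newCapacityReservationGroupClients(subscriptionID string) (public, sovereign compute.CapacityReservationGroupsClient) {
	// Public Azure: the default Resource Manager endpoint is fine.
	public = compute.NewCapacityReservationGroupsClient(subscriptionID)

	// Sovereign clouds / Azure Stack: pass the environment-specific endpoint,
	// e.g. Azure US Government below.
	sovereign = compute.NewCapacityReservationGroupsClientWithBaseURI(
		azure.USGovernmentCloud.ResourceManagerEndpoint, subscriptionID)
	return
}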
+func NewCapacityReservationGroupsClientWithBaseURI(baseURI string, subscriptionID string) CapacityReservationGroupsClient { + return CapacityReservationGroupsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the operation to create or update a capacity reservation group. When updating a capacity reservation +// group, only tags may be modified. Please refer to https://aka.ms/CapacityReservation for more details. +// Parameters: +// resourceGroupName - the name of the resource group. +// capacityReservationGroupName - the name of the capacity reservation group. +// parameters - parameters supplied to the Create capacity reservation Group. +func (client CapacityReservationGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, parameters CapacityReservationGroup) (result CapacityReservationGroup, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationGroupsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, capacityReservationGroupName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "CreateOrUpdate", resp, "Failure responding to request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client CapacityReservationGroupsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, parameters CapacityReservationGroup) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "capacityReservationGroupName": autorest.Encode("path", capacityReservationGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client CapacityReservationGroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. 
The method always +// closes the http.Response Body. +func (client CapacityReservationGroupsClient) CreateOrUpdateResponder(resp *http.Response) (result CapacityReservationGroup, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the operation to delete a capacity reservation group. This operation is allowed only if all the associated +// resources are disassociated from the reservation group and all capacity reservations under the reservation group +// have also been deleted. Please refer to https://aka.ms/CapacityReservation for more details. +// Parameters: +// resourceGroupName - the name of the resource group. +// capacityReservationGroupName - the name of the capacity reservation group. +func (client CapacityReservationGroupsClient) Delete(ctx context.Context, resourceGroupName string, capacityReservationGroupName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationGroupsClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, capacityReservationGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "Delete", resp, "Failure responding to request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client CapacityReservationGroupsClient) DeletePreparer(ctx context.Context, resourceGroupName string, capacityReservationGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "capacityReservationGroupName": autorest.Encode("path", capacityReservationGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client CapacityReservationGroupsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
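// ---------------------------------------------------------------------------
// Editor's aside (not part of the vendored file): a hedged sketch of the
// create-then-delete flow described in the CreateOrUpdate and Delete comments
// above. Resource names and location are placeholders, and the struct fields
// used (Location, Tags) are assumed to follow the SDK's usual tracked-resource
// shape for CapacityReservationGroup.
// ---------------------------------------------------------------------------
package main

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/to"
)

func createThenDeleteGroup(ctx context.Context, authorizer autorest.Authorizer, subscriptionID string) error {
	client := compute.NewCapacityReservationGroupsClient(subscriptionID)
	client.Authorizer = authorizer

	// Create the group; on a subsequent update only the tags may change.
	group := compute.CapacityReservationGroup{
		Location: to.StringPtr("westeurope"),
		Tags:     map[string]*string{"env": to.StringPtr("example")},
	}
	if _, err := client.CreateOrUpdate(ctx, "example-rg", "example-crg", group); err != nil {
		return err
	}

	// Delete only succeeds once every capacity reservation under the group has
	// been removed and nothing is associated with the group any longer.
	_, err := client.Delete(ctx, "example-rg", "example-crg")
	return err
}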
+func (client CapacityReservationGroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the operation that retrieves information about a capacity reservation group. +// Parameters: +// resourceGroupName - the name of the resource group. +// capacityReservationGroupName - the name of the capacity reservation group. +// expand - the expand expression to apply on the operation. 'InstanceView' will retrieve the list of instance +// views of the capacity reservations under the capacity reservation group which is a snapshot of the runtime +// properties of a capacity reservation that is managed by the platform and can change outside of control plane +// operations. +func (client CapacityReservationGroupsClient) Get(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, expand CapacityReservationGroupInstanceViewTypes) (result CapacityReservationGroup, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationGroupsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, capacityReservationGroupName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. +func (client CapacityReservationGroupsClient) GetPreparer(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, expand CapacityReservationGroupInstanceViewTypes) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "capacityReservationGroupName": autorest.Encode("path", capacityReservationGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(string(expand)) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
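// ---------------------------------------------------------------------------
// Editor's aside (not part of the vendored file): a sketch of the Get call
// documented above with the optional 'InstanceView' expansion. The enum
// constant name is assumed to follow this SDK's enum-prefix convention; the
// resource names are placeholders.
// ---------------------------------------------------------------------------
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

func showGroup(ctx context.Context, client compute.CapacityReservationGroupsClient) error {
	// Requesting the instance view also returns runtime state for the capacity
	// reservations beneath the group; pass the zero value to skip it.
	group, err := client.Get(ctx, "example-rg", "example-crg", compute.CapacityReservationGroupInstanceViewTypesInstanceView)
	if err != nil {
		return err
	}
	if group.Name != nil {
		fmt.Printf("capacity reservation group: %s\n", *group.Name)
	}
	return nil
}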
+func (client CapacityReservationGroupsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client CapacityReservationGroupsClient) GetResponder(resp *http.Response) (result CapacityReservationGroup, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroup lists all of the capacity reservation groups in the specified resource group. Use the nextLink +// property in the response to get the next page of capacity reservation groups. +// Parameters: +// resourceGroupName - the name of the resource group. +// expand - the expand expression to apply on the operation. Based on the expand param(s) specified we return +// Virtual Machine or ScaleSet VM Instance or both resource Ids which are associated to capacity reservation +// group in the response. +func (client CapacityReservationGroupsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string, expand ExpandTypesForGetCapacityReservationGroups) (result CapacityReservationGroupListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationGroupsClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.crglr.Response.Response != nil { + sc = result.crglr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listByResourceGroupNextResults + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.crglr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result.crglr, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "ListByResourceGroup", resp, "Failure responding to request") + return + } + if result.crglr.hasNextLink() && result.crglr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
+func (client CapacityReservationGroupsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string, expand ExpandTypesForGetCapacityReservationGroups) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(string(expand)) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client CapacityReservationGroupsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client CapacityReservationGroupsClient) ListByResourceGroupResponder(resp *http.Response) (result CapacityReservationGroupListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByResourceGroupNextResults retrieves the next set of results, if any. +func (client CapacityReservationGroupsClient) listByResourceGroupNextResults(ctx context.Context, lastResults CapacityReservationGroupListResult) (result CapacityReservationGroupListResult, err error) { + req, err := lastResults.capacityReservationGroupListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. 
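// ---------------------------------------------------------------------------
// Editor's aside (not part of the vendored file): a sketch of consuming the
// paged list via the Complete iterator described above, which follows the
// nextLink pages automatically. The resource group name is a placeholder and
// the empty expand value simply omits the associated VM/VMSS resource IDs.
// ---------------------------------------------------------------------------
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

func listGroupIDs(ctx context.Context, client compute.CapacityReservationGroupsClient) error {
	iter, err := client.ListByResourceGroupComplete(ctx, "example-rg", "")
	if err != nil {
		return err
	}
	for iter.NotDone() {
		if id := iter.Value().ID; id != nil {
			fmt.Println(*id)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}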
+func (client CapacityReservationGroupsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string, expand ExpandTypesForGetCapacityReservationGroups) (result CapacityReservationGroupListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationGroupsClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByResourceGroup(ctx, resourceGroupName, expand) + return +} + +// ListBySubscription lists all of the capacity reservation groups in the subscription. Use the nextLink property in +// the response to get the next page of capacity reservation groups. +// Parameters: +// expand - the expand expression to apply on the operation. Based on the expand param(s) specified we return +// Virtual Machine or ScaleSet VM Instance or both resource Ids which are associated to capacity reservation +// group in the response. +func (client CapacityReservationGroupsClient) ListBySubscription(ctx context.Context, expand ExpandTypesForGetCapacityReservationGroups) (result CapacityReservationGroupListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationGroupsClient.ListBySubscription") + defer func() { + sc := -1 + if result.crglr.Response.Response != nil { + sc = result.crglr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listBySubscriptionNextResults + req, err := client.ListBySubscriptionPreparer(ctx, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "ListBySubscription", nil, "Failure preparing request") + return + } + + resp, err := client.ListBySubscriptionSender(req) + if err != nil { + result.crglr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "ListBySubscription", resp, "Failure sending request") + return + } + + result.crglr, err = client.ListBySubscriptionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "ListBySubscription", resp, "Failure responding to request") + return + } + if result.crglr.hasNextLink() && result.crglr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListBySubscriptionPreparer prepares the ListBySubscription request. +func (client CapacityReservationGroupsClient) ListBySubscriptionPreparer(ctx context.Context, expand ExpandTypesForGetCapacityReservationGroups) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(string(expand)) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/capacityReservationGroups", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListBySubscriptionSender sends the ListBySubscription request. 
The method will close the +// http.Response Body if it receives an error. +func (client CapacityReservationGroupsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always +// closes the http.Response Body. +func (client CapacityReservationGroupsClient) ListBySubscriptionResponder(resp *http.Response) (result CapacityReservationGroupListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listBySubscriptionNextResults retrieves the next set of results, if any. +func (client CapacityReservationGroupsClient) listBySubscriptionNextResults(ctx context.Context, lastResults CapacityReservationGroupListResult) (result CapacityReservationGroupListResult, err error) { + req, err := lastResults.capacityReservationGroupListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "listBySubscriptionNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListBySubscriptionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "listBySubscriptionNextResults", resp, "Failure sending next results request") + } + result, err = client.ListBySubscriptionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "listBySubscriptionNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListBySubscriptionComplete enumerates all values, automatically crossing page boundaries as required. +func (client CapacityReservationGroupsClient) ListBySubscriptionComplete(ctx context.Context, expand ExpandTypesForGetCapacityReservationGroups) (result CapacityReservationGroupListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationGroupsClient.ListBySubscription") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListBySubscription(ctx, expand) + return +} + +// Update the operation to update a capacity reservation group. When updating a capacity reservation group, only tags +// may be modified. +// Parameters: +// resourceGroupName - the name of the resource group. +// capacityReservationGroupName - the name of the capacity reservation group. +// parameters - parameters supplied to the Update capacity reservation Group operation. 
+func (client CapacityReservationGroupsClient) Update(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, parameters CapacityReservationGroupUpdate) (result CapacityReservationGroup, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationGroupsClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, capacityReservationGroupName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "Update", resp, "Failure responding to request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client CapacityReservationGroupsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, parameters CapacityReservationGroupUpdate) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "capacityReservationGroupName": autorest.Encode("path", capacityReservationGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client CapacityReservationGroupsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client CapacityReservationGroupsClient) UpdateResponder(resp *http.Response) (result CapacityReservationGroup, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/capacityreservations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/capacityreservations.go new file mode 100644 index 000000000000..fc894216ae13 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/capacityreservations.go @@ -0,0 +1,493 @@ +package compute + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// CapacityReservationsClient is the compute Client +type CapacityReservationsClient struct { + BaseClient +} + +// NewCapacityReservationsClient creates an instance of the CapacityReservationsClient client. +func NewCapacityReservationsClient(subscriptionID string) CapacityReservationsClient { + return NewCapacityReservationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewCapacityReservationsClientWithBaseURI creates an instance of the CapacityReservationsClient client using a custom +// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). +func NewCapacityReservationsClientWithBaseURI(baseURI string, subscriptionID string) CapacityReservationsClient { + return CapacityReservationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the operation to create or update a capacity reservation. Please note some properties can be set only +// during capacity reservation creation. Please refer to https://aka.ms/CapacityReservation for more details. +// Parameters: +// resourceGroupName - the name of the resource group. +// capacityReservationGroupName - the name of the capacity reservation group. +// capacityReservationName - the name of the capacity reservation. +// parameters - parameters supplied to the Create capacity reservation. 
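For orientation, here is a minimal consumption sketch (not part of the generated file) for the page-based ListByResourceGroup call defined in the CapacityReservationGroupsClient above. It assumes a client that has already been constructed and given an authorizer; the helper name is hypothetical, and the page methods (NotDone, Values, NextWithContext) are the standard AutoRest pagination surface generated in the package's models rather than in this file.

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

// listCapacityReservationGroups is a sketch only: it enumerates capacity
// reservation groups in a resource group page by page.
func listCapacityReservationGroups(ctx context.Context, client compute.CapacityReservationGroupsClient, resourceGroup string) error {
	// Passing the zero value for the expand parameter omits $expand from the request.
	page, err := client.ListByResourceGroup(ctx, resourceGroup, "")
	if err != nil {
		return err
	}
	for page.NotDone() {
		for _, group := range page.Values() {
			if group.Name != nil {
				fmt.Printf("capacity reservation group: %s\n", *group.Name)
			}
		}
		// NextWithContext follows the nextLink returned by the service.
		if err := page.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```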
+func (client CapacityReservationsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters CapacityReservation) (result CapacityReservationsCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Sku", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("compute.CapacityReservationsClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, capacityReservationGroupName, capacityReservationName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client CapacityReservationsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters CapacityReservation) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "capacityReservationGroupName": autorest.Encode("path", capacityReservationGroupName), + "capacityReservationName": autorest.Encode("path", capacityReservationName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations/{capacityReservationName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client CapacityReservationsClient) CreateOrUpdateSender(req *http.Request) (future CapacityReservationsCreateOrUpdateFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client CapacityReservationsClient) CreateOrUpdateResponder(resp *http.Response) (result CapacityReservation, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the operation to delete a capacity reservation. This operation is allowed only when all the associated +// resources are disassociated from the capacity reservation. Please refer to https://aka.ms/CapacityReservation for +// more details. +// Parameters: +// resourceGroupName - the name of the resource group. +// capacityReservationGroupName - the name of the capacity reservation group. +// capacityReservationName - the name of the capacity reservation. +func (client CapacityReservationsClient) Delete(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string) (result CapacityReservationsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationsClient.Delete") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, capacityReservationGroupName, capacityReservationName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client CapacityReservationsClient) DeletePreparer(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "capacityReservationGroupName": autorest.Encode("path", capacityReservationGroupName), + "capacityReservationName": autorest.Encode("path", capacityReservationName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations/{capacityReservationName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client CapacityReservationsClient) DeleteSender(req *http.Request) (future CapacityReservationsDeleteFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client CapacityReservationsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the operation that retrieves information about the capacity reservation. +// Parameters: +// resourceGroupName - the name of the resource group. +// capacityReservationGroupName - the name of the capacity reservation group. +// capacityReservationName - the name of the capacity reservation. +// expand - the expand expression to apply on the operation. 'InstanceView' retrieves a snapshot of the runtime +// properties of the capacity reservation that is managed by the platform and can change outside of control +// plane operations. +func (client CapacityReservationsClient) Get(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, expand CapacityReservationInstanceViewTypes) (result CapacityReservation, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, capacityReservationGroupName, capacityReservationName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client CapacityReservationsClient) GetPreparer(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, expand CapacityReservationInstanceViewTypes) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "capacityReservationGroupName": autorest.Encode("path", capacityReservationGroupName), + "capacityReservationName": autorest.Encode("path", capacityReservationName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(string(expand)) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations/{capacityReservationName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client CapacityReservationsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client CapacityReservationsClient) GetResponder(resp *http.Response) (result CapacityReservation, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByCapacityReservationGroup lists all of the capacity reservations in the specified capacity reservation group. +// Use the nextLink property in the response to get the next page of capacity reservations. +// Parameters: +// resourceGroupName - the name of the resource group. +// capacityReservationGroupName - the name of the capacity reservation group. 
+func (client CapacityReservationsClient) ListByCapacityReservationGroup(ctx context.Context, resourceGroupName string, capacityReservationGroupName string) (result CapacityReservationListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationsClient.ListByCapacityReservationGroup") + defer func() { + sc := -1 + if result.crlr.Response.Response != nil { + sc = result.crlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listByCapacityReservationGroupNextResults + req, err := client.ListByCapacityReservationGroupPreparer(ctx, resourceGroupName, capacityReservationGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "ListByCapacityReservationGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByCapacityReservationGroupSender(req) + if err != nil { + result.crlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "ListByCapacityReservationGroup", resp, "Failure sending request") + return + } + + result.crlr, err = client.ListByCapacityReservationGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "ListByCapacityReservationGroup", resp, "Failure responding to request") + return + } + if result.crlr.hasNextLink() && result.crlr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListByCapacityReservationGroupPreparer prepares the ListByCapacityReservationGroup request. +func (client CapacityReservationsClient) ListByCapacityReservationGroupPreparer(ctx context.Context, resourceGroupName string, capacityReservationGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "capacityReservationGroupName": autorest.Encode("path", capacityReservationGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByCapacityReservationGroupSender sends the ListByCapacityReservationGroup request. The method will close the +// http.Response Body if it receives an error. +func (client CapacityReservationsClient) ListByCapacityReservationGroupSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListByCapacityReservationGroupResponder handles the response to the ListByCapacityReservationGroup request. The method always +// closes the http.Response Body. 
+func (client CapacityReservationsClient) ListByCapacityReservationGroupResponder(resp *http.Response) (result CapacityReservationListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByCapacityReservationGroupNextResults retrieves the next set of results, if any. +func (client CapacityReservationsClient) listByCapacityReservationGroupNextResults(ctx context.Context, lastResults CapacityReservationListResult) (result CapacityReservationListResult, err error) { + req, err := lastResults.capacityReservationListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "listByCapacityReservationGroupNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByCapacityReservationGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "listByCapacityReservationGroupNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByCapacityReservationGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "listByCapacityReservationGroupNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByCapacityReservationGroupComplete enumerates all values, automatically crossing page boundaries as required. +func (client CapacityReservationsClient) ListByCapacityReservationGroupComplete(ctx context.Context, resourceGroupName string, capacityReservationGroupName string) (result CapacityReservationListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationsClient.ListByCapacityReservationGroup") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByCapacityReservationGroup(ctx, resourceGroupName, capacityReservationGroupName) + return +} + +// Update the operation to update a capacity reservation. +// Parameters: +// resourceGroupName - the name of the resource group. +// capacityReservationGroupName - the name of the capacity reservation group. +// capacityReservationName - the name of the capacity reservation. +// parameters - parameters supplied to the Update capacity reservation operation. 
+func (client CapacityReservationsClient) Update(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters CapacityReservationUpdate) (result CapacityReservationsUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationsClient.Update") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, capacityReservationGroupName, capacityReservationName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client CapacityReservationsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters CapacityReservationUpdate) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "capacityReservationGroupName": autorest.Encode("path", capacityReservationGroupName), + "capacityReservationName": autorest.Encode("path", capacityReservationName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations/{capacityReservationName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client CapacityReservationsClient) UpdateSender(req *http.Request) (future CapacityReservationsUpdateFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client CapacityReservationsClient) UpdateResponder(resp *http.Response) (result CapacityReservation, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/client.go similarity index 100% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/client.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/client.go diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudserviceoperatingsystems.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudserviceoperatingsystems.go new file mode 100644 index 000000000000..7dfce08d9096 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudserviceoperatingsystems.go @@ -0,0 +1,422 @@ +package compute + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// CloudServiceOperatingSystemsClient is the compute Client +type CloudServiceOperatingSystemsClient struct { + BaseClient +} + +// NewCloudServiceOperatingSystemsClient creates an instance of the CloudServiceOperatingSystemsClient client. +func NewCloudServiceOperatingSystemsClient(subscriptionID string) CloudServiceOperatingSystemsClient { + return NewCloudServiceOperatingSystemsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewCloudServiceOperatingSystemsClientWithBaseURI creates an instance of the CloudServiceOperatingSystemsClient +// client using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI +// (sovereign clouds, Azure stack). +func NewCloudServiceOperatingSystemsClientWithBaseURI(baseURI string, subscriptionID string) CloudServiceOperatingSystemsClient { + return CloudServiceOperatingSystemsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// GetOSFamily gets properties of a guest operating system family that can be specified in the XML service +// configuration (.cscfg) for a cloud service. +// Parameters: +// location - name of the location that the OS family pertains to. +// osFamilyName - name of the OS family. 
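The CreateOrUpdate operation on the CapacityReservationsClient above is long-running and returns a future. A hedged sketch of driving it to completion follows; it assumes an already-authorized client, uses WaitForCompletionRef from the embedded azure.FutureAPI, and borrows to.StringPtr/to.Int64Ptr from go-autorest's "to" package. The SKU name, capacity, and region are illustrative values only, and the Sku block is mandatory per the validation in CreateOrUpdate.

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// createCapacityReservation is a sketch only: it creates a capacity reservation
// and blocks until the long-running PUT has finished.
func createCapacityReservation(ctx context.Context, client compute.CapacityReservationsClient, resourceGroup, groupName, name string) (compute.CapacityReservation, error) {
	params := compute.CapacityReservation{
		Location: to.StringPtr("westeurope"), // illustrative region
		Sku: &compute.Sku{ // required: CreateOrUpdate validates that Sku is non-nil
			Name:     to.StringPtr("Standard_D2s_v3"),
			Capacity: to.Int64Ptr(2),
		},
	}
	future, err := client.CreateOrUpdate(ctx, resourceGroup, groupName, name, params)
	if err != nil {
		return compute.CapacityReservation{}, err
	}
	// Poll the operation to completion, then read the reservation back via Get.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return compute.CapacityReservation{}, err
	}
	return client.Get(ctx, resourceGroup, groupName, name, "")
}
```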
+func (client CloudServiceOperatingSystemsClient) GetOSFamily(ctx context.Context, location string, osFamilyName string) (result OSFamily, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceOperatingSystemsClient.GetOSFamily") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetOSFamilyPreparer(ctx, location, osFamilyName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "GetOSFamily", nil, "Failure preparing request") + return + } + + resp, err := client.GetOSFamilySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "GetOSFamily", resp, "Failure sending request") + return + } + + result, err = client.GetOSFamilyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "GetOSFamily", resp, "Failure responding to request") + return + } + + return +} + +// GetOSFamilyPreparer prepares the GetOSFamily request. +func (client CloudServiceOperatingSystemsClient) GetOSFamilyPreparer(ctx context.Context, location string, osFamilyName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "osFamilyName": autorest.Encode("path", osFamilyName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/cloudServiceOsFamilies/{osFamilyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetOSFamilySender sends the GetOSFamily request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServiceOperatingSystemsClient) GetOSFamilySender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetOSFamilyResponder handles the response to the GetOSFamily request. The method always +// closes the http.Response Body. +func (client CloudServiceOperatingSystemsClient) GetOSFamilyResponder(resp *http.Response) (result OSFamily, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetOSVersion gets properties of a guest operating system version that can be specified in the XML service +// configuration (.cscfg) for a cloud service. +// Parameters: +// location - name of the location that the OS version pertains to. +// osVersionName - name of the OS version. 
+func (client CloudServiceOperatingSystemsClient) GetOSVersion(ctx context.Context, location string, osVersionName string) (result OSVersion, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceOperatingSystemsClient.GetOSVersion") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetOSVersionPreparer(ctx, location, osVersionName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "GetOSVersion", nil, "Failure preparing request") + return + } + + resp, err := client.GetOSVersionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "GetOSVersion", resp, "Failure sending request") + return + } + + result, err = client.GetOSVersionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "GetOSVersion", resp, "Failure responding to request") + return + } + + return +} + +// GetOSVersionPreparer prepares the GetOSVersion request. +func (client CloudServiceOperatingSystemsClient) GetOSVersionPreparer(ctx context.Context, location string, osVersionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "osVersionName": autorest.Encode("path", osVersionName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/cloudServiceOsVersions/{osVersionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetOSVersionSender sends the GetOSVersion request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServiceOperatingSystemsClient) GetOSVersionSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetOSVersionResponder handles the response to the GetOSVersion request. The method always +// closes the http.Response Body. +func (client CloudServiceOperatingSystemsClient) GetOSVersionResponder(resp *http.Response) (result OSVersion, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListOSFamilies gets a list of all guest operating system families available to be specified in the XML service +// configuration (.cscfg) for a cloud service. Use nextLink property in the response to get the next page of OS +// Families. Do this till nextLink is null to fetch all the OS Families. +// Parameters: +// location - name of the location that the OS families pertain to. 
+func (client CloudServiceOperatingSystemsClient) ListOSFamilies(ctx context.Context, location string) (result OSFamilyListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceOperatingSystemsClient.ListOSFamilies") + defer func() { + sc := -1 + if result.oflr.Response.Response != nil { + sc = result.oflr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listOSFamiliesNextResults + req, err := client.ListOSFamiliesPreparer(ctx, location) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "ListOSFamilies", nil, "Failure preparing request") + return + } + + resp, err := client.ListOSFamiliesSender(req) + if err != nil { + result.oflr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "ListOSFamilies", resp, "Failure sending request") + return + } + + result.oflr, err = client.ListOSFamiliesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "ListOSFamilies", resp, "Failure responding to request") + return + } + if result.oflr.hasNextLink() && result.oflr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListOSFamiliesPreparer prepares the ListOSFamilies request. +func (client CloudServiceOperatingSystemsClient) ListOSFamiliesPreparer(ctx context.Context, location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/cloudServiceOsFamilies", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListOSFamiliesSender sends the ListOSFamilies request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServiceOperatingSystemsClient) ListOSFamiliesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListOSFamiliesResponder handles the response to the ListOSFamilies request. The method always +// closes the http.Response Body. +func (client CloudServiceOperatingSystemsClient) ListOSFamiliesResponder(resp *http.Response) (result OSFamilyListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listOSFamiliesNextResults retrieves the next set of results, if any. 
+func (client CloudServiceOperatingSystemsClient) listOSFamiliesNextResults(ctx context.Context, lastResults OSFamilyListResult) (result OSFamilyListResult, err error) { + req, err := lastResults.oSFamilyListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "listOSFamiliesNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListOSFamiliesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "listOSFamiliesNextResults", resp, "Failure sending next results request") + } + result, err = client.ListOSFamiliesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "listOSFamiliesNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListOSFamiliesComplete enumerates all values, automatically crossing page boundaries as required. +func (client CloudServiceOperatingSystemsClient) ListOSFamiliesComplete(ctx context.Context, location string) (result OSFamilyListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceOperatingSystemsClient.ListOSFamilies") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListOSFamilies(ctx, location) + return +} + +// ListOSVersions gets a list of all guest operating system versions available to be specified in the XML service +// configuration (.cscfg) for a cloud service. Use nextLink property in the response to get the next page of OS +// versions. Do this till nextLink is null to fetch all the OS versions. +// Parameters: +// location - name of the location that the OS versions pertain to. +func (client CloudServiceOperatingSystemsClient) ListOSVersions(ctx context.Context, location string) (result OSVersionListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceOperatingSystemsClient.ListOSVersions") + defer func() { + sc := -1 + if result.ovlr.Response.Response != nil { + sc = result.ovlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listOSVersionsNextResults + req, err := client.ListOSVersionsPreparer(ctx, location) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "ListOSVersions", nil, "Failure preparing request") + return + } + + resp, err := client.ListOSVersionsSender(req) + if err != nil { + result.ovlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "ListOSVersions", resp, "Failure sending request") + return + } + + result.ovlr, err = client.ListOSVersionsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "ListOSVersions", resp, "Failure responding to request") + return + } + if result.ovlr.hasNextLink() && result.ovlr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListOSVersionsPreparer prepares the ListOSVersions request. 
+func (client CloudServiceOperatingSystemsClient) ListOSVersionsPreparer(ctx context.Context, location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/cloudServiceOsVersions", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListOSVersionsSender sends the ListOSVersions request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServiceOperatingSystemsClient) ListOSVersionsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListOSVersionsResponder handles the response to the ListOSVersions request. The method always +// closes the http.Response Body. +func (client CloudServiceOperatingSystemsClient) ListOSVersionsResponder(resp *http.Response) (result OSVersionListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listOSVersionsNextResults retrieves the next set of results, if any. +func (client CloudServiceOperatingSystemsClient) listOSVersionsNextResults(ctx context.Context, lastResults OSVersionListResult) (result OSVersionListResult, err error) { + req, err := lastResults.oSVersionListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "listOSVersionsNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListOSVersionsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "listOSVersionsNextResults", resp, "Failure sending next results request") + } + result, err = client.ListOSVersionsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "listOSVersionsNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListOSVersionsComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client CloudServiceOperatingSystemsClient) ListOSVersionsComplete(ctx context.Context, location string) (result OSVersionListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceOperatingSystemsClient.ListOSVersions") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListOSVersions(ctx, location) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudserviceroleinstances.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudserviceroleinstances.go new file mode 100644 index 000000000000..4edea0aec5d7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudserviceroleinstances.go @@ -0,0 +1,699 @@ +package compute + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// CloudServiceRoleInstancesClient is the compute Client +type CloudServiceRoleInstancesClient struct { + BaseClient +} + +// NewCloudServiceRoleInstancesClient creates an instance of the CloudServiceRoleInstancesClient client. +func NewCloudServiceRoleInstancesClient(subscriptionID string) CloudServiceRoleInstancesClient { + return NewCloudServiceRoleInstancesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewCloudServiceRoleInstancesClientWithBaseURI creates an instance of the CloudServiceRoleInstancesClient client +// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign +// clouds, Azure stack). +func NewCloudServiceRoleInstancesClientWithBaseURI(baseURI string, subscriptionID string) CloudServiceRoleInstancesClient { + return CloudServiceRoleInstancesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Delete deletes a role instance from a cloud service. +// Parameters: +// roleInstanceName - name of the role instance. +func (client CloudServiceRoleInstancesClient) Delete(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string) (result CloudServiceRoleInstancesDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRoleInstancesClient.Delete") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, roleInstanceName, resourceGroupName, cloudServiceName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
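As a counterpart to the page-based sketch earlier, the Complete variants on the CloudServiceOperatingSystemsClient above follow nextLink automatically until it is exhausted. A minimal, hedged illustration using ListOSFamiliesComplete follows; it again assumes an already-configured client, the helper name is hypothetical, and the iterator methods (NotDone, Value, NextWithContext) come from the package's generated models rather than this file.

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

// printOSFamilies is a sketch only: it walks every guest OS family available
// in a location, crossing page boundaries via the generated iterator.
func printOSFamilies(ctx context.Context, client compute.CloudServiceOperatingSystemsClient, location string) error {
	it, err := client.ListOSFamiliesComplete(ctx, location)
	if err != nil {
		return err
	}
	for it.NotDone() {
		if family := it.Value(); family.Name != nil {
			fmt.Printf("OS family: %s\n", *family.Name)
		}
		// NextWithContext requests the next page once the current one is drained.
		if err := it.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```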
+func (client CloudServiceRoleInstancesClient) DeletePreparer(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "cloudServiceName": autorest.Encode("path", cloudServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "roleInstanceName": autorest.Encode("path", roleInstanceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServiceRoleInstancesClient) DeleteSender(req *http.Request) (future CloudServiceRoleInstancesDeleteFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client CloudServiceRoleInstancesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a role instance from a cloud service. +// Parameters: +// roleInstanceName - name of the role instance. +// expand - the expand expression to apply to the operation. 'UserData' is not supported for cloud services. +func (client CloudServiceRoleInstancesClient) Get(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, expand InstanceViewTypes) (result RoleInstance, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRoleInstancesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, roleInstanceName, resourceGroupName, cloudServiceName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client CloudServiceRoleInstancesClient) GetPreparer(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, expand InstanceViewTypes) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "cloudServiceName": autorest.Encode("path", cloudServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "roleInstanceName": autorest.Encode("path", roleInstanceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(string(expand)) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServiceRoleInstancesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client CloudServiceRoleInstancesClient) GetResponder(resp *http.Response) (result RoleInstance, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetInstanceView retrieves information about the run-time state of a role instance in a cloud service. +// Parameters: +// roleInstanceName - name of the role instance. +func (client CloudServiceRoleInstancesClient) GetInstanceView(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string) (result RoleInstanceInstanceView, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRoleInstancesClient.GetInstanceView") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetInstanceViewPreparer(ctx, roleInstanceName, resourceGroupName, cloudServiceName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "GetInstanceView", nil, "Failure preparing request") + return + } + + resp, err := client.GetInstanceViewSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "GetInstanceView", resp, "Failure sending request") + return + } + + result, err = client.GetInstanceViewResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "GetInstanceView", resp, "Failure responding to request") + return + } + + return +} + +// GetInstanceViewPreparer prepares the GetInstanceView request. 
+func (client CloudServiceRoleInstancesClient) GetInstanceViewPreparer(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "cloudServiceName": autorest.Encode("path", cloudServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "roleInstanceName": autorest.Encode("path", roleInstanceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/instanceView", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetInstanceViewSender sends the GetInstanceView request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServiceRoleInstancesClient) GetInstanceViewSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetInstanceViewResponder handles the response to the GetInstanceView request. The method always +// closes the http.Response Body. +func (client CloudServiceRoleInstancesClient) GetInstanceViewResponder(resp *http.Response) (result RoleInstanceInstanceView, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetRemoteDesktopFile gets a remote desktop file for a role instance in a cloud service. +// Parameters: +// roleInstanceName - name of the role instance. +func (client CloudServiceRoleInstancesClient) GetRemoteDesktopFile(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string) (result ReadCloser, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRoleInstancesClient.GetRemoteDesktopFile") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetRemoteDesktopFilePreparer(ctx, roleInstanceName, resourceGroupName, cloudServiceName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "GetRemoteDesktopFile", nil, "Failure preparing request") + return + } + + resp, err := client.GetRemoteDesktopFileSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "GetRemoteDesktopFile", resp, "Failure sending request") + return + } + + result, err = client.GetRemoteDesktopFileResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "GetRemoteDesktopFile", resp, "Failure responding to request") + return + } + + return +} + +// GetRemoteDesktopFilePreparer prepares the GetRemoteDesktopFile request. 
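GetRemoteDesktopFile streams the .rdp payload rather than unmarshalling JSON; as the responder below shows, the result only wraps the raw response body, so the caller is responsible for reading and closing it. A hedged sketch with placeholder names (Authorizer assumed configured):

```go
package main

import (
	"context"
	"io"
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

func main() {
	// Placeholder IDs; Authorizer configuration is assumed.
	client := compute.NewCloudServiceRoleInstancesClient("00000000-0000-0000-0000-000000000000")

	ctx := context.Background()
	res, err := client.GetRemoteDesktopFile(ctx, "WorkerRole_IN_0", "example-rg", "example-cs")
	if err != nil {
		log.Fatal(err)
	}
	if res.Value == nil {
		log.Fatal("no RDP payload returned")
	}
	// Value wraps the raw response body: read it out, then close it.
	defer (*res.Value).Close()
	data, err := io.ReadAll(*res.Value)
	if err != nil {
		log.Fatal(err)
	}
	if err := os.WriteFile("WorkerRole_IN_0.rdp", data, 0o600); err != nil {
		log.Fatal(err)
	}
}
```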
+func (client CloudServiceRoleInstancesClient) GetRemoteDesktopFilePreparer(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "cloudServiceName": autorest.Encode("path", cloudServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "roleInstanceName": autorest.Encode("path", roleInstanceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/remoteDesktopFile", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetRemoteDesktopFileSender sends the GetRemoteDesktopFile request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServiceRoleInstancesClient) GetRemoteDesktopFileSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetRemoteDesktopFileResponder handles the response to the GetRemoteDesktopFile request. The method always +// closes the http.Response Body. +func (client CloudServiceRoleInstancesClient) GetRemoteDesktopFileResponder(resp *http.Response) (result ReadCloser, err error) { + result.Value = &resp.Body + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK)) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets the list of all role instances in a cloud service. Use nextLink property in the response to get the next +// page of role instances. Do this till nextLink is null to fetch all the role instances. +// Parameters: +// expand - the expand expression to apply to the operation. 'UserData' is not supported for cloud services. 
+func (client CloudServiceRoleInstancesClient) List(ctx context.Context, resourceGroupName string, cloudServiceName string, expand InstanceViewTypes) (result RoleInstanceListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRoleInstancesClient.List") + defer func() { + sc := -1 + if result.rilr.Response.Response != nil { + sc = result.rilr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, cloudServiceName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.rilr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "List", resp, "Failure sending request") + return + } + + result.rilr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "List", resp, "Failure responding to request") + return + } + if result.rilr.hasNextLink() && result.rilr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListPreparer prepares the List request. +func (client CloudServiceRoleInstancesClient) ListPreparer(ctx context.Context, resourceGroupName string, cloudServiceName string, expand InstanceViewTypes) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "cloudServiceName": autorest.Encode("path", cloudServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(string(expand)) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServiceRoleInstancesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client CloudServiceRoleInstancesClient) ListResponder(resp *http.Response) (result RoleInstanceListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
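The List call above returns a single page plus a paging function; a caller either walks the pages manually or uses the ListComplete iterator defined further down. A minimal sketch of the manual page walk — placeholder names, an empty expand value, and the generated pager helpers (NotDone, Values, NextWithContext) assumed from the rest of the package:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

func main() {
	// Placeholder IDs; Authorizer configuration is assumed. The expand argument
	// is left empty here; an instance-view expansion could be requested instead.
	client := compute.NewCloudServiceRoleInstancesClient("00000000-0000-0000-0000-000000000000")

	ctx := context.Background()
	page, err := client.List(ctx, "example-rg", "example-cs", "")
	if err != nil {
		log.Fatal(err)
	}
	// Walk every page; NextWithContext follows nextLink until it is exhausted.
	for page.NotDone() {
		for _, ri := range page.Values() {
			if ri.Name != nil {
				log.Println(*ri.Name)
			}
		}
		if err := page.NextWithContext(ctx); err != nil {
			log.Fatal(err)
		}
	}
}
```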
+func (client CloudServiceRoleInstancesClient) listNextResults(ctx context.Context, lastResults RoleInstanceListResult) (result RoleInstanceListResult, err error) { + req, err := lastResults.roleInstanceListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client CloudServiceRoleInstancesClient) ListComplete(ctx context.Context, resourceGroupName string, cloudServiceName string, expand InstanceViewTypes) (result RoleInstanceListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRoleInstancesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, cloudServiceName, expand) + return +} + +// Rebuild the Rebuild Role Instance asynchronous operation reinstalls the operating system on instances of web roles +// or worker roles and initializes the storage resources that are used by them. If you do not want to initialize +// storage resources, you can use Reimage Role Instance. +// Parameters: +// roleInstanceName - name of the role instance. +func (client CloudServiceRoleInstancesClient) Rebuild(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string) (result CloudServiceRoleInstancesRebuildFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRoleInstancesClient.Rebuild") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.RebuildPreparer(ctx, roleInstanceName, resourceGroupName, cloudServiceName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "Rebuild", nil, "Failure preparing request") + return + } + + result, err = client.RebuildSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "Rebuild", result.Response(), "Failure sending request") + return + } + + return +} + +// RebuildPreparer prepares the Rebuild request. 
+func (client CloudServiceRoleInstancesClient) RebuildPreparer(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "cloudServiceName": autorest.Encode("path", cloudServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "roleInstanceName": autorest.Encode("path", roleInstanceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/rebuild", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RebuildSender sends the Rebuild request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServiceRoleInstancesClient) RebuildSender(req *http.Request) (future CloudServiceRoleInstancesRebuildFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// RebuildResponder handles the response to the Rebuild request. The method always +// closes the http.Response Body. +func (client CloudServiceRoleInstancesClient) RebuildResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Reimage the Reimage Role Instance asynchronous operation reinstalls the operating system on instances of web roles +// or worker roles. +// Parameters: +// roleInstanceName - name of the role instance. +func (client CloudServiceRoleInstancesClient) Reimage(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string) (result CloudServiceRoleInstancesReimageFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRoleInstancesClient.Reimage") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ReimagePreparer(ctx, roleInstanceName, resourceGroupName, cloudServiceName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "Reimage", nil, "Failure preparing request") + return + } + + result, err = client.ReimageSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "Reimage", result.Response(), "Failure sending request") + return + } + + return +} + +// ReimagePreparer prepares the Reimage request. 
+func (client CloudServiceRoleInstancesClient) ReimagePreparer(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "cloudServiceName": autorest.Encode("path", cloudServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "roleInstanceName": autorest.Encode("path", roleInstanceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/reimage", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ReimageSender sends the Reimage request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServiceRoleInstancesClient) ReimageSender(req *http.Request) (future CloudServiceRoleInstancesReimageFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// ReimageResponder handles the response to the Reimage request. The method always +// closes the http.Response Body. +func (client CloudServiceRoleInstancesClient) ReimageResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Restart the Reboot Role Instance asynchronous operation requests a reboot of a role instance in the cloud service. +// Parameters: +// roleInstanceName - name of the role instance. +func (client CloudServiceRoleInstancesClient) Restart(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string) (result CloudServiceRoleInstancesRestartFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRoleInstancesClient.Restart") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.RestartPreparer(ctx, roleInstanceName, resourceGroupName, cloudServiceName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "Restart", nil, "Failure preparing request") + return + } + + result, err = client.RestartSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "Restart", result.Response(), "Failure sending request") + return + } + + return +} + +// RestartPreparer prepares the Restart request. 
+func (client CloudServiceRoleInstancesClient) RestartPreparer(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "cloudServiceName": autorest.Encode("path", cloudServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "roleInstanceName": autorest.Encode("path", roleInstanceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/restart", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RestartSender sends the Restart request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServiceRoleInstancesClient) RestartSender(req *http.Request) (future CloudServiceRoleInstancesRestartFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// RestartResponder handles the response to the Restart request. The method always +// closes the http.Response Body. +func (client CloudServiceRoleInstancesClient) RestartResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudserviceroles.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudserviceroles.go new file mode 100644 index 000000000000..dac28bd988b1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudserviceroles.go @@ -0,0 +1,224 @@ +package compute + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// CloudServiceRolesClient is the compute Client +type CloudServiceRolesClient struct { + BaseClient +} + +// NewCloudServiceRolesClient creates an instance of the CloudServiceRolesClient client. +func NewCloudServiceRolesClient(subscriptionID string) CloudServiceRolesClient { + return NewCloudServiceRolesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewCloudServiceRolesClientWithBaseURI creates an instance of the CloudServiceRolesClient client using a custom +// endpoint. 
Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). +func NewCloudServiceRolesClientWithBaseURI(baseURI string, subscriptionID string) CloudServiceRolesClient { + return CloudServiceRolesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets a role from a cloud service. +// Parameters: +// roleName - name of the role. +func (client CloudServiceRolesClient) Get(ctx context.Context, roleName string, resourceGroupName string, cloudServiceName string) (result CloudServiceRole, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRolesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, roleName, resourceGroupName, cloudServiceName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRolesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CloudServiceRolesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRolesClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. +func (client CloudServiceRolesClient) GetPreparer(ctx context.Context, roleName string, resourceGroupName string, cloudServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "cloudServiceName": autorest.Encode("path", cloudServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "roleName": autorest.Encode("path", roleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roles/{roleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServiceRolesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client CloudServiceRolesClient) GetResponder(resp *http.Response) (result CloudServiceRole, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of all roles in a cloud service. Use nextLink property in the response to get the next page of +// roles. Do this till nextLink is null to fetch all the roles. 
+func (client CloudServiceRolesClient) List(ctx context.Context, resourceGroupName string, cloudServiceName string) (result CloudServiceRoleListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRolesClient.List") + defer func() { + sc := -1 + if result.csrlr.Response.Response != nil { + sc = result.csrlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, cloudServiceName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRolesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.csrlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CloudServiceRolesClient", "List", resp, "Failure sending request") + return + } + + result.csrlr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRolesClient", "List", resp, "Failure responding to request") + return + } + if result.csrlr.hasNextLink() && result.csrlr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListPreparer prepares the List request. +func (client CloudServiceRolesClient) ListPreparer(ctx context.Context, resourceGroupName string, cloudServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "cloudServiceName": autorest.Encode("path", cloudServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roles", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServiceRolesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client CloudServiceRolesClient) ListResponder(resp *http.Response) (result CloudServiceRoleListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client CloudServiceRolesClient) listNextResults(ctx context.Context, lastResults CloudServiceRoleListResult) (result CloudServiceRoleListResult, err error) { + req, err := lastResults.cloudServiceRoleListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.CloudServiceRolesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.CloudServiceRolesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRolesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client CloudServiceRolesClient) ListComplete(ctx context.Context, resourceGroupName string, cloudServiceName string) (result CloudServiceRoleListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRolesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, cloudServiceName) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudservices.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudservices.go new file mode 100644 index 000000000000..7086dd644a08 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudservices.go @@ -0,0 +1,1198 @@ +package compute + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// CloudServicesClient is the compute Client +type CloudServicesClient struct { + BaseClient +} + +// NewCloudServicesClient creates an instance of the CloudServicesClient client. +func NewCloudServicesClient(subscriptionID string) CloudServicesClient { + return NewCloudServicesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewCloudServicesClientWithBaseURI creates an instance of the CloudServicesClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewCloudServicesClientWithBaseURI(baseURI string, subscriptionID string) CloudServicesClient { + return CloudServicesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create or update a cloud service. Please note some properties can be set only during cloud service +// creation. +// Parameters: +// resourceGroupName - name of the resource group. +// cloudServiceName - name of the cloud service. 
+// parameters - the cloud service object. +func (client CloudServicesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters *CloudService) (result CloudServicesCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Location", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { + return result, validation.NewError("compute.CloudServicesClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, cloudServiceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client CloudServicesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters *CloudService) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "cloudServiceName": autorest.Encode("path", cloudServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + parameters.ID = nil + parameters.Name = nil + parameters.Type = nil + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if parameters != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(parameters)) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServicesClient) CreateOrUpdateSender(req *http.Request) (future CloudServicesCreateOrUpdateFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
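The only constraint enforced client-side above is that parameters.Location must be set; everything else is validated by the service. A rough sketch of a create call — placeholder names, Authorizer assumed configured, and `to.StringPtr` from go-autorest used for the pointer field:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Placeholder IDs; Authorizer configuration is assumed. Location is the one
	// field required by the client-side validation; a real payload would also
	// carry Properties (package URL, configuration, roles, network profile, ...).
	client := compute.NewCloudServicesClient("00000000-0000-0000-0000-000000000000")

	ctx := context.Background()
	params := compute.CloudService{
		Location: to.StringPtr("westeurope"),
	}
	future, err := client.CreateOrUpdate(ctx, "example-rg", "example-cs", &params)
	if err != nil {
		log.Fatal(err)
	}
	// Wait for the long-running create/update to finish, then fetch the result.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		log.Fatal(err)
	}
	cs, err := future.Result(client)
	if err != nil {
		log.Fatal(err)
	}
	if cs.ID != nil {
		log.Println("created:", *cs.ID)
	}
}
```

Note that the preparer nils out ID, Name and Type before serialising the body, so read-only fields on a round-tripped CloudService are never sent back to the service.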
+func (client CloudServicesClient) CreateOrUpdateResponder(resp *http.Response) (result CloudService, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a cloud service. +// Parameters: +// resourceGroupName - name of the resource group. +// cloudServiceName - name of the cloud service. +func (client CloudServicesClient) Delete(ctx context.Context, resourceGroupName string, cloudServiceName string) (result CloudServicesDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.Delete") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, cloudServiceName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client CloudServicesClient) DeletePreparer(ctx context.Context, resourceGroupName string, cloudServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "cloudServiceName": autorest.Encode("path", cloudServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServicesClient) DeleteSender(req *http.Request) (future CloudServicesDeleteFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client CloudServicesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteInstances deletes role instances in a cloud service. +// Parameters: +// resourceGroupName - name of the resource group. +// cloudServiceName - name of the cloud service. +// parameters - list of cloud service role instance names. 
+func (client CloudServicesClient) DeleteInstances(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters *RoleInstances) (result CloudServicesDeleteInstancesFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.DeleteInstances") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.RoleInstances", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { + return result, validation.NewError("compute.CloudServicesClient", "DeleteInstances", err.Error()) + } + + req, err := client.DeleteInstancesPreparer(ctx, resourceGroupName, cloudServiceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "DeleteInstances", nil, "Failure preparing request") + return + } + + result, err = client.DeleteInstancesSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "DeleteInstances", result.Response(), "Failure sending request") + return + } + + return +} + +// DeleteInstancesPreparer prepares the DeleteInstances request. +func (client CloudServicesClient) DeleteInstancesPreparer(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters *RoleInstances) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "cloudServiceName": autorest.Encode("path", cloudServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/delete", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if parameters != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(parameters)) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteInstancesSender sends the DeleteInstances request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServicesClient) DeleteInstancesSender(req *http.Request) (future CloudServicesDeleteInstancesFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// DeleteInstancesResponder handles the response to the DeleteInstances request. The method always +// closes the http.Response Body. 
+func (client CloudServicesClient) DeleteInstancesResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get display information about a cloud service. +// Parameters: +// resourceGroupName - name of the resource group. +// cloudServiceName - name of the cloud service. +func (client CloudServicesClient) Get(ctx context.Context, resourceGroupName string, cloudServiceName string) (result CloudService, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, cloudServiceName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. +func (client CloudServicesClient) GetPreparer(ctx context.Context, resourceGroupName string, cloudServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "cloudServiceName": autorest.Encode("path", cloudServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServicesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client CloudServicesClient) GetResponder(resp *http.Response) (result CloudService, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetInstanceView gets the status of a cloud service. +// Parameters: +// resourceGroupName - name of the resource group. +// cloudServiceName - name of the cloud service. 
+func (client CloudServicesClient) GetInstanceView(ctx context.Context, resourceGroupName string, cloudServiceName string) (result CloudServiceInstanceView, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.GetInstanceView") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetInstanceViewPreparer(ctx, resourceGroupName, cloudServiceName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "GetInstanceView", nil, "Failure preparing request") + return + } + + resp, err := client.GetInstanceViewSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "GetInstanceView", resp, "Failure sending request") + return + } + + result, err = client.GetInstanceViewResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "GetInstanceView", resp, "Failure responding to request") + return + } + + return +} + +// GetInstanceViewPreparer prepares the GetInstanceView request. +func (client CloudServicesClient) GetInstanceViewPreparer(ctx context.Context, resourceGroupName string, cloudServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "cloudServiceName": autorest.Encode("path", cloudServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/instanceView", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetInstanceViewSender sends the GetInstanceView request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServicesClient) GetInstanceViewSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetInstanceViewResponder handles the response to the GetInstanceView request. The method always +// closes the http.Response Body. +func (client CloudServicesClient) GetInstanceViewResponder(resp *http.Response) (result CloudServiceInstanceView, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of all cloud services under a resource group. Use nextLink property in the response to get the next +// page of Cloud Services. Do this till nextLink is null to fetch all the Cloud Services. +// Parameters: +// resourceGroupName - name of the resource group. 
+func (client CloudServicesClient) List(ctx context.Context, resourceGroupName string) (result CloudServiceListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.List") + defer func() { + sc := -1 + if result.cslr.Response.Response != nil { + sc = result.cslr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.cslr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "List", resp, "Failure sending request") + return + } + + result.cslr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "List", resp, "Failure responding to request") + return + } + if result.cslr.hasNextLink() && result.cslr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListPreparer prepares the List request. +func (client CloudServicesClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServicesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client CloudServicesClient) ListResponder(resp *http.Response) (result CloudServiceListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client CloudServicesClient) listNextResults(ctx context.Context, lastResults CloudServiceListResult) (result CloudServiceListResult, err error) { + req, err := lastResults.cloudServiceListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.CloudServicesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.CloudServicesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client CloudServicesClient) ListComplete(ctx context.Context, resourceGroupName string) (result CloudServiceListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName) + return +} + +// ListAll gets a list of all cloud services in the subscription, regardless of the associated resource group. Use +// nextLink property in the response to get the next page of Cloud Services. Do this till nextLink is null to fetch all +// the Cloud Services. +func (client CloudServicesClient) ListAll(ctx context.Context) (result CloudServiceListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.ListAll") + defer func() { + sc := -1 + if result.cslr.Response.Response != nil { + sc = result.cslr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listAllNextResults + req, err := client.ListAllPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "ListAll", nil, "Failure preparing request") + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.cslr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "ListAll", resp, "Failure sending request") + return + } + + result.cslr, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "ListAll", resp, "Failure responding to request") + return + } + if result.cslr.hasNextLink() && result.cslr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListAllPreparer prepares the ListAll request. 
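ListComplete wraps the page-by-page flow above in an iterator that crosses page boundaries on demand (ListAllComplete, further down, does the same across the whole subscription rather than a single resource group). A short sketch with placeholder names and the generated iterator helpers assumed:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

func main() {
	// Placeholder IDs; Authorizer configuration is assumed.
	client := compute.NewCloudServicesClient("00000000-0000-0000-0000-000000000000")

	ctx := context.Background()
	// ListComplete hides the paging behind an iterator over individual values.
	it, err := client.ListComplete(ctx, "example-rg")
	if err != nil {
		log.Fatal(err)
	}
	for it.NotDone() {
		cs := it.Value()
		if cs.Name != nil {
			log.Println(*cs.Name)
		}
		if err := it.NextWithContext(ctx); err != nil {
			log.Fatal(err)
		}
	}
}
```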
+func (client CloudServicesClient) ListAllPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/cloudServices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServicesClient) ListAllSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client CloudServicesClient) ListAllResponder(resp *http.Response) (result CloudServiceListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listAllNextResults retrieves the next set of results, if any. +func (client CloudServicesClient) listAllNextResults(ctx context.Context, lastResults CloudServiceListResult) (result CloudServiceListResult, err error) { + req, err := lastResults.cloudServiceListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.CloudServicesClient", "listAllNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.CloudServicesClient", "listAllNextResults", resp, "Failure sending next results request") + } + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "listAllNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListAllComplete enumerates all values, automatically crossing page boundaries as required. +func (client CloudServicesClient) ListAllComplete(ctx context.Context) (result CloudServiceListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.ListAll") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListAll(ctx) + return +} + +// PowerOff power off the cloud service. Note that resources are still attached and you are getting charged for the +// resources. +// Parameters: +// resourceGroupName - name of the resource group. +// cloudServiceName - name of the cloud service. 
+func (client CloudServicesClient) PowerOff(ctx context.Context, resourceGroupName string, cloudServiceName string) (result CloudServicesPowerOffFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.PowerOff") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.PowerOffPreparer(ctx, resourceGroupName, cloudServiceName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "PowerOff", nil, "Failure preparing request") + return + } + + result, err = client.PowerOffSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "PowerOff", result.Response(), "Failure sending request") + return + } + + return +} + +// PowerOffPreparer prepares the PowerOff request. +func (client CloudServicesClient) PowerOffPreparer(ctx context.Context, resourceGroupName string, cloudServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "cloudServiceName": autorest.Encode("path", cloudServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/poweroff", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// PowerOffSender sends the PowerOff request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServicesClient) PowerOffSender(req *http.Request) (future CloudServicesPowerOffFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// PowerOffResponder handles the response to the PowerOff request. The method always +// closes the http.Response Body. +func (client CloudServicesClient) PowerOffResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Rebuild rebuild Role Instances reinstalls the operating system on instances of web roles or worker roles and +// initializes the storage resources that are used by them. If you do not want to initialize storage resources, you can +// use Reimage Role Instances. +// Parameters: +// resourceGroupName - name of the resource group. +// cloudServiceName - name of the cloud service. +// parameters - list of cloud service role instance names. 
+func (client CloudServicesClient) Rebuild(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters *RoleInstances) (result CloudServicesRebuildFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.Rebuild") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.RoleInstances", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { + return result, validation.NewError("compute.CloudServicesClient", "Rebuild", err.Error()) + } + + req, err := client.RebuildPreparer(ctx, resourceGroupName, cloudServiceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Rebuild", nil, "Failure preparing request") + return + } + + result, err = client.RebuildSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Rebuild", result.Response(), "Failure sending request") + return + } + + return +} + +// RebuildPreparer prepares the Rebuild request. +func (client CloudServicesClient) RebuildPreparer(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters *RoleInstances) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "cloudServiceName": autorest.Encode("path", cloudServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/rebuild", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if parameters != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(parameters)) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RebuildSender sends the Rebuild request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServicesClient) RebuildSender(req *http.Request) (future CloudServicesRebuildFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// RebuildResponder handles the response to the Rebuild request. The method always +// closes the http.Response Body. 
+func (client CloudServicesClient) RebuildResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Reimage reimage asynchronous operation reinstalls the operating system on instances of web roles or worker roles. +// Parameters: +// resourceGroupName - name of the resource group. +// cloudServiceName - name of the cloud service. +// parameters - list of cloud service role instance names. +func (client CloudServicesClient) Reimage(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters *RoleInstances) (result CloudServicesReimageFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.Reimage") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.RoleInstances", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { + return result, validation.NewError("compute.CloudServicesClient", "Reimage", err.Error()) + } + + req, err := client.ReimagePreparer(ctx, resourceGroupName, cloudServiceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Reimage", nil, "Failure preparing request") + return + } + + result, err = client.ReimageSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Reimage", result.Response(), "Failure sending request") + return + } + + return +} + +// ReimagePreparer prepares the Reimage request. +func (client CloudServicesClient) ReimagePreparer(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters *RoleInstances) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "cloudServiceName": autorest.Encode("path", cloudServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/reimage", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if parameters != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(parameters)) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ReimageSender sends the Reimage request. The method will close the +// http.Response Body if it receives an error. 
+func (client CloudServicesClient) ReimageSender(req *http.Request) (future CloudServicesReimageFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// ReimageResponder handles the response to the Reimage request. The method always +// closes the http.Response Body. +func (client CloudServicesClient) ReimageResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Restart restarts one or more role instances in a cloud service. +// Parameters: +// resourceGroupName - name of the resource group. +// cloudServiceName - name of the cloud service. +// parameters - list of cloud service role instance names. +func (client CloudServicesClient) Restart(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters *RoleInstances) (result CloudServicesRestartFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.Restart") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.RoleInstances", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { + return result, validation.NewError("compute.CloudServicesClient", "Restart", err.Error()) + } + + req, err := client.RestartPreparer(ctx, resourceGroupName, cloudServiceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Restart", nil, "Failure preparing request") + return + } + + result, err = client.RestartSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Restart", result.Response(), "Failure sending request") + return + } + + return +} + +// RestartPreparer prepares the Restart request. 
+func (client CloudServicesClient) RestartPreparer(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters *RoleInstances) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "cloudServiceName": autorest.Encode("path", cloudServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/restart", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if parameters != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(parameters)) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RestartSender sends the Restart request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServicesClient) RestartSender(req *http.Request) (future CloudServicesRestartFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// RestartResponder handles the response to the Restart request. The method always +// closes the http.Response Body. +func (client CloudServicesClient) RestartResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Start starts the cloud service. +// Parameters: +// resourceGroupName - name of the resource group. +// cloudServiceName - name of the cloud service. +func (client CloudServicesClient) Start(ctx context.Context, resourceGroupName string, cloudServiceName string) (result CloudServicesStartFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.Start") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.StartPreparer(ctx, resourceGroupName, cloudServiceName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Start", nil, "Failure preparing request") + return + } + + result, err = client.StartSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Start", result.Response(), "Failure sending request") + return + } + + return +} + +// StartPreparer prepares the Start request. 
+func (client CloudServicesClient) StartPreparer(ctx context.Context, resourceGroupName string, cloudServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "cloudServiceName": autorest.Encode("path", cloudServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/start", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// StartSender sends the Start request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServicesClient) StartSender(req *http.Request) (future CloudServicesStartFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// StartResponder handles the response to the Start request. The method always +// closes the http.Response Body. +func (client CloudServicesClient) StartResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Update update a cloud service. +// Parameters: +// resourceGroupName - name of the resource group. +// cloudServiceName - name of the cloud service. +// parameters - the cloud service object. +func (client CloudServicesClient) Update(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters *CloudServiceUpdate) (result CloudServicesUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.Update") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, cloudServiceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. 
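The PowerOff, Rebuild, Reimage, Restart and Start operations above all return futures, so callers have to wait for the long-running operation to finish. A hedged sketch follows (not part of the vendored code); the names are placeholders and it assumes "*" is accepted as the role-instance wildcard.

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

// restartAllRoleInstances issues the Restart LRO and blocks until it reaches a
// terminal state. The generated validation requires RoleInstances to be set
// whenever a parameters body is supplied.
func restartAllRoleInstances(ctx context.Context, client compute.CloudServicesClient, resourceGroup, name string) error {
	all := []string{"*"} // assumed wildcard for "every role instance"
	future, err := client.Restart(ctx, resourceGroup, name, &compute.RoleInstances{RoleInstances: &all})
	if err != nil {
		return err
	}
	// WaitForCompletionRef polls the operation until it completes or the context is cancelled.
	return future.WaitForCompletionRef(ctx, client.Client)
}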
+func (client CloudServicesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters *CloudServiceUpdate) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "cloudServiceName": autorest.Encode("path", cloudServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if parameters != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(parameters)) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServicesClient) UpdateSender(req *http.Request) (future CloudServicesUpdateFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client CloudServicesClient) UpdateResponder(resp *http.Response) (result CloudService, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudservicesupdatedomain.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudservicesupdatedomain.go new file mode 100644 index 000000000000..eabe4deefce2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudservicesupdatedomain.go @@ -0,0 +1,319 @@ +package compute + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// CloudServicesUpdateDomainClient is the compute Client +type CloudServicesUpdateDomainClient struct { + BaseClient +} + +// NewCloudServicesUpdateDomainClient creates an instance of the CloudServicesUpdateDomainClient client. 
+func NewCloudServicesUpdateDomainClient(subscriptionID string) CloudServicesUpdateDomainClient { + return NewCloudServicesUpdateDomainClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewCloudServicesUpdateDomainClientWithBaseURI creates an instance of the CloudServicesUpdateDomainClient client +// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign +// clouds, Azure stack). +func NewCloudServicesUpdateDomainClientWithBaseURI(baseURI string, subscriptionID string) CloudServicesUpdateDomainClient { + return CloudServicesUpdateDomainClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// GetUpdateDomain gets the specified update domain of a cloud service. Use nextLink property in the response to get +// the next page of update domains. Do this till nextLink is null to fetch all the update domains. +// Parameters: +// resourceGroupName - name of the resource group. +// cloudServiceName - name of the cloud service. +// updateDomain - specifies an integer value that identifies the update domain. Update domains are identified +// with a zero-based index: the first update domain has an ID of 0, the second has an ID of 1, and so on. +func (client CloudServicesUpdateDomainClient) GetUpdateDomain(ctx context.Context, resourceGroupName string, cloudServiceName string, updateDomain int32) (result UpdateDomain, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesUpdateDomainClient.GetUpdateDomain") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetUpdateDomainPreparer(ctx, resourceGroupName, cloudServiceName, updateDomain) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesUpdateDomainClient", "GetUpdateDomain", nil, "Failure preparing request") + return + } + + resp, err := client.GetUpdateDomainSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CloudServicesUpdateDomainClient", "GetUpdateDomain", resp, "Failure sending request") + return + } + + result, err = client.GetUpdateDomainResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesUpdateDomainClient", "GetUpdateDomain", resp, "Failure responding to request") + return + } + + return +} + +// GetUpdateDomainPreparer prepares the GetUpdateDomain request. 
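As an illustration of the zero-based updateDomain index described above, a caller might fetch the first update domain roughly as follows; this sketch is not part of the diff and the names are placeholders.

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

// firstUpdateDomain fetches update domain 0, i.e. the first entry in the
// zero-based index the API uses.
func firstUpdateDomain(ctx context.Context, client compute.CloudServicesUpdateDomainClient, resourceGroup, cloudService string) error {
	ud, err := client.GetUpdateDomain(ctx, resourceGroup, cloudService, 0)
	if err != nil {
		return err
	}
	if ud.Name != nil {
		fmt.Println(*ud.Name)
	}
	return nil
}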
+func (client CloudServicesUpdateDomainClient) GetUpdateDomainPreparer(ctx context.Context, resourceGroupName string, cloudServiceName string, updateDomain int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "cloudServiceName": autorest.Encode("path", cloudServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "updateDomain": autorest.Encode("path", updateDomain), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/updateDomains/{updateDomain}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetUpdateDomainSender sends the GetUpdateDomain request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServicesUpdateDomainClient) GetUpdateDomainSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetUpdateDomainResponder handles the response to the GetUpdateDomain request. The method always +// closes the http.Response Body. +func (client CloudServicesUpdateDomainClient) GetUpdateDomainResponder(resp *http.Response) (result UpdateDomain, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListUpdateDomains gets a list of all update domains in a cloud service. +// Parameters: +// resourceGroupName - name of the resource group. +// cloudServiceName - name of the cloud service. +func (client CloudServicesUpdateDomainClient) ListUpdateDomains(ctx context.Context, resourceGroupName string, cloudServiceName string) (result UpdateDomainListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesUpdateDomainClient.ListUpdateDomains") + defer func() { + sc := -1 + if result.udlr.Response.Response != nil { + sc = result.udlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listUpdateDomainsNextResults + req, err := client.ListUpdateDomainsPreparer(ctx, resourceGroupName, cloudServiceName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesUpdateDomainClient", "ListUpdateDomains", nil, "Failure preparing request") + return + } + + resp, err := client.ListUpdateDomainsSender(req) + if err != nil { + result.udlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CloudServicesUpdateDomainClient", "ListUpdateDomains", resp, "Failure sending request") + return + } + + result.udlr, err = client.ListUpdateDomainsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesUpdateDomainClient", "ListUpdateDomains", resp, "Failure responding to request") + return + } + if result.udlr.hasNextLink() && result.udlr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListUpdateDomainsPreparer prepares the ListUpdateDomains request. 
+func (client CloudServicesUpdateDomainClient) ListUpdateDomainsPreparer(ctx context.Context, resourceGroupName string, cloudServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "cloudServiceName": autorest.Encode("path", cloudServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/updateDomains", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListUpdateDomainsSender sends the ListUpdateDomains request. The method will close the +// http.Response Body if it receives an error. +func (client CloudServicesUpdateDomainClient) ListUpdateDomainsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListUpdateDomainsResponder handles the response to the ListUpdateDomains request. The method always +// closes the http.Response Body. +func (client CloudServicesUpdateDomainClient) ListUpdateDomainsResponder(resp *http.Response) (result UpdateDomainListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listUpdateDomainsNextResults retrieves the next set of results, if any. +func (client CloudServicesUpdateDomainClient) listUpdateDomainsNextResults(ctx context.Context, lastResults UpdateDomainListResult) (result UpdateDomainListResult, err error) { + req, err := lastResults.updateDomainListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.CloudServicesUpdateDomainClient", "listUpdateDomainsNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListUpdateDomainsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.CloudServicesUpdateDomainClient", "listUpdateDomainsNextResults", resp, "Failure sending next results request") + } + result, err = client.ListUpdateDomainsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesUpdateDomainClient", "listUpdateDomainsNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListUpdateDomainsComplete enumerates all values, automatically crossing page boundaries as required. 
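Where the ListUpdateDomainsComplete iterator is not wanted, the page type returned by ListUpdateDomains can be walked by hand. A rough sketch with placeholder names, not part of the vendored code:

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

// countUpdateDomains walks the pages returned by ListUpdateDomains manually,
// one nextLink hop at a time.
func countUpdateDomains(ctx context.Context, client compute.CloudServicesUpdateDomainClient, resourceGroup, cloudService string) (int, error) {
	page, err := client.ListUpdateDomains(ctx, resourceGroup, cloudService)
	if err != nil {
		return 0, err
	}
	total := 0
	for page.NotDone() {
		total += len(page.Values())
		if err := page.NextWithContext(ctx); err != nil {
			return total, err
		}
	}
	fmt.Printf("cloud service %s has %d update domains\n", cloudService, total)
	return total, nil
}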
+func (client CloudServicesUpdateDomainClient) ListUpdateDomainsComplete(ctx context.Context, resourceGroupName string, cloudServiceName string) (result UpdateDomainListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesUpdateDomainClient.ListUpdateDomains") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListUpdateDomains(ctx, resourceGroupName, cloudServiceName) + return +} + +// WalkUpdateDomain updates the role instances in the specified update domain. +// Parameters: +// resourceGroupName - name of the resource group. +// cloudServiceName - name of the cloud service. +// updateDomain - specifies an integer value that identifies the update domain. Update domains are identified +// with a zero-based index: the first update domain has an ID of 0, the second has an ID of 1, and so on. +// parameters - the update domain object. +func (client CloudServicesUpdateDomainClient) WalkUpdateDomain(ctx context.Context, resourceGroupName string, cloudServiceName string, updateDomain int32, parameters *UpdateDomain) (result CloudServicesUpdateDomainWalkUpdateDomainFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesUpdateDomainClient.WalkUpdateDomain") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.WalkUpdateDomainPreparer(ctx, resourceGroupName, cloudServiceName, updateDomain, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesUpdateDomainClient", "WalkUpdateDomain", nil, "Failure preparing request") + return + } + + result, err = client.WalkUpdateDomainSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesUpdateDomainClient", "WalkUpdateDomain", result.Response(), "Failure sending request") + return + } + + return +} + +// WalkUpdateDomainPreparer prepares the WalkUpdateDomain request. +func (client CloudServicesUpdateDomainClient) WalkUpdateDomainPreparer(ctx context.Context, resourceGroupName string, cloudServiceName string, updateDomain int32, parameters *UpdateDomain) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "cloudServiceName": autorest.Encode("path", cloudServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "updateDomain": autorest.Encode("path", updateDomain), + } + + const APIVersion = "2021-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + parameters.ID = nil + parameters.Name = nil + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/updateDomains/{updateDomain}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if parameters != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(parameters)) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// WalkUpdateDomainSender sends the WalkUpdateDomain request. 
The method will close the +// http.Response Body if it receives an error. +func (client CloudServicesUpdateDomainClient) WalkUpdateDomainSender(req *http.Request) (future CloudServicesUpdateDomainWalkUpdateDomainFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// WalkUpdateDomainResponder handles the response to the WalkUpdateDomain request. The method always +// closes the http.Response Body. +func (client CloudServicesUpdateDomainClient) WalkUpdateDomainResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/communitygalleries.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/communitygalleries.go new file mode 100644 index 000000000000..99596387171d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/communitygalleries.go @@ -0,0 +1,108 @@ +package compute + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// CommunityGalleriesClient is the compute Client +type CommunityGalleriesClient struct { + BaseClient +} + +// NewCommunityGalleriesClient creates an instance of the CommunityGalleriesClient client. +func NewCommunityGalleriesClient(subscriptionID string) CommunityGalleriesClient { + return NewCommunityGalleriesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewCommunityGalleriesClientWithBaseURI creates an instance of the CommunityGalleriesClient client using a custom +// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). +func NewCommunityGalleriesClientWithBaseURI(baseURI string, subscriptionID string) CommunityGalleriesClient { + return CommunityGalleriesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get get a community gallery by gallery public name. +// Parameters: +// location - resource location. +// publicGalleryName - the public name of the community gallery. 
+func (client CommunityGalleriesClient) Get(ctx context.Context, location string, publicGalleryName string) (result CommunityGallery, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CommunityGalleriesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, location, publicGalleryName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CommunityGalleriesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CommunityGalleriesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CommunityGalleriesClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. +func (client CommunityGalleriesClient) GetPreparer(ctx context.Context, location string, publicGalleryName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "publicGalleryName": autorest.Encode("path", publicGalleryName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/communityGalleries/{publicGalleryName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client CommunityGalleriesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client CommunityGalleriesClient) GetResponder(resp *http.Response) (result CommunityGallery, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/communitygalleryimages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/communitygalleryimages.go new file mode 100644 index 000000000000..ef82a37bc941 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/communitygalleryimages.go @@ -0,0 +1,110 @@ +package compute + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// CommunityGalleryImagesClient is the compute Client +type CommunityGalleryImagesClient struct { + BaseClient +} + +// NewCommunityGalleryImagesClient creates an instance of the CommunityGalleryImagesClient client. +func NewCommunityGalleryImagesClient(subscriptionID string) CommunityGalleryImagesClient { + return NewCommunityGalleryImagesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewCommunityGalleryImagesClientWithBaseURI creates an instance of the CommunityGalleryImagesClient client using a +// custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, +// Azure stack). +func NewCommunityGalleryImagesClientWithBaseURI(baseURI string, subscriptionID string) CommunityGalleryImagesClient { + return CommunityGalleryImagesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get get a community gallery image. +// Parameters: +// location - resource location. +// publicGalleryName - the public name of the community gallery. +// galleryImageName - the name of the community gallery image definition. +func (client CommunityGalleryImagesClient) Get(ctx context.Context, location string, publicGalleryName string, galleryImageName string) (result CommunityGalleryImage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CommunityGalleryImagesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, location, publicGalleryName, galleryImageName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CommunityGalleryImagesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CommunityGalleryImagesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CommunityGalleryImagesClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. +func (client CommunityGalleryImagesClient) GetPreparer(ctx context.Context, location string, publicGalleryName string, galleryImageName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryImageName": autorest.Encode("path", galleryImageName), + "location": autorest.Encode("path", location), + "publicGalleryName": autorest.Encode("path", publicGalleryName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/communityGalleries/{publicGalleryName}/images/{galleryImageName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client CommunityGalleryImagesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client CommunityGalleryImagesClient) GetResponder(resp *http.Response) (result CommunityGalleryImage, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/communitygalleryimageversions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/communitygalleryimageversions.go new file mode 100644 index 000000000000..03d67523dff7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/communitygalleryimageversions.go @@ -0,0 +1,114 @@ +package compute + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// CommunityGalleryImageVersionsClient is the compute Client +type CommunityGalleryImageVersionsClient struct { + BaseClient +} + +// NewCommunityGalleryImageVersionsClient creates an instance of the CommunityGalleryImageVersionsClient client. +func NewCommunityGalleryImageVersionsClient(subscriptionID string) CommunityGalleryImageVersionsClient { + return NewCommunityGalleryImageVersionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewCommunityGalleryImageVersionsClientWithBaseURI creates an instance of the CommunityGalleryImageVersionsClient +// client using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI +// (sovereign clouds, Azure stack). +func NewCommunityGalleryImageVersionsClientWithBaseURI(baseURI string, subscriptionID string) CommunityGalleryImageVersionsClient { + return CommunityGalleryImageVersionsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get get a community gallery image version. +// Parameters: +// location - resource location. +// publicGalleryName - the public name of the community gallery. +// galleryImageName - the name of the community gallery image definition. +// galleryImageVersionName - the name of the community gallery image version. Needs to follow semantic version +// name pattern: The allowed characters are digit and period. Digits must be within the range of a 32-bit +// integer. Format: <MajorVersion>.<MinorVersion>.<Patch>
+func (client CommunityGalleryImageVersionsClient) Get(ctx context.Context, location string, publicGalleryName string, galleryImageName string, galleryImageVersionName string) (result CommunityGalleryImageVersion, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CommunityGalleryImageVersionsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, location, publicGalleryName, galleryImageName, galleryImageVersionName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CommunityGalleryImageVersionsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.CommunityGalleryImageVersionsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CommunityGalleryImageVersionsClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. +func (client CommunityGalleryImageVersionsClient) GetPreparer(ctx context.Context, location string, publicGalleryName string, galleryImageName string, galleryImageVersionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryImageName": autorest.Encode("path", galleryImageName), + "galleryImageVersionName": autorest.Encode("path", galleryImageVersionName), + "location": autorest.Encode("path", location), + "publicGalleryName": autorest.Encode("path", publicGalleryName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/communityGalleries/{publicGalleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client CommunityGalleryImageVersionsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
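Putting the semantic-version naming rule above into practice, a caller might resolve one specific community gallery image version roughly as below; this is an editorial sketch, and the location, gallery and image names are invented placeholders.

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

// lookupCommunityImageVersion fetches a single version of a community gallery
// image; "1.0.0" follows the MajorVersion.MinorVersion.Patch pattern.
func lookupCommunityImageVersion(ctx context.Context, client compute.CommunityGalleryImageVersionsClient) error {
	version, err := client.Get(ctx, "westeurope", "examplePublicGallery", "exampleImage", "1.0.0")
	if err != nil {
		return err
	}
	if version.Name != nil {
		fmt.Println(*version.Name)
	}
	return nil
}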
+func (client CommunityGalleryImageVersionsClient) GetResponder(resp *http.Response) (result CommunityGalleryImageVersion, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/dedicatedhostgroups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/dedicatedhostgroups.go similarity index 98% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/dedicatedhostgroups.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/dedicatedhostgroups.go index 4c72632aefc1..67ace3df4d8e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/dedicatedhostgroups.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/dedicatedhostgroups.go @@ -88,7 +88,7 @@ func (client DedicatedHostGroupsClient) CreateOrUpdatePreparer(ctx context.Conte "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -166,7 +166,7 @@ func (client DedicatedHostGroupsClient) DeletePreparer(ctx context.Context, reso "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -200,8 +200,9 @@ func (client DedicatedHostGroupsClient) DeleteResponder(resp *http.Response) (re // Parameters: // resourceGroupName - the name of the resource group. // hostGroupName - the name of the dedicated host group. -// expand - the expand expression to apply on the operation. The response shows the list of instance view of -// the dedicated hosts under the dedicated host group. +// expand - the expand expression to apply on the operation. 'InstanceView' will retrieve the list of instance +// views of the dedicated hosts under the dedicated host group. 'UserData' is not supported for dedicated host +// group. func (client DedicatedHostGroupsClient) Get(ctx context.Context, resourceGroupName string, hostGroupName string, expand InstanceViewTypes) (result DedicatedHostGroup, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostGroupsClient.Get") @@ -243,7 +244,7 @@ func (client DedicatedHostGroupsClient) GetPreparer(ctx context.Context, resourc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -326,7 +327,7 @@ func (client DedicatedHostGroupsClient) ListByResourceGroupPreparer(ctx context. 
"subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -440,7 +441,7 @@ func (client DedicatedHostGroupsClient) ListBySubscriptionPreparer(ctx context.C "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -554,7 +555,7 @@ func (client DedicatedHostGroupsClient) UpdatePreparer(ctx context.Context, reso "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/dedicatedhosts.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/dedicatedhosts.go similarity index 98% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/dedicatedhosts.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/dedicatedhosts.go index 9cfc587ffb2d..a58e6b2b84bb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/dedicatedhosts.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/dedicatedhosts.go @@ -82,7 +82,7 @@ func (client DedicatedHostsClient) CreateOrUpdatePreparer(ctx context.Context, r "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -165,7 +165,7 @@ func (client DedicatedHostsClient) DeletePreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -210,7 +210,8 @@ func (client DedicatedHostsClient) DeleteResponder(resp *http.Response) (result // resourceGroupName - the name of the resource group. // hostGroupName - the name of the dedicated host group. // hostName - the name of the dedicated host. -// expand - the expand expression to apply on the operation. +// expand - the expand expression to apply on the operation. 'InstanceView' will retrieve the list of instance +// views of the dedicated host. 'UserData' is not supported for dedicated host. 
func (client DedicatedHostsClient) Get(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, expand InstanceViewTypes) (result DedicatedHost, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostsClient.Get") @@ -253,7 +254,7 @@ func (client DedicatedHostsClient) GetPreparer(ctx context.Context, resourceGrou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -338,7 +339,7 @@ func (client DedicatedHostsClient) ListByHostGroupPreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -447,7 +448,7 @@ func (client DedicatedHostsClient) UpdatePreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/diskaccesses.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskaccesses.go similarity index 97% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/diskaccesses.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskaccesses.go index 45105cae000e..618db0d550a5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/diskaccesses.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskaccesses.go @@ -35,8 +35,8 @@ func NewDiskAccessesClientWithBaseURI(baseURI string, subscriptionID string) Dis // Parameters: // resourceGroupName - the name of the resource group. // diskAccessName - the name of the disk access resource that is being created. The name can't be changed after -// the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum -// name length is 80 characters. +// the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The +// maximum name length is 80 characters. // diskAccess - disk access object supplied in the body of the Put disk access operation. func (client DiskAccessesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, diskAccessName string, diskAccess DiskAccess) (result DiskAccessesCreateOrUpdateFuture, err error) { if tracing.IsEnabled() { @@ -72,7 +72,7 @@ func (client DiskAccessesClient) CreateOrUpdatePreparer(ctx context.Context, res "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -119,8 +119,8 @@ func (client DiskAccessesClient) CreateOrUpdateResponder(resp *http.Response) (r // Parameters: // resourceGroupName - the name of the resource group. // diskAccessName - the name of the disk access resource that is being created. The name can't be changed after -// the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum -// name length is 80 characters. 
+// the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The +// maximum name length is 80 characters. func (client DiskAccessesClient) Delete(ctx context.Context, resourceGroupName string, diskAccessName string) (result DiskAccessesDeleteFuture, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessesClient.Delete") @@ -155,7 +155,7 @@ func (client DiskAccessesClient) DeletePreparer(ctx context.Context, resourceGro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -199,9 +199,9 @@ func (client DiskAccessesClient) DeleteResponder(resp *http.Response) (result au // Parameters: // resourceGroupName - the name of the resource group. // diskAccessName - the name of the disk access resource that is being created. The name can't be changed after -// the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum -// name length is 80 characters. -// privateEndpointConnectionName - the name of the private endpoint connection +// the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The +// maximum name length is 80 characters. +// privateEndpointConnectionName - the name of the private endpoint connection. func (client DiskAccessesClient) DeleteAPrivateEndpointConnection(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string) (result DiskAccessesDeleteAPrivateEndpointConnectionFuture, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessesClient.DeleteAPrivateEndpointConnection") @@ -237,7 +237,7 @@ func (client DiskAccessesClient) DeleteAPrivateEndpointConnectionPreparer(ctx co "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -281,8 +281,8 @@ func (client DiskAccessesClient) DeleteAPrivateEndpointConnectionResponder(resp // Parameters: // resourceGroupName - the name of the resource group. // diskAccessName - the name of the disk access resource that is being created. The name can't be changed after -// the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum -// name length is 80 characters. +// the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The +// maximum name length is 80 characters. func (client DiskAccessesClient) Get(ctx context.Context, resourceGroupName string, diskAccessName string) (result DiskAccess, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessesClient.Get") @@ -324,7 +324,7 @@ func (client DiskAccessesClient) GetPreparer(ctx context.Context, resourceGroupN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -359,9 +359,9 @@ func (client DiskAccessesClient) GetResponder(resp *http.Response) (result DiskA // Parameters: // resourceGroupName - the name of the resource group. // diskAccessName - the name of the disk access resource that is being created. The name can't be changed after -// the disk encryption set is created. 
Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum -// name length is 80 characters. -// privateEndpointConnectionName - the name of the private endpoint connection +// the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The +// maximum name length is 80 characters. +// privateEndpointConnectionName - the name of the private endpoint connection. func (client DiskAccessesClient) GetAPrivateEndpointConnection(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string) (result PrivateEndpointConnection, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessesClient.GetAPrivateEndpointConnection") @@ -404,7 +404,7 @@ func (client DiskAccessesClient) GetAPrivateEndpointConnectionPreparer(ctx conte "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -439,8 +439,8 @@ func (client DiskAccessesClient) GetAPrivateEndpointConnectionResponder(resp *ht // Parameters: // resourceGroupName - the name of the resource group. // diskAccessName - the name of the disk access resource that is being created. The name can't be changed after -// the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum -// name length is 80 characters. +// the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The +// maximum name length is 80 characters. func (client DiskAccessesClient) GetPrivateLinkResources(ctx context.Context, resourceGroupName string, diskAccessName string) (result PrivateLinkResourceListResult, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessesClient.GetPrivateLinkResources") @@ -482,7 +482,7 @@ func (client DiskAccessesClient) GetPrivateLinkResourcesPreparer(ctx context.Con "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -558,7 +558,7 @@ func (client DiskAccessesClient) ListPreparer(ctx context.Context) (*http.Reques "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -674,7 +674,7 @@ func (client DiskAccessesClient) ListByResourceGroupPreparer(ctx context.Context "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -746,8 +746,8 @@ func (client DiskAccessesClient) ListByResourceGroupComplete(ctx context.Context // Parameters: // resourceGroupName - the name of the resource group. // diskAccessName - the name of the disk access resource that is being created. The name can't be changed after -// the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum -// name length is 80 characters. +// the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The +// maximum name length is 80 characters. 
func (client DiskAccessesClient) ListPrivateEndpointConnections(ctx context.Context, resourceGroupName string, diskAccessName string) (result PrivateEndpointConnectionListResultPage, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessesClient.ListPrivateEndpointConnections") @@ -794,7 +794,7 @@ func (client DiskAccessesClient) ListPrivateEndpointConnectionsPreparer(ctx cont "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -866,8 +866,8 @@ func (client DiskAccessesClient) ListPrivateEndpointConnectionsComplete(ctx cont // Parameters: // resourceGroupName - the name of the resource group. // diskAccessName - the name of the disk access resource that is being created. The name can't be changed after -// the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum -// name length is 80 characters. +// the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The +// maximum name length is 80 characters. // diskAccess - disk access object supplied in the body of the Patch disk access operation. func (client DiskAccessesClient) Update(ctx context.Context, resourceGroupName string, diskAccessName string, diskAccess DiskAccessUpdate) (result DiskAccessesUpdateFuture, err error) { if tracing.IsEnabled() { @@ -903,7 +903,7 @@ func (client DiskAccessesClient) UpdatePreparer(ctx context.Context, resourceGro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -951,9 +951,9 @@ func (client DiskAccessesClient) UpdateResponder(resp *http.Response) (result Di // Parameters: // resourceGroupName - the name of the resource group. // diskAccessName - the name of the disk access resource that is being created. The name can't be changed after -// the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum -// name length is 80 characters. -// privateEndpointConnectionName - the name of the private endpoint connection +// the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The +// maximum name length is 80 characters. +// privateEndpointConnectionName - the name of the private endpoint connection. // privateEndpointConnection - private endpoint connection object supplied in the body of the Put private // endpoint connection operation. 
func (client DiskAccessesClient) UpdateAPrivateEndpointConnection(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string, privateEndpointConnection PrivateEndpointConnection) (result DiskAccessesUpdateAPrivateEndpointConnectionFuture, err error) { @@ -998,7 +998,7 @@ func (client DiskAccessesClient) UpdateAPrivateEndpointConnectionPreparer(ctx co "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/diskencryptionsets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskencryptionsets.go similarity index 98% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/diskencryptionsets.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskencryptionsets.go index 37f51fe19198..af979a6e8909 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/diskencryptionsets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskencryptionsets.go @@ -36,7 +36,7 @@ func NewDiskEncryptionSetsClientWithBaseURI(baseURI string, subscriptionID strin // Parameters: // resourceGroupName - the name of the resource group. // diskEncryptionSetName - the name of the disk encryption set that is being created. The name can't be changed -// after the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The +// after the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The // maximum name length is 80 characters. // diskEncryptionSet - disk encryption set object supplied in the body of the Put disk encryption set // operation. @@ -83,7 +83,7 @@ func (client DiskEncryptionSetsClient) CreateOrUpdatePreparer(ctx context.Contex "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -130,7 +130,7 @@ func (client DiskEncryptionSetsClient) CreateOrUpdateResponder(resp *http.Respon // Parameters: // resourceGroupName - the name of the resource group. // diskEncryptionSetName - the name of the disk encryption set that is being created. The name can't be changed -// after the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The +// after the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The // maximum name length is 80 characters. func (client DiskEncryptionSetsClient) Delete(ctx context.Context, resourceGroupName string, diskEncryptionSetName string) (result DiskEncryptionSetsDeleteFuture, err error) { if tracing.IsEnabled() { @@ -166,7 +166,7 @@ func (client DiskEncryptionSetsClient) DeletePreparer(ctx context.Context, resou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -210,7 +210,7 @@ func (client DiskEncryptionSetsClient) DeleteResponder(resp *http.Response) (res // Parameters: // resourceGroupName - the name of the resource group. 
// diskEncryptionSetName - the name of the disk encryption set that is being created. The name can't be changed -// after the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The +// after the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The // maximum name length is 80 characters. func (client DiskEncryptionSetsClient) Get(ctx context.Context, resourceGroupName string, diskEncryptionSetName string) (result DiskEncryptionSet, err error) { if tracing.IsEnabled() { @@ -253,7 +253,7 @@ func (client DiskEncryptionSetsClient) GetPreparer(ctx context.Context, resource "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -329,7 +329,7 @@ func (client DiskEncryptionSetsClient) ListPreparer(ctx context.Context) (*http. "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -401,7 +401,7 @@ func (client DiskEncryptionSetsClient) ListComplete(ctx context.Context) (result // Parameters: // resourceGroupName - the name of the resource group. // diskEncryptionSetName - the name of the disk encryption set that is being created. The name can't be changed -// after the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The +// after the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The // maximum name length is 80 characters. func (client DiskEncryptionSetsClient) ListAssociatedResources(ctx context.Context, resourceGroupName string, diskEncryptionSetName string) (result ResourceURIListPage, err error) { if tracing.IsEnabled() { @@ -449,7 +449,7 @@ func (client DiskEncryptionSetsClient) ListAssociatedResourcesPreparer(ctx conte "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -565,7 +565,7 @@ func (client DiskEncryptionSetsClient) ListByResourceGroupPreparer(ctx context.C "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -637,7 +637,7 @@ func (client DiskEncryptionSetsClient) ListByResourceGroupComplete(ctx context.C // Parameters: // resourceGroupName - the name of the resource group. // diskEncryptionSetName - the name of the disk encryption set that is being created. The name can't be changed -// after the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The +// after the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The // maximum name length is 80 characters. // diskEncryptionSet - disk encryption set object supplied in the body of the Patch disk encryption set // operation. 
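Reviewer note: the disk-related clients (disks, snapshots, disk accesses, disk encryption sets, disk restore points) move to `2021-04-01`, while the remaining compute clients move to `2021-07-01`; both bumps are applied the same way, by replacing the `APIVersion` constant inside each generated preparer. The sketch below paraphrases that generated pattern — it is not the vendored code verbatim, and the URL template is an assumption based on the `GetPreparer` shape — purely to show where the changed constant ends up.

```go
package example

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest"
)

// examplePreparer mirrors the shape of the generated GetPreparer methods touched
// in these hunks: the APIVersion constant is the only thing the diff changes, and
// it flows into the api-version query parameter of every request.
func examplePreparer(ctx context.Context, client compute.DiskAccessesClient, resourceGroupName, diskAccessName string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"diskAccessName":    autorest.Encode("path", diskAccessName),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2021-04-01" // previously "2020-12-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
```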
@@ -675,7 +675,7 @@ func (client DiskEncryptionSetsClient) UpdatePreparer(ctx context.Context, resou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/diskrestorepoint.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskrestorepoint.go similarity index 92% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/diskrestorepoint.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskrestorepoint.go index 20b31a6d4748..d1fe4a66bc2e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/diskrestorepoint.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskrestorepoint.go @@ -36,11 +36,8 @@ func NewDiskRestorePointClientWithBaseURI(baseURI string, subscriptionID string) // Parameters: // resourceGroupName - the name of the resource group. // restorePointCollectionName - the name of the restore point collection that the disk restore point belongs. -// Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters. -// VMRestorePointName - the name of the vm restore point that the disk disk restore point belongs. Supported -// characters for the name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters. -// diskRestorePointName - the name of the disk restore point created. Supported characters for the name are -// a-z, A-Z, 0-9 and _. The maximum name length is 80 characters. +// VMRestorePointName - the name of the vm restore point that the disk disk restore point belongs. +// diskRestorePointName - the name of the disk restore point created. func (client DiskRestorePointClient) Get(ctx context.Context, resourceGroupName string, restorePointCollectionName string, VMRestorePointName string, diskRestorePointName string) (result DiskRestorePoint, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/DiskRestorePointClient.Get") @@ -84,7 +81,7 @@ func (client DiskRestorePointClient) GetPreparer(ctx context.Context, resourceGr "vmRestorePointName": autorest.Encode("path", VMRestorePointName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -119,11 +116,8 @@ func (client DiskRestorePointClient) GetResponder(resp *http.Response) (result D // Parameters: // resourceGroupName - the name of the resource group. // restorePointCollectionName - the name of the restore point collection that the disk restore point belongs. -// Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters. -// VMRestorePointName - the name of the vm restore point that the disk disk restore point belongs. Supported -// characters for the name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters. -// diskRestorePointName - the name of the disk restore point created. Supported characters for the name are -// a-z, A-Z, 0-9 and _. The maximum name length is 80 characters. +// VMRestorePointName - the name of the vm restore point that the disk disk restore point belongs. +// diskRestorePointName - the name of the disk restore point created. 
// grantAccessData - access data object supplied in the body of the get disk access operation. func (client DiskRestorePointClient) GrantAccess(ctx context.Context, resourceGroupName string, restorePointCollectionName string, VMRestorePointName string, diskRestorePointName string, grantAccessData GrantAccessData) (result DiskRestorePointGrantAccessFuture, err error) { if tracing.IsEnabled() { @@ -167,7 +161,7 @@ func (client DiskRestorePointClient) GrantAccessPreparer(ctx context.Context, re "vmRestorePointName": autorest.Encode("path", VMRestorePointName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -214,9 +208,7 @@ func (client DiskRestorePointClient) GrantAccessResponder(resp *http.Response) ( // Parameters: // resourceGroupName - the name of the resource group. // restorePointCollectionName - the name of the restore point collection that the disk restore point belongs. -// Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters. -// VMRestorePointName - the name of the vm restore point that the disk disk restore point belongs. Supported -// characters for the name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters. +// VMRestorePointName - the name of the vm restore point that the disk disk restore point belongs. func (client DiskRestorePointClient) ListByRestorePoint(ctx context.Context, resourceGroupName string, restorePointCollectionName string, VMRestorePointName string) (result DiskRestorePointListPage, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/DiskRestorePointClient.ListByRestorePoint") @@ -264,7 +256,7 @@ func (client DiskRestorePointClient) ListByRestorePointPreparer(ctx context.Cont "vmRestorePointName": autorest.Encode("path", VMRestorePointName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -336,11 +328,8 @@ func (client DiskRestorePointClient) ListByRestorePointComplete(ctx context.Cont // Parameters: // resourceGroupName - the name of the resource group. // restorePointCollectionName - the name of the restore point collection that the disk restore point belongs. -// Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters. -// VMRestorePointName - the name of the vm restore point that the disk disk restore point belongs. Supported -// characters for the name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters. -// diskRestorePointName - the name of the disk restore point created. Supported characters for the name are -// a-z, A-Z, 0-9 and _. The maximum name length is 80 characters. +// VMRestorePointName - the name of the vm restore point that the disk disk restore point belongs. +// diskRestorePointName - the name of the disk restore point created. 
func (client DiskRestorePointClient) RevokeAccess(ctx context.Context, resourceGroupName string, restorePointCollectionName string, VMRestorePointName string, diskRestorePointName string) (result DiskRestorePointRevokeAccessFuture, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/DiskRestorePointClient.RevokeAccess") @@ -377,7 +366,7 @@ func (client DiskRestorePointClient) RevokeAccessPreparer(ctx context.Context, r "vmRestorePointName": autorest.Encode("path", VMRestorePointName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/disks.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/disks.go similarity index 97% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/disks.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/disks.go index 1080f7b8558e..4aec8ae542ff 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/disks.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/disks.go @@ -35,7 +35,7 @@ func NewDisksClientWithBaseURI(baseURI string, subscriptionID string) DisksClien // Parameters: // resourceGroupName - the name of the resource group. // diskName - the name of the managed disk that is being created. The name can't be changed after the disk is -// created. Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum name length is 80 +// created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 // characters. // disk - disk object supplied in the body of the Put disk operation. func (client DisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, diskName string, disk Disk) (result DisksCreateOrUpdateFuture, err error) { @@ -92,7 +92,7 @@ func (client DisksClient) CreateOrUpdatePreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -141,7 +141,7 @@ func (client DisksClient) CreateOrUpdateResponder(resp *http.Response) (result D // Parameters: // resourceGroupName - the name of the resource group. // diskName - the name of the managed disk that is being created. The name can't be changed after the disk is -// created. Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum name length is 80 +// created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 // characters. func (client DisksClient) Delete(ctx context.Context, resourceGroupName string, diskName string) (result DisksDeleteFuture, err error) { if tracing.IsEnabled() { @@ -177,7 +177,7 @@ func (client DisksClient) DeletePreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -221,7 +221,7 @@ func (client DisksClient) DeleteResponder(resp *http.Response) (result autorest. // Parameters: // resourceGroupName - the name of the resource group. 
// diskName - the name of the managed disk that is being created. The name can't be changed after the disk is -// created. Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum name length is 80 +// created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 // characters. func (client DisksClient) Get(ctx context.Context, resourceGroupName string, diskName string) (result Disk, err error) { if tracing.IsEnabled() { @@ -264,7 +264,7 @@ func (client DisksClient) GetPreparer(ctx context.Context, resourceGroupName str "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -299,7 +299,7 @@ func (client DisksClient) GetResponder(resp *http.Response) (result Disk, err er // Parameters: // resourceGroupName - the name of the resource group. // diskName - the name of the managed disk that is being created. The name can't be changed after the disk is -// created. Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum name length is 80 +// created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 // characters. // grantAccessData - access data object supplied in the body of the get disk access operation. func (client DisksClient) GrantAccess(ctx context.Context, resourceGroupName string, diskName string, grantAccessData GrantAccessData) (result DisksGrantAccessFuture, err error) { @@ -342,7 +342,7 @@ func (client DisksClient) GrantAccessPreparer(ctx context.Context, resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -430,7 +430,7 @@ func (client DisksClient) ListPreparer(ctx context.Context) (*http.Request, erro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -546,7 +546,7 @@ func (client DisksClient) ListByResourceGroupPreparer(ctx context.Context, resou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -618,7 +618,7 @@ func (client DisksClient) ListByResourceGroupComplete(ctx context.Context, resou // Parameters: // resourceGroupName - the name of the resource group. // diskName - the name of the managed disk that is being created. The name can't be changed after the disk is -// created. Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum name length is 80 +// created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 // characters. 
func (client DisksClient) RevokeAccess(ctx context.Context, resourceGroupName string, diskName string) (result DisksRevokeAccessFuture, err error) { if tracing.IsEnabled() { @@ -654,7 +654,7 @@ func (client DisksClient) RevokeAccessPreparer(ctx context.Context, resourceGrou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -698,7 +698,7 @@ func (client DisksClient) RevokeAccessResponder(resp *http.Response) (result aut // Parameters: // resourceGroupName - the name of the resource group. // diskName - the name of the managed disk that is being created. The name can't be changed after the disk is -// created. Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum name length is 80 +// created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 // characters. // disk - disk object supplied in the body of the Patch disk operation. func (client DisksClient) Update(ctx context.Context, resourceGroupName string, diskName string, disk DiskUpdate) (result DisksUpdateFuture, err error) { @@ -735,7 +735,7 @@ func (client DisksClient) UpdatePreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/enums.go similarity index 83% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/enums.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/enums.go index 2962ffcdabd8..1edd7683383f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/enums.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/enums.go @@ -74,6 +74,50 @@ func PossibleCachingTypesValues() []CachingTypes { return []CachingTypes{CachingTypesNone, CachingTypesReadOnly, CachingTypesReadWrite} } +// CapacityReservationGroupInstanceViewTypes enumerates the values for capacity reservation group instance view +// types. +type CapacityReservationGroupInstanceViewTypes string + +const ( + // CapacityReservationGroupInstanceViewTypesInstanceView ... + CapacityReservationGroupInstanceViewTypesInstanceView CapacityReservationGroupInstanceViewTypes = "instanceView" +) + +// PossibleCapacityReservationGroupInstanceViewTypesValues returns an array of possible values for the CapacityReservationGroupInstanceViewTypes const type. +func PossibleCapacityReservationGroupInstanceViewTypesValues() []CapacityReservationGroupInstanceViewTypes { + return []CapacityReservationGroupInstanceViewTypes{CapacityReservationGroupInstanceViewTypesInstanceView} +} + +// CapacityReservationInstanceViewTypes enumerates the values for capacity reservation instance view types. +type CapacityReservationInstanceViewTypes string + +const ( + // CapacityReservationInstanceViewTypesInstanceView ... 
+ CapacityReservationInstanceViewTypesInstanceView CapacityReservationInstanceViewTypes = "instanceView" +) + +// PossibleCapacityReservationInstanceViewTypesValues returns an array of possible values for the CapacityReservationInstanceViewTypes const type. +func PossibleCapacityReservationInstanceViewTypesValues() []CapacityReservationInstanceViewTypes { + return []CapacityReservationInstanceViewTypes{CapacityReservationInstanceViewTypesInstanceView} +} + +// CloudServiceUpgradeMode enumerates the values for cloud service upgrade mode. +type CloudServiceUpgradeMode string + +const ( + // CloudServiceUpgradeModeAuto ... + CloudServiceUpgradeModeAuto CloudServiceUpgradeMode = "Auto" + // CloudServiceUpgradeModeManual ... + CloudServiceUpgradeModeManual CloudServiceUpgradeMode = "Manual" + // CloudServiceUpgradeModeSimultaneous ... + CloudServiceUpgradeModeSimultaneous CloudServiceUpgradeMode = "Simultaneous" +) + +// PossibleCloudServiceUpgradeModeValues returns an array of possible values for the CloudServiceUpgradeMode const type. +func PossibleCloudServiceUpgradeModeValues() []CloudServiceUpgradeMode { + return []CloudServiceUpgradeMode{CloudServiceUpgradeModeAuto, CloudServiceUpgradeModeManual, CloudServiceUpgradeModeSimultaneous} +} + // ComponentNames enumerates the values for component names. type ComponentNames string @@ -87,6 +131,23 @@ func PossibleComponentNamesValues() []ComponentNames { return []ComponentNames{ComponentNamesMicrosoftWindowsShellSetup} } +// ConsistencyModeTypes enumerates the values for consistency mode types. +type ConsistencyModeTypes string + +const ( + // ConsistencyModeTypesApplicationConsistent ... + ConsistencyModeTypesApplicationConsistent ConsistencyModeTypes = "ApplicationConsistent" + // ConsistencyModeTypesCrashConsistent ... + ConsistencyModeTypesCrashConsistent ConsistencyModeTypes = "CrashConsistent" + // ConsistencyModeTypesFileSystemConsistent ... + ConsistencyModeTypesFileSystemConsistent ConsistencyModeTypes = "FileSystemConsistent" +) + +// PossibleConsistencyModeTypesValues returns an array of possible values for the ConsistencyModeTypes const type. +func PossibleConsistencyModeTypesValues() []ConsistencyModeTypes { + return []ConsistencyModeTypes{ConsistencyModeTypesApplicationConsistent, ConsistencyModeTypesCrashConsistent, ConsistencyModeTypesFileSystemConsistent} +} + // DedicatedHostLicenseTypes enumerates the values for dedicated host license types. type DedicatedHostLicenseTypes string @@ -104,6 +165,21 @@ func PossibleDedicatedHostLicenseTypesValues() []DedicatedHostLicenseTypes { return []DedicatedHostLicenseTypes{DedicatedHostLicenseTypesNone, DedicatedHostLicenseTypesWindowsServerHybrid, DedicatedHostLicenseTypesWindowsServerPerpetual} } +// DeleteOptions enumerates the values for delete options. +type DeleteOptions string + +const ( + // DeleteOptionsDelete ... + DeleteOptionsDelete DeleteOptions = "Delete" + // DeleteOptionsDetach ... + DeleteOptionsDetach DeleteOptions = "Detach" +) + +// PossibleDeleteOptionsValues returns an array of possible values for the DeleteOptions const type. +func PossibleDeleteOptionsValues() []DeleteOptions { + return []DeleteOptions{DeleteOptionsDelete, DeleteOptionsDetach} +} + // DiffDiskOptions enumerates the values for diff disk options. type DiffDiskOptions string @@ -141,6 +217,9 @@ const ( // DiskCreateOptionCopy Create a new disk or snapshot by copying from a disk or snapshot specified by the // given sourceResourceId. 
DiskCreateOptionCopy DiskCreateOption = "Copy" + // DiskCreateOptionCopyStart Create a new disk by using a deep copy process, where the resource creation is + // considered complete only after all data has been copied from the source. + DiskCreateOptionCopyStart DiskCreateOption = "CopyStart" // DiskCreateOptionEmpty Create an empty data disk of a size given by diskSizeGB. DiskCreateOptionEmpty DiskCreateOption = "Empty" // DiskCreateOptionFromImage Create a new disk from a platform image specified by the given imageReference @@ -158,7 +237,7 @@ const ( // PossibleDiskCreateOptionValues returns an array of possible values for the DiskCreateOption const type. func PossibleDiskCreateOptionValues() []DiskCreateOption { - return []DiskCreateOption{DiskCreateOptionAttach, DiskCreateOptionCopy, DiskCreateOptionEmpty, DiskCreateOptionFromImage, DiskCreateOptionImport, DiskCreateOptionRestore, DiskCreateOptionUpload} + return []DiskCreateOption{DiskCreateOptionAttach, DiskCreateOptionCopy, DiskCreateOptionCopyStart, DiskCreateOptionEmpty, DiskCreateOptionFromImage, DiskCreateOptionImport, DiskCreateOptionRestore, DiskCreateOptionUpload} } // DiskCreateOptionTypes enumerates the values for disk create option types. @@ -178,6 +257,21 @@ func PossibleDiskCreateOptionTypesValues() []DiskCreateOptionTypes { return []DiskCreateOptionTypes{DiskCreateOptionTypesAttach, DiskCreateOptionTypesEmpty, DiskCreateOptionTypesFromImage} } +// DiskDeleteOptionTypes enumerates the values for disk delete option types. +type DiskDeleteOptionTypes string + +const ( + // DiskDeleteOptionTypesDelete ... + DiskDeleteOptionTypesDelete DiskDeleteOptionTypes = "Delete" + // DiskDeleteOptionTypesDetach ... + DiskDeleteOptionTypesDetach DiskDeleteOptionTypes = "Detach" +) + +// PossibleDiskDeleteOptionTypesValues returns an array of possible values for the DiskDeleteOptionTypes const type. +func PossibleDiskDeleteOptionTypesValues() []DiskDeleteOptionTypes { + return []DiskDeleteOptionTypes{DiskDeleteOptionTypesDelete, DiskDeleteOptionTypesDetach} +} + // DiskDetachOptionTypes enumerates the values for disk detach option types. type DiskDetachOptionTypes string @@ -244,14 +338,19 @@ type DiskState string const ( // DiskStateActiveSAS The disk currently has an Active SAS Uri associated with it. DiskStateActiveSAS DiskState = "ActiveSAS" + // DiskStateActiveSASFrozen The disk is attached to a VM in hibernated state and has an active SAS URI + // associated with it. + DiskStateActiveSASFrozen DiskState = "ActiveSASFrozen" // DiskStateActiveUpload A disk is created for upload and a write token has been issued for uploading to // it. DiskStateActiveUpload DiskState = "ActiveUpload" - // DiskStateAttached The disk is currently mounted to a running VM. + // DiskStateAttached The disk is currently attached to a running VM. DiskStateAttached DiskState = "Attached" + // DiskStateFrozen The disk is attached to a VM which is in hibernated state. + DiskStateFrozen DiskState = "Frozen" // DiskStateReadyToUpload A disk is ready to be created by upload by requesting a write token. DiskStateReadyToUpload DiskState = "ReadyToUpload" - // DiskStateReserved The disk is mounted to a stopped-deallocated VM + // DiskStateReserved The disk is attached to a stopped-deallocated VM. DiskStateReserved DiskState = "Reserved" // DiskStateUnattached The disk is not being used and can be attached to a VM. 
DiskStateUnattached DiskState = "Unattached" @@ -259,7 +358,7 @@ const ( // PossibleDiskStateValues returns an array of possible values for the DiskState const type. func PossibleDiskStateValues() []DiskState { - return []DiskState{DiskStateActiveSAS, DiskStateActiveUpload, DiskStateAttached, DiskStateReadyToUpload, DiskStateReserved, DiskStateUnattached} + return []DiskState{DiskStateActiveSAS, DiskStateActiveSASFrozen, DiskStateActiveUpload, DiskStateAttached, DiskStateFrozen, DiskStateReadyToUpload, DiskStateReserved, DiskStateUnattached} } // DiskStorageAccountTypes enumerates the values for disk storage account types. @@ -336,6 +435,48 @@ func PossibleExecutionStateValues() []ExecutionState { return []ExecutionState{ExecutionStateCanceled, ExecutionStateFailed, ExecutionStatePending, ExecutionStateRunning, ExecutionStateSucceeded, ExecutionStateTimedOut, ExecutionStateUnknown} } +// ExpandTypesForGetCapacityReservationGroups enumerates the values for expand types for get capacity +// reservation groups. +type ExpandTypesForGetCapacityReservationGroups string + +const ( + // ExpandTypesForGetCapacityReservationGroupsVirtualMachineScaleSetVMsref ... + ExpandTypesForGetCapacityReservationGroupsVirtualMachineScaleSetVMsref ExpandTypesForGetCapacityReservationGroups = "virtualMachineScaleSetVMs/$ref" + // ExpandTypesForGetCapacityReservationGroupsVirtualMachinesref ... + ExpandTypesForGetCapacityReservationGroupsVirtualMachinesref ExpandTypesForGetCapacityReservationGroups = "virtualMachines/$ref" +) + +// PossibleExpandTypesForGetCapacityReservationGroupsValues returns an array of possible values for the ExpandTypesForGetCapacityReservationGroups const type. +func PossibleExpandTypesForGetCapacityReservationGroupsValues() []ExpandTypesForGetCapacityReservationGroups { + return []ExpandTypesForGetCapacityReservationGroups{ExpandTypesForGetCapacityReservationGroupsVirtualMachineScaleSetVMsref, ExpandTypesForGetCapacityReservationGroupsVirtualMachinesref} +} + +// ExpandTypesForGetVMScaleSets enumerates the values for expand types for get vm scale sets. +type ExpandTypesForGetVMScaleSets string + +const ( + // ExpandTypesForGetVMScaleSetsUserData ... + ExpandTypesForGetVMScaleSetsUserData ExpandTypesForGetVMScaleSets = "userData" +) + +// PossibleExpandTypesForGetVMScaleSetsValues returns an array of possible values for the ExpandTypesForGetVMScaleSets const type. +func PossibleExpandTypesForGetVMScaleSetsValues() []ExpandTypesForGetVMScaleSets { + return []ExpandTypesForGetVMScaleSets{ExpandTypesForGetVMScaleSetsUserData} +} + +// ExtendedLocationType enumerates the values for extended location type. +type ExtendedLocationType string + +const ( + // ExtendedLocationTypeEdgeZone ... + ExtendedLocationTypeEdgeZone ExtendedLocationType = "EdgeZone" +) + +// PossibleExtendedLocationTypeValues returns an array of possible values for the ExtendedLocationType const type. +func PossibleExtendedLocationTypeValues() []ExtendedLocationType { + return []ExtendedLocationType{ExtendedLocationTypeEdgeZone} +} + // ExtendedLocationTypes enumerates the values for extended location types. type ExtendedLocationTypes string @@ -349,6 +490,21 @@ func PossibleExtendedLocationTypesValues() []ExtendedLocationTypes { return []ExtendedLocationTypes{ExtendedLocationTypesEdgeZone} } +// GallerySharingPermissionTypes enumerates the values for gallery sharing permission types. +type GallerySharingPermissionTypes string + +const ( + // GallerySharingPermissionTypesGroups ... 
+ GallerySharingPermissionTypesGroups GallerySharingPermissionTypes = "Groups" + // GallerySharingPermissionTypesPrivate ... + GallerySharingPermissionTypesPrivate GallerySharingPermissionTypes = "Private" +) + +// PossibleGallerySharingPermissionTypesValues returns an array of possible values for the GallerySharingPermissionTypes const type. +func PossibleGallerySharingPermissionTypesValues() []GallerySharingPermissionTypes { + return []GallerySharingPermissionTypes{GallerySharingPermissionTypesGroups, GallerySharingPermissionTypesPrivate} +} + // HostCaching enumerates the values for host caching. type HostCaching string @@ -417,11 +573,13 @@ type InstanceViewTypes string const ( // InstanceViewTypesInstanceView ... InstanceViewTypesInstanceView InstanceViewTypes = "instanceView" + // InstanceViewTypesUserData ... + InstanceViewTypesUserData InstanceViewTypes = "userData" ) // PossibleInstanceViewTypesValues returns an array of possible values for the InstanceViewTypes const type. func PossibleInstanceViewTypesValues() []InstanceViewTypes { - return []InstanceViewTypes{InstanceViewTypesInstanceView} + return []InstanceViewTypes{InstanceViewTypesInstanceView, InstanceViewTypesUserData} } // IntervalInMins enumerates the values for interval in mins. @@ -458,6 +616,36 @@ func PossibleIPVersionValues() []IPVersion { return []IPVersion{IPVersionIPv4, IPVersionIPv6} } +// IPVersions enumerates the values for ip versions. +type IPVersions string + +const ( + // IPVersionsIPv4 ... + IPVersionsIPv4 IPVersions = "IPv4" + // IPVersionsIPv6 ... + IPVersionsIPv6 IPVersions = "IPv6" +) + +// PossibleIPVersionsValues returns an array of possible values for the IPVersions const type. +func PossibleIPVersionsValues() []IPVersions { + return []IPVersions{IPVersionsIPv4, IPVersionsIPv6} +} + +// LinuxPatchAssessmentMode enumerates the values for linux patch assessment mode. +type LinuxPatchAssessmentMode string + +const ( + // LinuxPatchAssessmentModeAutomaticByPlatform ... + LinuxPatchAssessmentModeAutomaticByPlatform LinuxPatchAssessmentMode = "AutomaticByPlatform" + // LinuxPatchAssessmentModeImageDefault ... + LinuxPatchAssessmentModeImageDefault LinuxPatchAssessmentMode = "ImageDefault" +) + +// PossibleLinuxPatchAssessmentModeValues returns an array of possible values for the LinuxPatchAssessmentMode const type. +func PossibleLinuxPatchAssessmentModeValues() []LinuxPatchAssessmentMode { + return []LinuxPatchAssessmentMode{LinuxPatchAssessmentModeAutomaticByPlatform, LinuxPatchAssessmentModeImageDefault} +} + // LinuxVMGuestPatchMode enumerates the values for linux vm guest patch mode. type LinuxVMGuestPatchMode string @@ -510,6 +698,19 @@ func PossibleNetworkAccessPolicyValues() []NetworkAccessPolicy { return []NetworkAccessPolicy{NetworkAccessPolicyAllowAll, NetworkAccessPolicyAllowPrivate, NetworkAccessPolicyDenyAll} } +// NetworkAPIVersion enumerates the values for network api version. +type NetworkAPIVersion string + +const ( + // NetworkAPIVersionTwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne ... + NetworkAPIVersionTwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne NetworkAPIVersion = "2020-11-01" +) + +// PossibleNetworkAPIVersionValues returns an array of possible values for the NetworkAPIVersion const type. +func PossibleNetworkAPIVersionValues() []NetworkAPIVersion { + return []NetworkAPIVersion{NetworkAPIVersionTwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne} +} + // OperatingSystemStateTypes enumerates the values for operating system state types. 
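Reviewer note: `InstanceViewTypes` now carries a second value, `userData`, so any generated `Get` that takes an `InstanceViewTypes` expand can request user data where the service supports it — the dedicated host and dedicated host group doc comments earlier in this diff explicitly say it is not supported there. A hypothetical helper (not part of the SDK or the provider) showing how the widened enum might be consumed:

```go
package example

import "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"

// expandFor picks an expand value for a client whose Get accepts InstanceViewTypes.
func expandFor(wantUserData bool) compute.InstanceViewTypes {
	if wantUserData {
		// New in this vendoring; only honoured by resources that support user
		// data — dedicated host(-group) Get rejects it per the doc comments above.
		return compute.InstanceViewTypesUserData
	}
	return compute.InstanceViewTypesInstanceView
}
```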
type OperatingSystemStateTypes string @@ -525,6 +726,21 @@ func PossibleOperatingSystemStateTypesValues() []OperatingSystemStateTypes { return []OperatingSystemStateTypes{OperatingSystemStateTypesGeneralized, OperatingSystemStateTypesSpecialized} } +// OperatingSystemType enumerates the values for operating system type. +type OperatingSystemType string + +const ( + // OperatingSystemTypeLinux ... + OperatingSystemTypeLinux OperatingSystemType = "Linux" + // OperatingSystemTypeWindows ... + OperatingSystemTypeWindows OperatingSystemType = "Windows" +) + +// PossibleOperatingSystemTypeValues returns an array of possible values for the OperatingSystemType const type. +func PossibleOperatingSystemTypeValues() []OperatingSystemType { + return []OperatingSystemType{OperatingSystemTypeLinux, OperatingSystemTypeWindows} +} + // OperatingSystemTypes enumerates the values for operating system types. type OperatingSystemTypes string @@ -831,6 +1047,85 @@ func PossibleProximityPlacementGroupTypeValues() []ProximityPlacementGroupType { return []ProximityPlacementGroupType{ProximityPlacementGroupTypeStandard, ProximityPlacementGroupTypeUltra} } +// PublicIPAddressSkuName enumerates the values for public ip address sku name. +type PublicIPAddressSkuName string + +const ( + // PublicIPAddressSkuNameBasic ... + PublicIPAddressSkuNameBasic PublicIPAddressSkuName = "Basic" + // PublicIPAddressSkuNameStandard ... + PublicIPAddressSkuNameStandard PublicIPAddressSkuName = "Standard" +) + +// PossiblePublicIPAddressSkuNameValues returns an array of possible values for the PublicIPAddressSkuName const type. +func PossiblePublicIPAddressSkuNameValues() []PublicIPAddressSkuName { + return []PublicIPAddressSkuName{PublicIPAddressSkuNameBasic, PublicIPAddressSkuNameStandard} +} + +// PublicIPAddressSkuTier enumerates the values for public ip address sku tier. +type PublicIPAddressSkuTier string + +const ( + // PublicIPAddressSkuTierGlobal ... + PublicIPAddressSkuTierGlobal PublicIPAddressSkuTier = "Global" + // PublicIPAddressSkuTierRegional ... + PublicIPAddressSkuTierRegional PublicIPAddressSkuTier = "Regional" +) + +// PossiblePublicIPAddressSkuTierValues returns an array of possible values for the PublicIPAddressSkuTier const type. +func PossiblePublicIPAddressSkuTierValues() []PublicIPAddressSkuTier { + return []PublicIPAddressSkuTier{PublicIPAddressSkuTierGlobal, PublicIPAddressSkuTierRegional} +} + +// PublicIPAllocationMethod enumerates the values for public ip allocation method. +type PublicIPAllocationMethod string + +const ( + // PublicIPAllocationMethodDynamic ... + PublicIPAllocationMethodDynamic PublicIPAllocationMethod = "Dynamic" + // PublicIPAllocationMethodStatic ... + PublicIPAllocationMethodStatic PublicIPAllocationMethod = "Static" +) + +// PossiblePublicIPAllocationMethodValues returns an array of possible values for the PublicIPAllocationMethod const type. +func PossiblePublicIPAllocationMethodValues() []PublicIPAllocationMethod { + return []PublicIPAllocationMethod{PublicIPAllocationMethodDynamic, PublicIPAllocationMethodStatic} +} + +// PublicNetworkAccess enumerates the values for public network access. +type PublicNetworkAccess string + +const ( + // PublicNetworkAccessDisabled You cannot access the underlying data of the disk publicly on the internet + // even when NetworkAccessPolicy is set to AllowAll. You can access the data via the SAS URI only from your + // trusted Azure VNET when NetworkAccessPolicy is set to AllowPrivate. 
+ PublicNetworkAccessDisabled PublicNetworkAccess = "Disabled" + // PublicNetworkAccessEnabled You can generate a SAS URI to access the underlying data of the disk publicly + // on the internet when NetworkAccessPolicy is set to AllowAll. You can access the data via the SAS URI + // only from your trusted Azure VNET when NetworkAccessPolicy is set to AllowPrivate. + PublicNetworkAccessEnabled PublicNetworkAccess = "Enabled" +) + +// PossiblePublicNetworkAccessValues returns an array of possible values for the PublicNetworkAccess const type. +func PossiblePublicNetworkAccessValues() []PublicNetworkAccess { + return []PublicNetworkAccess{PublicNetworkAccessDisabled, PublicNetworkAccessEnabled} +} + +// ReplicationMode enumerates the values for replication mode. +type ReplicationMode string + +const ( + // ReplicationModeFull ... + ReplicationModeFull ReplicationMode = "Full" + // ReplicationModeShallow ... + ReplicationModeShallow ReplicationMode = "Shallow" +) + +// PossibleReplicationModeValues returns an array of possible values for the ReplicationMode const type. +func PossibleReplicationModeValues() []ReplicationMode { + return []ReplicationMode{ReplicationModeFull, ReplicationModeShallow} +} + // ReplicationState enumerates the values for replication state. type ReplicationState string @@ -929,6 +1224,19 @@ func PossibleResourceSkuRestrictionsTypeValues() []ResourceSkuRestrictionsType { return []ResourceSkuRestrictionsType{ResourceSkuRestrictionsTypeLocation, ResourceSkuRestrictionsTypeZone} } +// RestorePointCollectionExpandOptions enumerates the values for restore point collection expand options. +type RestorePointCollectionExpandOptions string + +const ( + // RestorePointCollectionExpandOptionsRestorePoints ... + RestorePointCollectionExpandOptionsRestorePoints RestorePointCollectionExpandOptions = "restorePoints" +) + +// PossibleRestorePointCollectionExpandOptionsValues returns an array of possible values for the RestorePointCollectionExpandOptions const type. +func PossibleRestorePointCollectionExpandOptionsValues() []RestorePointCollectionExpandOptions { + return []RestorePointCollectionExpandOptions{RestorePointCollectionExpandOptionsRestorePoints} +} + // RollingUpgradeActionType enumerates the values for rolling upgrade action type. type RollingUpgradeActionType string @@ -976,6 +1284,19 @@ func PossibleSecurityTypesValues() []SecurityTypes { return []SecurityTypes{SecurityTypesTrustedLaunch} } +// SelectPermissions enumerates the values for select permissions. +type SelectPermissions string + +const ( + // SelectPermissionsPermissions ... + SelectPermissionsPermissions SelectPermissions = "Permissions" +) + +// PossibleSelectPermissionsValues returns an array of possible values for the SelectPermissions const type. +func PossibleSelectPermissionsValues() []SelectPermissions { + return []SelectPermissions{SelectPermissionsPermissions} +} + // SettingNames enumerates the values for setting names. type SettingNames string @@ -991,6 +1312,51 @@ func PossibleSettingNamesValues() []SettingNames { return []SettingNames{SettingNamesAutoLogon, SettingNamesFirstLogonCommands} } +// SharedToValues enumerates the values for shared to values. +type SharedToValues string + +const ( + // SharedToValuesTenant ... + SharedToValuesTenant SharedToValues = "tenant" +) + +// PossibleSharedToValuesValues returns an array of possible values for the SharedToValues const type. 
+func PossibleSharedToValuesValues() []SharedToValues { + return []SharedToValues{SharedToValuesTenant} +} + +// SharingProfileGroupTypes enumerates the values for sharing profile group types. +type SharingProfileGroupTypes string + +const ( + // SharingProfileGroupTypesAADTenants ... + SharingProfileGroupTypesAADTenants SharingProfileGroupTypes = "AADTenants" + // SharingProfileGroupTypesSubscriptions ... + SharingProfileGroupTypesSubscriptions SharingProfileGroupTypes = "Subscriptions" +) + +// PossibleSharingProfileGroupTypesValues returns an array of possible values for the SharingProfileGroupTypes const type. +func PossibleSharingProfileGroupTypesValues() []SharingProfileGroupTypes { + return []SharingProfileGroupTypes{SharingProfileGroupTypesAADTenants, SharingProfileGroupTypesSubscriptions} +} + +// SharingUpdateOperationTypes enumerates the values for sharing update operation types. +type SharingUpdateOperationTypes string + +const ( + // SharingUpdateOperationTypesAdd ... + SharingUpdateOperationTypesAdd SharingUpdateOperationTypes = "Add" + // SharingUpdateOperationTypesRemove ... + SharingUpdateOperationTypesRemove SharingUpdateOperationTypes = "Remove" + // SharingUpdateOperationTypesReset ... + SharingUpdateOperationTypesReset SharingUpdateOperationTypes = "Reset" +) + +// PossibleSharingUpdateOperationTypesValues returns an array of possible values for the SharingUpdateOperationTypes const type. +func PossibleSharingUpdateOperationTypesValues() []SharingUpdateOperationTypes { + return []SharingUpdateOperationTypes{SharingUpdateOperationTypesAdd, SharingUpdateOperationTypesRemove, SharingUpdateOperationTypesReset} +} + // SnapshotStorageAccountTypes enumerates the values for snapshot storage account types. type SnapshotStorageAccountTypes string @@ -1643,6 +2009,21 @@ func PossibleVMGuestPatchRebootStatusValues() []VMGuestPatchRebootStatus { return []VMGuestPatchRebootStatus{VMGuestPatchRebootStatusCompleted, VMGuestPatchRebootStatusFailed, VMGuestPatchRebootStatusNotNeeded, VMGuestPatchRebootStatusRequired, VMGuestPatchRebootStatusStarted, VMGuestPatchRebootStatusUnknown} } +// WindowsPatchAssessmentMode enumerates the values for windows patch assessment mode. +type WindowsPatchAssessmentMode string + +const ( + // WindowsPatchAssessmentModeAutomaticByPlatform ... + WindowsPatchAssessmentModeAutomaticByPlatform WindowsPatchAssessmentMode = "AutomaticByPlatform" + // WindowsPatchAssessmentModeImageDefault ... + WindowsPatchAssessmentModeImageDefault WindowsPatchAssessmentMode = "ImageDefault" +) + +// PossibleWindowsPatchAssessmentModeValues returns an array of possible values for the WindowsPatchAssessmentMode const type. +func PossibleWindowsPatchAssessmentModeValues() []WindowsPatchAssessmentMode { + return []WindowsPatchAssessmentMode{WindowsPatchAssessmentModeAutomaticByPlatform, WindowsPatchAssessmentModeImageDefault} +} + // WindowsVMGuestPatchMode enumerates the values for windows vm guest patch mode. 
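As an illustrative aside (not part of the vendored diff): the enum additions above each come with a generated Possible*Values helper, which is convenient when a caller needs plain strings, for example to validate user input. A minimal sketch in Go, assuming only the generated package shown in this diff; the package and function names below are hypothetical:

package example

import "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"

// publicIPSkuNames flattens the generated PublicIPAddressSkuName enum into a
// plain string slice, e.g. for use with a string-validation helper.
func publicIPSkuNames() []string {
    values := compute.PossiblePublicIPAddressSkuNameValues()
    names := make([]string, 0, len(values))
    for _, value := range values {
        names = append(names, string(value))
    }
    return names // ["Basic", "Standard"]
}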
type WindowsVMGuestPatchMode string diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/galleries.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleries.go similarity index 97% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/galleries.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleries.go index 0d32432e367a..9730526f7605 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/galleries.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleries.go @@ -70,7 +70,7 @@ func (client GalleriesClient) CreateOrUpdatePreparer(ctx context.Context, resour "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -151,7 +151,7 @@ func (client GalleriesClient) DeletePreparer(ctx context.Context, resourceGroupN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -195,7 +195,8 @@ func (client GalleriesClient) DeleteResponder(resp *http.Response) (result autor // Parameters: // resourceGroupName - the name of the resource group. // galleryName - the name of the Shared Image Gallery. -func (client GalleriesClient) Get(ctx context.Context, resourceGroupName string, galleryName string) (result Gallery, err error) { +// selectParameter - the select expression to apply on the operation. +func (client GalleriesClient) Get(ctx context.Context, resourceGroupName string, galleryName string, selectParameter SelectPermissions) (result Gallery, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/GalleriesClient.Get") defer func() { @@ -206,7 +207,7 @@ func (client GalleriesClient) Get(ctx context.Context, resourceGroupName string, tracing.EndSpan(ctx, sc, err) }() } - req, err := client.GetPreparer(ctx, resourceGroupName, galleryName) + req, err := client.GetPreparer(ctx, resourceGroupName, galleryName, selectParameter) if err != nil { err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "Get", nil, "Failure preparing request") return @@ -229,17 +230,20 @@ func (client GalleriesClient) Get(ctx context.Context, resourceGroupName string, } // GetPreparer prepares the Get request. 
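Worth noting before the GetPreparer hunk below: GalleriesClient.Get now takes a third argument of type SelectPermissions, and the $select query parameter is only appended when that value is non-empty, so callers that do not need sharing permissions can pass the zero value. A minimal usage sketch, assuming an already-configured client; the wrapper function name is hypothetical:

package example

import (
    "context"

    "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

// getGalleryWithPermissions requests the gallery's sharing permissions by
// passing compute.SelectPermissionsPermissions; passing the zero value ("")
// omits the $select query parameter entirely.
func getGalleryWithPermissions(ctx context.Context, client compute.GalleriesClient, resourceGroup, galleryName string) (compute.Gallery, error) {
    return client.Get(ctx, resourceGroup, galleryName, compute.SelectPermissionsPermissions)
}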
-func (client GalleriesClient) GetPreparer(ctx context.Context, resourceGroupName string, galleryName string) (*http.Request, error) { +func (client GalleriesClient) GetPreparer(ctx context.Context, resourceGroupName string, galleryName string, selectParameter SelectPermissions) (*http.Request, error) { pathParameters := map[string]interface{}{ "galleryName": autorest.Encode("path", galleryName), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } + if len(string(selectParameter)) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } preparer := autorest.CreatePreparer( autorest.AsGet(), @@ -312,7 +316,7 @@ func (client GalleriesClient) ListPreparer(ctx context.Context) (*http.Request, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -428,7 +432,7 @@ func (client GalleriesClient) ListByResourceGroupPreparer(ctx context.Context, r "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -536,7 +540,7 @@ func (client GalleriesClient) UpdatePreparer(ctx context.Context, resourceGroupN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/galleryapplications.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryapplications.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/galleryapplications.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryapplications.go index 7886b4fec37c..f496086291f8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/galleryapplications.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryapplications.go @@ -75,7 +75,7 @@ func (client GalleryApplicationsClient) CreateOrUpdatePreparer(ctx context.Conte "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -159,7 +159,7 @@ func (client GalleryApplicationsClient) DeletePreparer(ctx context.Context, reso "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -247,7 +247,7 @@ func (client GalleryApplicationsClient) GetPreparer(ctx context.Context, resourc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -329,7 +329,7 @@ func (client GalleryApplicationsClient) ListByGalleryPreparer(ctx context.Contex "subscriptionId": 
autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -441,7 +441,7 @@ func (client GalleryApplicationsClient) UpdatePreparer(ctx context.Context, reso "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/galleryapplicationversions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryapplicationversions.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/galleryapplicationversions.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryapplicationversions.go index 1390517d554d..947fae104d74 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/galleryapplicationversions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryapplicationversions.go @@ -94,7 +94,7 @@ func (client GalleryApplicationVersionsClient) CreateOrUpdatePreparer(ctx contex "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -180,7 +180,7 @@ func (client GalleryApplicationVersionsClient) DeletePreparer(ctx context.Contex "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -271,7 +271,7 @@ func (client GalleryApplicationVersionsClient) GetPreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -358,7 +358,7 @@ func (client GalleryApplicationVersionsClient) ListByGalleryApplicationPreparer( "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -472,7 +472,7 @@ func (client GalleryApplicationVersionsClient) UpdatePreparer(ctx context.Contex "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/galleryimages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryimages.go similarity index 96% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/galleryimages.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryimages.go index a57095ccf216..bf16351b3ec6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/galleryimages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryimages.go @@ -31,11 +31,11 @@ func 
NewGalleryImagesClientWithBaseURI(baseURI string, subscriptionID string) Ga return GalleryImagesClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate create or update a gallery Image Definition. +// CreateOrUpdate create or update a gallery image definition. // Parameters: // resourceGroupName - the name of the resource group. // galleryName - the name of the Shared Image Gallery in which the Image Definition is to be created. -// galleryImageName - the name of the gallery Image Definition to be created or updated. The allowed characters +// galleryImageName - the name of the gallery image definition to be created or updated. The allowed characters // are alphabets and numbers with dots, dashes, and periods allowed in the middle. The maximum length is 80 // characters. // galleryImage - parameters supplied to the create or update gallery image operation. @@ -86,7 +86,7 @@ func (client GalleryImagesClient) CreateOrUpdatePreparer(ctx context.Context, re "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -133,7 +133,7 @@ func (client GalleryImagesClient) CreateOrUpdateResponder(resp *http.Response) ( // Parameters: // resourceGroupName - the name of the resource group. // galleryName - the name of the Shared Image Gallery in which the Image Definition is to be deleted. -// galleryImageName - the name of the gallery Image Definition to be deleted. +// galleryImageName - the name of the gallery image definition to be deleted. func (client GalleryImagesClient) Delete(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (result GalleryImagesDeleteFuture, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImagesClient.Delete") @@ -169,7 +169,7 @@ func (client GalleryImagesClient) DeletePreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -209,11 +209,11 @@ func (client GalleryImagesClient) DeleteResponder(resp *http.Response) (result a return } -// Get retrieves information about a gallery Image Definition. +// Get retrieves information about a gallery image definition. // Parameters: // resourceGroupName - the name of the resource group. // galleryName - the name of the Shared Image Gallery from which the Image Definitions are to be retrieved. -// galleryImageName - the name of the gallery Image Definition to be retrieved. +// galleryImageName - the name of the gallery image definition to be retrieved. func (client GalleryImagesClient) Get(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (result GalleryImage, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImagesClient.Get") @@ -256,7 +256,7 @@ func (client GalleryImagesClient) GetPreparer(ctx context.Context, resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -287,7 +287,7 @@ func (client GalleryImagesClient) GetResponder(resp *http.Response) (result Gall return } -// ListByGallery list gallery Image Definitions in a gallery. 
+// ListByGallery list gallery image definitions in a gallery. // Parameters: // resourceGroupName - the name of the resource group. // galleryName - the name of the Shared Image Gallery from which Image Definitions are to be listed. @@ -337,7 +337,7 @@ func (client GalleryImagesClient) ListByGalleryPreparer(ctx context.Context, res "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -405,11 +405,11 @@ func (client GalleryImagesClient) ListByGalleryComplete(ctx context.Context, res return } -// Update update a gallery Image Definition. +// Update update a gallery image definition. // Parameters: // resourceGroupName - the name of the resource group. // galleryName - the name of the Shared Image Gallery in which the Image Definition is to be updated. -// galleryImageName - the name of the gallery Image Definition to be updated. The allowed characters are +// galleryImageName - the name of the gallery image definition to be updated. The allowed characters are // alphabets and numbers with dots, dashes, and periods allowed in the middle. The maximum length is 80 // characters. // galleryImage - parameters supplied to the update gallery image operation. @@ -448,7 +448,7 @@ func (client GalleryImagesClient) UpdatePreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/galleryimageversions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryimageversions.go similarity index 95% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/galleryimageversions.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryimageversions.go index 5db43f681fcf..0ee7a395b9ea 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/galleryimageversions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryimageversions.go @@ -32,15 +32,15 @@ func NewGalleryImageVersionsClientWithBaseURI(baseURI string, subscriptionID str return GalleryImageVersionsClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate create or update a gallery Image Version. +// CreateOrUpdate create or update a gallery image version. // Parameters: // resourceGroupName - the name of the resource group. // galleryName - the name of the Shared Image Gallery in which the Image Definition resides. -// galleryImageName - the name of the gallery Image Definition in which the Image Version is to be created. -// galleryImageVersionName - the name of the gallery Image Version to be created. Needs to follow semantic +// galleryImageName - the name of the gallery image definition in which the Image Version is to be created. +// galleryImageVersionName - the name of the gallery image version to be created. Needs to follow semantic // version name pattern: The allowed characters are digit and period. Digits must be within the range of a // 32-bit integer. Format: .. -// galleryImageVersion - parameters supplied to the create or update gallery Image Version operation. 
+// galleryImageVersion - parameters supplied to the create or update gallery image version operation. func (client GalleryImageVersionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersion) (result GalleryImageVersionsCreateOrUpdateFuture, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImageVersionsClient.CreateOrUpdate") @@ -84,7 +84,7 @@ func (client GalleryImageVersionsClient) CreateOrUpdatePreparer(ctx context.Cont "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -127,12 +127,12 @@ func (client GalleryImageVersionsClient) CreateOrUpdateResponder(resp *http.Resp return } -// Delete delete a gallery Image Version. +// Delete delete a gallery image version. // Parameters: // resourceGroupName - the name of the resource group. // galleryName - the name of the Shared Image Gallery in which the Image Definition resides. -// galleryImageName - the name of the gallery Image Definition in which the Image Version resides. -// galleryImageVersionName - the name of the gallery Image Version to be deleted. +// galleryImageName - the name of the gallery image definition in which the Image Version resides. +// galleryImageVersionName - the name of the gallery image version to be deleted. func (client GalleryImageVersionsClient) Delete(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string) (result GalleryImageVersionsDeleteFuture, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImageVersionsClient.Delete") @@ -169,7 +169,7 @@ func (client GalleryImageVersionsClient) DeletePreparer(ctx context.Context, res "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -209,12 +209,12 @@ func (client GalleryImageVersionsClient) DeleteResponder(resp *http.Response) (r return } -// Get retrieves information about a gallery Image Version. +// Get retrieves information about a gallery image version. // Parameters: // resourceGroupName - the name of the resource group. // galleryName - the name of the Shared Image Gallery in which the Image Definition resides. -// galleryImageName - the name of the gallery Image Definition in which the Image Version resides. -// galleryImageVersionName - the name of the gallery Image Version to be retrieved. +// galleryImageName - the name of the gallery image definition in which the Image Version resides. +// galleryImageVersionName - the name of the gallery image version to be retrieved. // expand - the expand expression to apply on the operation. 
func (client GalleryImageVersionsClient) Get(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, expand ReplicationStatusTypes) (result GalleryImageVersion, err error) { if tracing.IsEnabled() { @@ -259,7 +259,7 @@ func (client GalleryImageVersionsClient) GetPreparer(ctx context.Context, resour "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -293,7 +293,7 @@ func (client GalleryImageVersionsClient) GetResponder(resp *http.Response) (resu return } -// ListByGalleryImage list gallery Image Versions in a gallery Image Definition. +// ListByGalleryImage list gallery image versions in a gallery image definition. // Parameters: // resourceGroupName - the name of the resource group. // galleryName - the name of the Shared Image Gallery in which the Image Definition resides. @@ -346,7 +346,7 @@ func (client GalleryImageVersionsClient) ListByGalleryImagePreparer(ctx context. "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -414,15 +414,15 @@ func (client GalleryImageVersionsClient) ListByGalleryImageComplete(ctx context. return } -// Update update a gallery Image Version. +// Update update a gallery image version. // Parameters: // resourceGroupName - the name of the resource group. // galleryName - the name of the Shared Image Gallery in which the Image Definition resides. -// galleryImageName - the name of the gallery Image Definition in which the Image Version is to be updated. -// galleryImageVersionName - the name of the gallery Image Version to be updated. Needs to follow semantic +// galleryImageName - the name of the gallery image definition in which the Image Version is to be updated. +// galleryImageVersionName - the name of the gallery image version to be updated. Needs to follow semantic // version name pattern: The allowed characters are digit and period. Digits must be within the range of a // 32-bit integer. Format: .. -// galleryImageVersion - parameters supplied to the update gallery Image Version operation. +// galleryImageVersion - parameters supplied to the update gallery image version operation. 
func (client GalleryImageVersionsClient) Update(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersionUpdate) (result GalleryImageVersionsUpdateFuture, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImageVersionsClient.Update") @@ -459,7 +459,7 @@ func (client GalleryImageVersionsClient) UpdatePreparer(ctx context.Context, res "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/gallerysharingprofile.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/gallerysharingprofile.go new file mode 100644 index 000000000000..eab53278bb40 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/gallerysharingprofile.go @@ -0,0 +1,114 @@ +package compute + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// GallerySharingProfileClient is the compute Client +type GallerySharingProfileClient struct { + BaseClient +} + +// NewGallerySharingProfileClient creates an instance of the GallerySharingProfileClient client. +func NewGallerySharingProfileClient(subscriptionID string) GallerySharingProfileClient { + return NewGallerySharingProfileClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewGallerySharingProfileClientWithBaseURI creates an instance of the GallerySharingProfileClient client using a +// custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, +// Azure stack). +func NewGallerySharingProfileClientWithBaseURI(baseURI string, subscriptionID string) GallerySharingProfileClient { + return GallerySharingProfileClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Update update sharing profile of a gallery. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Image Gallery. +// sharingUpdate - parameters supplied to the update gallery sharing profile. 
+func (client GallerySharingProfileClient) Update(ctx context.Context, resourceGroupName string, galleryName string, sharingUpdate SharingUpdate) (result GallerySharingProfileUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GallerySharingProfileClient.Update") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, galleryName, sharingUpdate) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GallerySharingProfileClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GallerySharingProfileClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client GallerySharingProfileClient) UpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, sharingUpdate SharingUpdate) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/share", pathParameters), + autorest.WithJSON(sharingUpdate), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client GallerySharingProfileClient) UpdateSender(req *http.Request) (future GallerySharingProfileUpdateFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
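The new GallerySharingProfileClient above exposes a single long-running Update operation. The sketch below shows how a caller might add an AAD tenant to a gallery's sharing profile; it is illustrative only, the SharingUpdate and SharingProfileGroup field names are assumed from the REST sharing-profile schema (operationType/groups/type/ids) rather than taken from this diff, and the wait step assumes the usual embedded azure.FutureAPI behaviour:

package example

import (
    "context"

    "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

// shareGalleryWithTenant adds an AAD tenant to a gallery's sharing profile and
// blocks until the long-running operation completes.
func shareGalleryWithTenant(ctx context.Context, client compute.GallerySharingProfileClient, resourceGroup, galleryName, tenantID string) error {
    update := compute.SharingUpdate{
        // Field names assumed from the REST schema; confirm against models.go.
        OperationType: compute.SharingUpdateOperationTypesAdd,
        Groups: &[]compute.SharingProfileGroup{{
            Type: compute.SharingProfileGroupTypesAADTenants,
            IDs:  &[]string{tenantID},
        }},
    }

    future, err := client.Update(ctx, resourceGroup, galleryName, update)
    if err != nil {
        return err
    }
    // WaitForCompletionRef is expected via the embedded azure.FutureAPI.
    return future.WaitForCompletionRef(ctx, client.Client)
}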
+func (client GallerySharingProfileClient) UpdateResponder(resp *http.Response) (result SharingUpdate, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/images.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/images.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/images.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/images.go index 097a8f5dc61b..66a9bcbd0c44 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/images.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/images.go @@ -69,7 +69,7 @@ func (client ImagesClient) CreateOrUpdatePreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -150,7 +150,7 @@ func (client ImagesClient) DeletePreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -236,7 +236,7 @@ func (client ImagesClient) GetPreparer(ctx context.Context, resourceGroupName st "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -316,7 +316,7 @@ func (client ImagesClient) ListPreparer(ctx context.Context) (*http.Request, err "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -432,7 +432,7 @@ func (client ImagesClient) ListByResourceGroupPreparer(ctx context.Context, reso "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -539,7 +539,7 @@ func (client ImagesClient) UpdatePreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/loganalytics.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/loganalytics.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/loganalytics.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/loganalytics.go index 38908fd0de79..ec30f9cc8c61 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/loganalytics.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/loganalytics.go @@ -75,7 +75,7 @@ func (client LogAnalyticsClient) ExportRequestRateByIntervalPreparer(ctx context "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -162,7 +162,7 @@ func (client LogAnalyticsClient) ExportThrottledRequestsPreparer(ctx context.Con "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/models.go similarity index 76% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/models.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/models.go index 14a353540e8a..9718eb39f588 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/models.go @@ -14,11 +14,12 @@ import ( "github.com/Azure/go-autorest/autorest/date" "github.com/Azure/go-autorest/autorest/to" "github.com/Azure/go-autorest/tracing" + "io" "net/http" ) // The package's fully qualified name. -const fqdn = "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" +const fqdn = "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" // AccessURI a disk access SAS uri. type AccessURI struct { @@ -38,6 +39,8 @@ func (au AccessURI) MarshalJSON() ([]byte, error) { type AdditionalCapabilities struct { // UltraSSDEnabled - The flag that enables or disables a capability to have one or more managed data disks with UltraSSD_LRS storage account type on the VM or VMSS. Managed disks with storage account type UltraSSD_LRS can be added to a virtual machine or virtual machine scale set only if this property is enabled. UltraSSDEnabled *bool `json:"ultraSSDEnabled,omitempty"` + // HibernationEnabled - The flag that enables or disables hibernation capability on the VM. + HibernationEnabled *bool `json:"hibernationEnabled,omitempty"` } // AdditionalUnattendContent specifies additional XML formatted information that can be included in the @@ -84,6 +87,13 @@ type APIErrorBase struct { Message *string `json:"message,omitempty"` } +// ApplicationProfile contains the list of gallery applications that should be made available to the +// VM/VMSS +type ApplicationProfile struct { + // GalleryApplications - Specifies the gallery applications that should be made available to the VM/VMSS + GalleryApplications *[]VMGalleryApplication `json:"galleryApplications,omitempty"` +} + // AutomaticOSUpgradePolicy the configuration parameters used for performing automatic OS upgrade. type AutomaticOSUpgradePolicy struct { // EnableAutomaticOSUpgrade - Indicates whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available. Default value is false.
If this is set to true for Windows based scale sets, [enableAutomaticUpdates](https://docs.microsoft.com/dotnet/api/microsoft.azure.management.compute.models.windowsconfiguration.enableautomaticupdates?view=azure-dotnet) is automatically set to false and cannot be set to true. @@ -109,14 +119,12 @@ type AutomaticRepairsPolicy struct { // AvailabilitySet specifies information about the availability set that the virtual machine should be // assigned to. Virtual machines specified in the same availability set are allocated to different nodes to -// maximize availability. For more information about availability sets, see [Manage the availability of -// virtual -// machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). -//
For more information on Azure planned maintenance, see [Planned maintenance for virtual -// machines in -// Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) -//
Currently, a VM can only be added to availability set at creation time. An existing VM cannot -// be added to an availability set. +// maximize availability. For more information about availability sets, see [Availability sets +// overview](https://docs.microsoft.com/azure/virtual-machines/availability-set-overview).
For +// more information on Azure planned maintenance, see [Maintenance and updates for Virtual Machines in +// Azure](https://docs.microsoft.com/azure/virtual-machines/maintenance-and-updates)
Currently, a +// VM can only be added to availability set at creation time. An existing VM cannot be added to an +// availability set. type AvailabilitySet struct { autorest.Response `json:"-"` *AvailabilitySetProperties `json:"properties,omitempty"` @@ -548,158 +556,14 @@ func (bdiv BootDiagnosticsInstanceView) MarshalJSON() ([]byte, error) { return json.Marshal(objectMap) } -// CloudError an error response from the Compute service. -type CloudError struct { - Error *APIError `json:"error,omitempty"` -} - -// CreationData data used when creating a disk. -type CreationData struct { - // CreateOption - This enumerates the possible sources of a disk's creation. Possible values include: 'DiskCreateOptionEmpty', 'DiskCreateOptionAttach', 'DiskCreateOptionFromImage', 'DiskCreateOptionImport', 'DiskCreateOptionCopy', 'DiskCreateOptionRestore', 'DiskCreateOptionUpload' - CreateOption DiskCreateOption `json:"createOption,omitempty"` - // StorageAccountID - Required if createOption is Import. The Azure Resource Manager identifier of the storage account containing the blob to import as a disk. - StorageAccountID *string `json:"storageAccountId,omitempty"` - // ImageReference - Disk source information. - ImageReference *ImageDiskReference `json:"imageReference,omitempty"` - // GalleryImageReference - Required if creating from a Gallery Image. The id of the ImageDiskReference will be the ARM id of the shared galley image version from which to create a disk. - GalleryImageReference *ImageDiskReference `json:"galleryImageReference,omitempty"` - // SourceURI - If createOption is Import, this is the URI of a blob to be imported into a managed disk. - SourceURI *string `json:"sourceUri,omitempty"` - // SourceResourceID - If createOption is Copy, this is the ARM id of the source snapshot or disk. - SourceResourceID *string `json:"sourceResourceId,omitempty"` - // SourceUniqueID - READ-ONLY; If this field is set, this is the unique id identifying the source of this resource. - SourceUniqueID *string `json:"sourceUniqueId,omitempty"` - // UploadSizeBytes - If createOption is Upload, this is the size of the contents of the upload including the VHD footer. This value should be between 20972032 (20 MiB + 512 bytes for the VHD footer) and 35183298347520 bytes (32 TiB + 512 bytes for the VHD footer). - UploadSizeBytes *int64 `json:"uploadSizeBytes,omitempty"` - // LogicalSectorSize - Logical sector size in bytes for Ultra disks. Supported values are 512 ad 4096. 4096 is the default. - LogicalSectorSize *int32 `json:"logicalSectorSize,omitempty"` -} - -// MarshalJSON is the custom marshaler for CreationData. -func (cd CreationData) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cd.CreateOption != "" { - objectMap["createOption"] = cd.CreateOption - } - if cd.StorageAccountID != nil { - objectMap["storageAccountId"] = cd.StorageAccountID - } - if cd.ImageReference != nil { - objectMap["imageReference"] = cd.ImageReference - } - if cd.GalleryImageReference != nil { - objectMap["galleryImageReference"] = cd.GalleryImageReference - } - if cd.SourceURI != nil { - objectMap["sourceUri"] = cd.SourceURI - } - if cd.SourceResourceID != nil { - objectMap["sourceResourceId"] = cd.SourceResourceID - } - if cd.UploadSizeBytes != nil { - objectMap["uploadSizeBytes"] = cd.UploadSizeBytes - } - if cd.LogicalSectorSize != nil { - objectMap["logicalSectorSize"] = cd.LogicalSectorSize - } - return json.Marshal(objectMap) -} - -// DataDisk describes a data disk. 
-type DataDisk struct { - // Lun - Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. - Lun *int32 `json:"lun,omitempty"` - // Name - The disk name. - Name *string `json:"name,omitempty"` - // Vhd - The virtual hard disk. - Vhd *VirtualHardDisk `json:"vhd,omitempty"` - // Image - The source user image virtual hard disk. The virtual hard disk will be copied before being attached to the virtual machine. If SourceImage is provided, the destination virtual hard drive must not exist. - Image *VirtualHardDisk `json:"image,omitempty"` - // Caching - Specifies the caching requirements.
Possible values are: **None**, **ReadOnly**, **ReadWrite**
Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' - Caching CachingTypes `json:"caching,omitempty"` - // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk. - WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` - // CreateOption - Specifies how the virtual machine should be created.
Possible values are:
**Attach** \u2013 This value is used when you are using a specialized disk to create the virtual machine.
**FromImage** \u2013 This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach' - CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` - // DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image.
This value cannot be larger than 1023 GB - DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` - // ManagedDisk - The managed disk parameters. - ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"` - // ToBeDetached - Specifies whether the data disk is in process of detachment from the VirtualMachine/VirtualMachineScaleset - ToBeDetached *bool `json:"toBeDetached,omitempty"` - // DiskIOPSReadWrite - READ-ONLY; Specifies the Read-Write IOPS for the managed disk when StorageAccountType is UltraSSD_LRS. Returned only for VirtualMachine ScaleSet VM disks. Can be updated only via updates to the VirtualMachine Scale Set. - DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"` - // DiskMBpsReadWrite - READ-ONLY; Specifies the bandwidth in MB per second for the managed disk when StorageAccountType is UltraSSD_LRS. Returned only for VirtualMachine ScaleSet VM disks. Can be updated only via updates to the VirtualMachine Scale Set. - DiskMBpsReadWrite *int64 `json:"diskMBpsReadWrite,omitempty"` - // DetachOption - Specifies the detach behavior to be used while detaching a disk or which is already in the process of detachment from the virtual machine. Supported values: **ForceDetach**.
detachOption: **ForceDetach** is applicable only for managed data disks. If a previous detachment attempt of the data disk did not complete due to an unexpected failure from the virtual machine and the disk is still not released then use force-detach as a last resort option to detach the disk forcibly from the VM. All writes might not have been flushed when using this detach behavior.
This feature is still in preview mode and is not supported for VirtualMachineScaleSet. To force-detach a data disk update toBeDetached to 'true' along with setting detachOption: 'ForceDetach'. Possible values include: 'DiskDetachOptionTypesForceDetach' - DetachOption DiskDetachOptionTypes `json:"detachOption,omitempty"` -} - -// MarshalJSON is the custom marshaler for DataDisk. -func (dd DataDisk) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dd.Lun != nil { - objectMap["lun"] = dd.Lun - } - if dd.Name != nil { - objectMap["name"] = dd.Name - } - if dd.Vhd != nil { - objectMap["vhd"] = dd.Vhd - } - if dd.Image != nil { - objectMap["image"] = dd.Image - } - if dd.Caching != "" { - objectMap["caching"] = dd.Caching - } - if dd.WriteAcceleratorEnabled != nil { - objectMap["writeAcceleratorEnabled"] = dd.WriteAcceleratorEnabled - } - if dd.CreateOption != "" { - objectMap["createOption"] = dd.CreateOption - } - if dd.DiskSizeGB != nil { - objectMap["diskSizeGB"] = dd.DiskSizeGB - } - if dd.ManagedDisk != nil { - objectMap["managedDisk"] = dd.ManagedDisk - } - if dd.ToBeDetached != nil { - objectMap["toBeDetached"] = dd.ToBeDetached - } - if dd.DetachOption != "" { - objectMap["detachOption"] = dd.DetachOption - } - return json.Marshal(objectMap) -} - -// DataDiskImage contains the data disk images information. -type DataDiskImage struct { - // Lun - READ-ONLY; Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. - Lun *int32 `json:"lun,omitempty"` -} - -// MarshalJSON is the custom marshaler for DataDiskImage. -func (ddi DataDiskImage) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// DataDiskImageEncryption contains encryption settings for a data disk image. -type DataDiskImageEncryption struct { - // Lun - This property specifies the logical unit number of the data disk. This value is used to identify data disks within the Virtual Machine and therefore must be unique for each data disk attached to the Virtual Machine. - Lun *int32 `json:"lun,omitempty"` - // DiskEncryptionSetID - A relative URI containing the resource ID of the disk encryption set. - DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty"` -} - -// DedicatedHost specifies information about the Dedicated host. -type DedicatedHost struct { - autorest.Response `json:"-"` - *DedicatedHostProperties `json:"properties,omitempty"` - // Sku - SKU of the dedicated host for Hardware Generation and VM family. Only name is required to be set. List Microsoft.Compute SKUs for a list of possible values. +// CapacityReservation specifies information about the capacity reservation. +type CapacityReservation struct { + autorest.Response `json:"-"` + *CapacityReservationProperties `json:"properties,omitempty"` + // Sku - SKU of the resource for which capacity needs be reserved. The SKU name and capacity is required to be set. Currently VM Skus with the capability called 'CapacityReservationSupported' set to true are supported. Refer to List Microsoft.Compute SKUs in a region (https://docs.microsoft.com/rest/api/compute/resourceskus/list) for supported values. Sku *Sku `json:"sku,omitempty"` + // Zones - Availability Zone to use for this capacity reservation. The zone has to be single value and also should be part for the list of zones specified during the capacity reservation group creation. 
The zone can be assigned only during creation. If not provided, the reservation supports only non-zonal deployments. If provided, enforces VM/VMSS using this capacity reservation to be in same zone. + Zones *[]string `json:"zones,omitempty"` // ID - READ-ONLY; Resource Id ID *string `json:"id,omitempty"` // Name - READ-ONLY; Resource name @@ -712,26 +576,29 @@ type DedicatedHost struct { Tags map[string]*string `json:"tags"` } -// MarshalJSON is the custom marshaler for DedicatedHost. -func (dh DedicatedHost) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for CapacityReservation. +func (cr CapacityReservation) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if dh.DedicatedHostProperties != nil { - objectMap["properties"] = dh.DedicatedHostProperties + if cr.CapacityReservationProperties != nil { + objectMap["properties"] = cr.CapacityReservationProperties } - if dh.Sku != nil { - objectMap["sku"] = dh.Sku + if cr.Sku != nil { + objectMap["sku"] = cr.Sku } - if dh.Location != nil { - objectMap["location"] = dh.Location + if cr.Zones != nil { + objectMap["zones"] = cr.Zones } - if dh.Tags != nil { - objectMap["tags"] = dh.Tags + if cr.Location != nil { + objectMap["location"] = cr.Location + } + if cr.Tags != nil { + objectMap["tags"] = cr.Tags } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for DedicatedHost struct. -func (dh *DedicatedHost) UnmarshalJSON(body []byte) error { +// UnmarshalJSON is the custom unmarshaler for CapacityReservation struct. +func (cr *CapacityReservation) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { @@ -741,12 +608,12 @@ func (dh *DedicatedHost) UnmarshalJSON(body []byte) error { switch k { case "properties": if v != nil { - var dedicatedHostProperties DedicatedHostProperties - err = json.Unmarshal(*v, &dedicatedHostProperties) + var capacityReservationProperties CapacityReservationProperties + err = json.Unmarshal(*v, &capacityReservationProperties) if err != nil { return err } - dh.DedicatedHostProperties = &dedicatedHostProperties + cr.CapacityReservationProperties = &capacityReservationProperties } case "sku": if v != nil { @@ -755,7 +622,16 @@ func (dh *DedicatedHost) UnmarshalJSON(body []byte) error { if err != nil { return err } - dh.Sku = &sku + cr.Sku = &sku + } + case "zones": + if v != nil { + var zones []string + err = json.Unmarshal(*v, &zones) + if err != nil { + return err + } + cr.Zones = &zones } case "id": if v != nil { @@ -764,7 +640,7 @@ func (dh *DedicatedHost) UnmarshalJSON(body []byte) error { if err != nil { return err } - dh.ID = &ID + cr.ID = &ID } case "name": if v != nil { @@ -773,7 +649,7 @@ func (dh *DedicatedHost) UnmarshalJSON(body []byte) error { if err != nil { return err } - dh.Name = &name + cr.Name = &name } case "type": if v != nil { @@ -782,7 +658,7 @@ func (dh *DedicatedHost) UnmarshalJSON(body []byte) error { if err != nil { return err } - dh.Type = &typeVar + cr.Type = &typeVar } case "location": if v != nil { @@ -791,7 +667,7 @@ func (dh *DedicatedHost) UnmarshalJSON(body []byte) error { if err != nil { return err } - dh.Location = &location + cr.Location = &location } case "tags": if v != nil { @@ -800,7 +676,7 @@ func (dh *DedicatedHost) UnmarshalJSON(body []byte) error { if err != nil { return err } - dh.Tags = tags + cr.Tags = tags } } } @@ -808,28 +684,14 @@ func (dh *DedicatedHost) UnmarshalJSON(body []byte) error { return nil } -// DedicatedHostAllocatableVM 
represents the dedicated host unutilized capacity in terms of a specific VM -// size. -type DedicatedHostAllocatableVM struct { - // VMSize - VM size in terms of which the unutilized capacity is represented. - VMSize *string `json:"vmSize,omitempty"` - // Count - Maximum number of VMs of size vmSize that can fit in the dedicated host's remaining capacity. - Count *float64 `json:"count,omitempty"` -} - -// DedicatedHostAvailableCapacity dedicated host unutilized capacity. -type DedicatedHostAvailableCapacity struct { - // AllocatableVMs - The unutilized capacity of the dedicated host represented in terms of each VM size that is allowed to be deployed to the dedicated host. - AllocatableVMs *[]DedicatedHostAllocatableVM `json:"allocatableVMs,omitempty"` -} - -// DedicatedHostGroup specifies information about the dedicated host group that the dedicated hosts should -// be assigned to.
Currently, a dedicated host can only be added to a dedicated host group at -// creation time. An existing dedicated host cannot be added to another dedicated host group. -type DedicatedHostGroup struct { - autorest.Response `json:"-"` - *DedicatedHostGroupProperties `json:"properties,omitempty"` - // Zones - Availability Zone to use for this host group. Only single zone is supported. The zone can be assigned only during creation. If not provided, the group supports all zones in the region. If provided, enforces each host in the group to be in the same zone. +// CapacityReservationGroup specifies information about the capacity reservation group that the capacity +// reservations should be assigned to.
Currently, a capacity reservation can only be added to a +// capacity reservation group at creation time. An existing capacity reservation cannot be added or moved +// to another capacity reservation group. +type CapacityReservationGroup struct { + autorest.Response `json:"-"` + *CapacityReservationGroupProperties `json:"properties,omitempty"` + // Zones - Availability Zones to use for this capacity reservation group. The zones can be assigned only during creation. If not provided, the group supports only regional resources in the region. If provided, enforces each capacity reservation in the group to be in one of the zones. Zones *[]string `json:"zones,omitempty"` // ID - READ-ONLY; Resource Id ID *string `json:"id,omitempty"` @@ -843,26 +705,26 @@ type DedicatedHostGroup struct { Tags map[string]*string `json:"tags"` } -// MarshalJSON is the custom marshaler for DedicatedHostGroup. -func (dhg DedicatedHostGroup) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for CapacityReservationGroup. +func (crg CapacityReservationGroup) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if dhg.DedicatedHostGroupProperties != nil { - objectMap["properties"] = dhg.DedicatedHostGroupProperties + if crg.CapacityReservationGroupProperties != nil { + objectMap["properties"] = crg.CapacityReservationGroupProperties } - if dhg.Zones != nil { - objectMap["zones"] = dhg.Zones + if crg.Zones != nil { + objectMap["zones"] = crg.Zones } - if dhg.Location != nil { - objectMap["location"] = dhg.Location + if crg.Location != nil { + objectMap["location"] = crg.Location } - if dhg.Tags != nil { - objectMap["tags"] = dhg.Tags + if crg.Tags != nil { + objectMap["tags"] = crg.Tags } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for DedicatedHostGroup struct. -func (dhg *DedicatedHostGroup) UnmarshalJSON(body []byte) error { +// UnmarshalJSON is the custom unmarshaler for CapacityReservationGroup struct. 
+func (crg *CapacityReservationGroup) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { @@ -872,12 +734,12 @@ func (dhg *DedicatedHostGroup) UnmarshalJSON(body []byte) error { switch k { case "properties": if v != nil { - var dedicatedHostGroupProperties DedicatedHostGroupProperties - err = json.Unmarshal(*v, &dedicatedHostGroupProperties) + var capacityReservationGroupProperties CapacityReservationGroupProperties + err = json.Unmarshal(*v, &capacityReservationGroupProperties) if err != nil { return err } - dhg.DedicatedHostGroupProperties = &dedicatedHostGroupProperties + crg.CapacityReservationGroupProperties = &capacityReservationGroupProperties } case "zones": if v != nil { @@ -886,7 +748,7 @@ func (dhg *DedicatedHostGroup) UnmarshalJSON(body []byte) error { if err != nil { return err } - dhg.Zones = &zones + crg.Zones = &zones } case "id": if v != nil { @@ -895,7 +757,7 @@ func (dhg *DedicatedHostGroup) UnmarshalJSON(body []byte) error { if err != nil { return err } - dhg.ID = &ID + crg.ID = &ID } case "name": if v != nil { @@ -904,7 +766,7 @@ func (dhg *DedicatedHostGroup) UnmarshalJSON(body []byte) error { if err != nil { return err } - dhg.Name = &name + crg.Name = &name } case "type": if v != nil { @@ -913,7 +775,7 @@ func (dhg *DedicatedHostGroup) UnmarshalJSON(body []byte) error { if err != nil { return err } - dhg.Type = &typeVar + crg.Type = &typeVar } case "location": if v != nil { @@ -922,7 +784,7 @@ func (dhg *DedicatedHostGroup) UnmarshalJSON(body []byte) error { if err != nil { return err } - dhg.Location = &location + crg.Location = &location } case "tags": if v != nil { @@ -931,7 +793,7 @@ func (dhg *DedicatedHostGroup) UnmarshalJSON(body []byte) error { if err != nil { return err } - dhg.Tags = tags + crg.Tags = tags } } } @@ -939,32 +801,39 @@ func (dhg *DedicatedHostGroup) UnmarshalJSON(body []byte) error { return nil } -// DedicatedHostGroupInstanceView ... -type DedicatedHostGroupInstanceView struct { - // Hosts - List of instance view of the dedicated hosts under the dedicated host group. - Hosts *[]DedicatedHostInstanceViewWithName `json:"hosts,omitempty"` +// CapacityReservationGroupInstanceView ... +type CapacityReservationGroupInstanceView struct { + // CapacityReservations - READ-ONLY; List of instance view of the capacity reservations under the capacity reservation group. + CapacityReservations *[]CapacityReservationInstanceViewWithName `json:"capacityReservations,omitempty"` } -// DedicatedHostGroupListResult the List Dedicated Host Group with resource group response. -type DedicatedHostGroupListResult struct { +// MarshalJSON is the custom marshaler for CapacityReservationGroupInstanceView. +func (crgiv CapacityReservationGroupInstanceView) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// CapacityReservationGroupListResult the List capacity reservation group with resource group response. +type CapacityReservationGroupListResult struct { autorest.Response `json:"-"` - // Value - The list of dedicated host groups - Value *[]DedicatedHostGroup `json:"value,omitempty"` - // NextLink - The URI to fetch the next page of Dedicated Host Groups. Call ListNext() with this URI to fetch the next page of Dedicated Host Groups. + // Value - The list of capacity reservation groups + Value *[]CapacityReservationGroup `json:"value,omitempty"` + // NextLink - The URI to fetch the next page of capacity reservation groups. 
Call ListNext() with this URI to fetch the next page of capacity reservation groups. NextLink *string `json:"nextLink,omitempty"` } -// DedicatedHostGroupListResultIterator provides access to a complete listing of DedicatedHostGroup values. -type DedicatedHostGroupListResultIterator struct { +// CapacityReservationGroupListResultIterator provides access to a complete listing of +// CapacityReservationGroup values. +type CapacityReservationGroupListResultIterator struct { i int - page DedicatedHostGroupListResultPage + page CapacityReservationGroupListResultPage } // NextWithContext advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. -func (iter *DedicatedHostGroupListResultIterator) NextWithContext(ctx context.Context) (err error) { +func (iter *CapacityReservationGroupListResultIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostGroupListResultIterator.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationGroupListResultIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { @@ -989,67 +858,67 @@ func (iter *DedicatedHostGroupListResultIterator) NextWithContext(ctx context.Co // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (iter *DedicatedHostGroupListResultIterator) Next() error { +func (iter *CapacityReservationGroupListResultIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DedicatedHostGroupListResultIterator) NotDone() bool { +func (iter CapacityReservationGroupListResultIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. -func (iter DedicatedHostGroupListResultIterator) Response() DedicatedHostGroupListResult { +func (iter CapacityReservationGroupListResultIterator) Response() CapacityReservationGroupListResult { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. -func (iter DedicatedHostGroupListResultIterator) Value() DedicatedHostGroup { +func (iter CapacityReservationGroupListResultIterator) Value() CapacityReservationGroup { if !iter.page.NotDone() { - return DedicatedHostGroup{} + return CapacityReservationGroup{} } return iter.page.Values()[iter.i] } -// Creates a new instance of the DedicatedHostGroupListResultIterator type. -func NewDedicatedHostGroupListResultIterator(page DedicatedHostGroupListResultPage) DedicatedHostGroupListResultIterator { - return DedicatedHostGroupListResultIterator{page: page} +// Creates a new instance of the CapacityReservationGroupListResultIterator type. +func NewCapacityReservationGroupListResultIterator(page CapacityReservationGroupListResultPage) CapacityReservationGroupListResultIterator { + return CapacityReservationGroupListResultIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. 
-func (dhglr DedicatedHostGroupListResult) IsEmpty() bool { - return dhglr.Value == nil || len(*dhglr.Value) == 0 +func (crglr CapacityReservationGroupListResult) IsEmpty() bool { + return crglr.Value == nil || len(*crglr.Value) == 0 } // hasNextLink returns true if the NextLink is not empty. -func (dhglr DedicatedHostGroupListResult) hasNextLink() bool { - return dhglr.NextLink != nil && len(*dhglr.NextLink) != 0 +func (crglr CapacityReservationGroupListResult) hasNextLink() bool { + return crglr.NextLink != nil && len(*crglr.NextLink) != 0 } -// dedicatedHostGroupListResultPreparer prepares a request to retrieve the next set of results. +// capacityReservationGroupListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. -func (dhglr DedicatedHostGroupListResult) dedicatedHostGroupListResultPreparer(ctx context.Context) (*http.Request, error) { - if !dhglr.hasNextLink() { +func (crglr CapacityReservationGroupListResult) capacityReservationGroupListResultPreparer(ctx context.Context) (*http.Request, error) { + if !crglr.hasNextLink() { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), - autorest.WithBaseURL(to.String(dhglr.NextLink))) + autorest.WithBaseURL(to.String(crglr.NextLink))) } -// DedicatedHostGroupListResultPage contains a page of DedicatedHostGroup values. -type DedicatedHostGroupListResultPage struct { - fn func(context.Context, DedicatedHostGroupListResult) (DedicatedHostGroupListResult, error) - dhglr DedicatedHostGroupListResult +// CapacityReservationGroupListResultPage contains a page of CapacityReservationGroup values. +type CapacityReservationGroupListResultPage struct { + fn func(context.Context, CapacityReservationGroupListResult) (CapacityReservationGroupListResult, error) + crglr CapacityReservationGroupListResult } // NextWithContext advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. -func (page *DedicatedHostGroupListResultPage) NextWithContext(ctx context.Context) (err error) { +func (page *CapacityReservationGroupListResultPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostGroupListResultPage.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationGroupListResultPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { @@ -1059,11 +928,11 @@ func (page *DedicatedHostGroupListResultPage) NextWithContext(ctx context.Contex }() } for { - next, err := page.fn(ctx, page.dhglr) + next, err := page.fn(ctx, page.crglr) if err != nil { return err } - page.dhglr = next + page.crglr = next if !next.hasNextLink() || !next.IsEmpty() { break } @@ -1074,87 +943,74 @@ func (page *DedicatedHostGroupListResultPage) NextWithContext(ctx context.Contex // Next advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (page *DedicatedHostGroupListResultPage) Next() error { +func (page *CapacityReservationGroupListResultPage) Next() error { return page.NextWithContext(context.Background()) } // NotDone returns true if the page enumeration should be started or is not yet complete. 
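As an illustrative aside (not part of the vendored file): the iterator and page types above follow the standard track-1 pager contract of NotDone / Values / NextWithContext, so a caller holding a CapacityReservationGroupListResultPage would typically drain it as sketched below. The helper name allCapacityReservationGroups is hypothetical.

func allCapacityReservationGroups(ctx context.Context, page CapacityReservationGroupListResultPage) ([]CapacityReservationGroup, error) {
	// Walk every page, accumulating the values; the loop ends when NotDone
	// reports false or NextWithContext returns an error.
	var groups []CapacityReservationGroup
	for page.NotDone() {
		groups = append(groups, page.Values()...)
		if err := page.NextWithContext(ctx); err != nil {
			return nil, err
		}
	}
	return groups, nil
}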
-func (page DedicatedHostGroupListResultPage) NotDone() bool { - return !page.dhglr.IsEmpty() +func (page CapacityReservationGroupListResultPage) NotDone() bool { + return !page.crglr.IsEmpty() } // Response returns the raw server response from the last page request. -func (page DedicatedHostGroupListResultPage) Response() DedicatedHostGroupListResult { - return page.dhglr +func (page CapacityReservationGroupListResultPage) Response() CapacityReservationGroupListResult { + return page.crglr } // Values returns the slice of values for the current page or nil if there are no values. -func (page DedicatedHostGroupListResultPage) Values() []DedicatedHostGroup { - if page.dhglr.IsEmpty() { +func (page CapacityReservationGroupListResultPage) Values() []CapacityReservationGroup { + if page.crglr.IsEmpty() { return nil } - return *page.dhglr.Value + return *page.crglr.Value } -// Creates a new instance of the DedicatedHostGroupListResultPage type. -func NewDedicatedHostGroupListResultPage(cur DedicatedHostGroupListResult, getNextPage func(context.Context, DedicatedHostGroupListResult) (DedicatedHostGroupListResult, error)) DedicatedHostGroupListResultPage { - return DedicatedHostGroupListResultPage{ +// Creates a new instance of the CapacityReservationGroupListResultPage type. +func NewCapacityReservationGroupListResultPage(cur CapacityReservationGroupListResult, getNextPage func(context.Context, CapacityReservationGroupListResult) (CapacityReservationGroupListResult, error)) CapacityReservationGroupListResultPage { + return CapacityReservationGroupListResultPage{ fn: getNextPage, - dhglr: cur, + crglr: cur, } } -// DedicatedHostGroupProperties dedicated Host Group Properties. -type DedicatedHostGroupProperties struct { - // PlatformFaultDomainCount - Number of fault domains that the host group can span. - PlatformFaultDomainCount *int32 `json:"platformFaultDomainCount,omitempty"` - // Hosts - READ-ONLY; A list of references to all dedicated hosts in the dedicated host group. - Hosts *[]SubResourceReadOnly `json:"hosts,omitempty"` - // InstanceView - READ-ONLY; The dedicated host group instance view, which has the list of instance view of the dedicated hosts under the dedicated host group. - InstanceView *DedicatedHostGroupInstanceView `json:"instanceView,omitempty"` - // SupportAutomaticPlacement - Specifies whether virtual machines or virtual machine scale sets can be placed automatically on the dedicated host group. Automatic placement means resources are allocated on dedicated hosts, that are chosen by Azure, under the dedicated host group. The value is defaulted to 'false' when not provided.

Minimum api-version: 2020-06-01. - SupportAutomaticPlacement *bool `json:"supportAutomaticPlacement,omitempty"` +// CapacityReservationGroupProperties capacity reservation group Properties. +type CapacityReservationGroupProperties struct { + // CapacityReservations - READ-ONLY; A list of all capacity reservation resource ids that belong to capacity reservation group. + CapacityReservations *[]SubResourceReadOnly `json:"capacityReservations,omitempty"` + // VirtualMachinesAssociated - READ-ONLY; A list of references to all virtual machines associated to the capacity reservation group. + VirtualMachinesAssociated *[]SubResourceReadOnly `json:"virtualMachinesAssociated,omitempty"` + // InstanceView - READ-ONLY; The capacity reservation group instance view which has the list of instance views for all the capacity reservations that belong to the capacity reservation group. + InstanceView *CapacityReservationGroupInstanceView `json:"instanceView,omitempty"` } -// MarshalJSON is the custom marshaler for DedicatedHostGroupProperties. -func (dhgp DedicatedHostGroupProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for CapacityReservationGroupProperties. +func (crgp CapacityReservationGroupProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if dhgp.PlatformFaultDomainCount != nil { - objectMap["platformFaultDomainCount"] = dhgp.PlatformFaultDomainCount - } - if dhgp.SupportAutomaticPlacement != nil { - objectMap["supportAutomaticPlacement"] = dhgp.SupportAutomaticPlacement - } return json.Marshal(objectMap) } -// DedicatedHostGroupUpdate specifies information about the dedicated host group that the dedicated host -// should be assigned to. Only tags may be updated. -type DedicatedHostGroupUpdate struct { - *DedicatedHostGroupProperties `json:"properties,omitempty"` - // Zones - Availability Zone to use for this host group. Only single zone is supported. The zone can be assigned only during creation. If not provided, the group supports all zones in the region. If provided, enforces each host in the group to be in the same zone. - Zones *[]string `json:"zones,omitempty"` +// CapacityReservationGroupUpdate specifies information about the capacity reservation group. Only tags can +// be updated. +type CapacityReservationGroupUpdate struct { + *CapacityReservationGroupProperties `json:"properties,omitempty"` // Tags - Resource tags Tags map[string]*string `json:"tags"` } -// MarshalJSON is the custom marshaler for DedicatedHostGroupUpdate. -func (dhgu DedicatedHostGroupUpdate) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for CapacityReservationGroupUpdate. +func (crgu CapacityReservationGroupUpdate) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if dhgu.DedicatedHostGroupProperties != nil { - objectMap["properties"] = dhgu.DedicatedHostGroupProperties - } - if dhgu.Zones != nil { - objectMap["zones"] = dhgu.Zones + if crgu.CapacityReservationGroupProperties != nil { + objectMap["properties"] = crgu.CapacityReservationGroupProperties } - if dhgu.Tags != nil { - objectMap["tags"] = dhgu.Tags + if crgu.Tags != nil { + objectMap["tags"] = crgu.Tags } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for DedicatedHostGroupUpdate struct. -func (dhgu *DedicatedHostGroupUpdate) UnmarshalJSON(body []byte) error { +// UnmarshalJSON is the custom unmarshaler for CapacityReservationGroupUpdate struct. 
+func (crgu *CapacityReservationGroupUpdate) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { @@ -1164,21 +1020,12 @@ func (dhgu *DedicatedHostGroupUpdate) UnmarshalJSON(body []byte) error { switch k { case "properties": if v != nil { - var dedicatedHostGroupProperties DedicatedHostGroupProperties - err = json.Unmarshal(*v, &dedicatedHostGroupProperties) - if err != nil { - return err - } - dhgu.DedicatedHostGroupProperties = &dedicatedHostGroupProperties - } - case "zones": - if v != nil { - var zones []string - err = json.Unmarshal(*v, &zones) + var capacityReservationGroupProperties CapacityReservationGroupProperties + err = json.Unmarshal(*v, &capacityReservationGroupProperties) if err != nil { return err } - dhgu.Zones = &zones + crgu.CapacityReservationGroupProperties = &capacityReservationGroupProperties } case "tags": if v != nil { @@ -1187,7 +1034,7 @@ func (dhgu *DedicatedHostGroupUpdate) UnmarshalJSON(body []byte) error { if err != nil { return err } - dhgu.Tags = tags + crgu.Tags = tags } } } @@ -1195,73 +1042,61 @@ func (dhgu *DedicatedHostGroupUpdate) UnmarshalJSON(body []byte) error { return nil } -// DedicatedHostInstanceView the instance view of a dedicated host. -type DedicatedHostInstanceView struct { - // AssetID - READ-ONLY; Specifies the unique id of the dedicated physical machine on which the dedicated host resides. - AssetID *string `json:"assetId,omitempty"` - // AvailableCapacity - Unutilized capacity of the dedicated host. - AvailableCapacity *DedicatedHostAvailableCapacity `json:"availableCapacity,omitempty"` +// CapacityReservationInstanceView the instance view of a capacity reservation that provides as snapshot of +// the runtime properties of the capacity reservation that is managed by the platform and can change +// outside of control plane operations. +type CapacityReservationInstanceView struct { + // UtilizationInfo - Unutilized capacity of the capacity reservation. + UtilizationInfo *CapacityReservationUtilization `json:"utilizationInfo,omitempty"` // Statuses - The resource status information. Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` } -// MarshalJSON is the custom marshaler for DedicatedHostInstanceView. -func (dhiv DedicatedHostInstanceView) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dhiv.AvailableCapacity != nil { - objectMap["availableCapacity"] = dhiv.AvailableCapacity - } - if dhiv.Statuses != nil { - objectMap["statuses"] = dhiv.Statuses - } - return json.Marshal(objectMap) -} - -// DedicatedHostInstanceViewWithName the instance view of a dedicated host that includes the name of the -// dedicated host. It is used for the response to the instance view of a dedicated host group. -type DedicatedHostInstanceViewWithName struct { - // Name - READ-ONLY; The name of the dedicated host. +// CapacityReservationInstanceViewWithName the instance view of a capacity reservation that includes the +// name of the capacity reservation. It is used for the response to the instance view of a capacity +// reservation group. +type CapacityReservationInstanceViewWithName struct { + // Name - READ-ONLY; The name of the capacity reservation. Name *string `json:"name,omitempty"` - // AssetID - READ-ONLY; Specifies the unique id of the dedicated physical machine on which the dedicated host resides. - AssetID *string `json:"assetId,omitempty"` - // AvailableCapacity - Unutilized capacity of the dedicated host. 
- AvailableCapacity *DedicatedHostAvailableCapacity `json:"availableCapacity,omitempty"` + // UtilizationInfo - Unutilized capacity of the capacity reservation. + UtilizationInfo *CapacityReservationUtilization `json:"utilizationInfo,omitempty"` // Statuses - The resource status information. Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` } -// MarshalJSON is the custom marshaler for DedicatedHostInstanceViewWithName. -func (dhivwn DedicatedHostInstanceViewWithName) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for CapacityReservationInstanceViewWithName. +func (crivwn CapacityReservationInstanceViewWithName) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if dhivwn.AvailableCapacity != nil { - objectMap["availableCapacity"] = dhivwn.AvailableCapacity + if crivwn.UtilizationInfo != nil { + objectMap["utilizationInfo"] = crivwn.UtilizationInfo } - if dhivwn.Statuses != nil { - objectMap["statuses"] = dhivwn.Statuses + if crivwn.Statuses != nil { + objectMap["statuses"] = crivwn.Statuses } return json.Marshal(objectMap) } -// DedicatedHostListResult the list dedicated host operation response. -type DedicatedHostListResult struct { +// CapacityReservationListResult the list capacity reservation operation response. +type CapacityReservationListResult struct { autorest.Response `json:"-"` - // Value - The list of dedicated hosts - Value *[]DedicatedHost `json:"value,omitempty"` - // NextLink - The URI to fetch the next page of dedicated hosts. Call ListNext() with this URI to fetch the next page of dedicated hosts. + // Value - The list of capacity reservations + Value *[]CapacityReservation `json:"value,omitempty"` + // NextLink - The URI to fetch the next page of capacity reservations. Call ListNext() with this URI to fetch the next page of capacity reservations. NextLink *string `json:"nextLink,omitempty"` } -// DedicatedHostListResultIterator provides access to a complete listing of DedicatedHost values. -type DedicatedHostListResultIterator struct { +// CapacityReservationListResultIterator provides access to a complete listing of CapacityReservation +// values. +type CapacityReservationListResultIterator struct { i int - page DedicatedHostListResultPage + page CapacityReservationListResultPage } // NextWithContext advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. -func (iter *DedicatedHostListResultIterator) NextWithContext(ctx context.Context) (err error) { +func (iter *CapacityReservationListResultIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostListResultIterator.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationListResultIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { @@ -1286,67 +1121,67 @@ func (iter *DedicatedHostListResultIterator) NextWithContext(ctx context.Context // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (iter *DedicatedHostListResultIterator) Next() error { +func (iter *CapacityReservationListResultIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. 
-func (iter DedicatedHostListResultIterator) NotDone() bool { +func (iter CapacityReservationListResultIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. -func (iter DedicatedHostListResultIterator) Response() DedicatedHostListResult { +func (iter CapacityReservationListResultIterator) Response() CapacityReservationListResult { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. -func (iter DedicatedHostListResultIterator) Value() DedicatedHost { +func (iter CapacityReservationListResultIterator) Value() CapacityReservation { if !iter.page.NotDone() { - return DedicatedHost{} + return CapacityReservation{} } return iter.page.Values()[iter.i] } -// Creates a new instance of the DedicatedHostListResultIterator type. -func NewDedicatedHostListResultIterator(page DedicatedHostListResultPage) DedicatedHostListResultIterator { - return DedicatedHostListResultIterator{page: page} +// Creates a new instance of the CapacityReservationListResultIterator type. +func NewCapacityReservationListResultIterator(page CapacityReservationListResultPage) CapacityReservationListResultIterator { + return CapacityReservationListResultIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. -func (dhlr DedicatedHostListResult) IsEmpty() bool { - return dhlr.Value == nil || len(*dhlr.Value) == 0 +func (crlr CapacityReservationListResult) IsEmpty() bool { + return crlr.Value == nil || len(*crlr.Value) == 0 } // hasNextLink returns true if the NextLink is not empty. -func (dhlr DedicatedHostListResult) hasNextLink() bool { - return dhlr.NextLink != nil && len(*dhlr.NextLink) != 0 +func (crlr CapacityReservationListResult) hasNextLink() bool { + return crlr.NextLink != nil && len(*crlr.NextLink) != 0 } -// dedicatedHostListResultPreparer prepares a request to retrieve the next set of results. +// capacityReservationListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. -func (dhlr DedicatedHostListResult) dedicatedHostListResultPreparer(ctx context.Context) (*http.Request, error) { - if !dhlr.hasNextLink() { +func (crlr CapacityReservationListResult) capacityReservationListResultPreparer(ctx context.Context) (*http.Request, error) { + if !crlr.hasNextLink() { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), - autorest.WithBaseURL(to.String(dhlr.NextLink))) + autorest.WithBaseURL(to.String(crlr.NextLink))) } -// DedicatedHostListResultPage contains a page of DedicatedHost values. -type DedicatedHostListResultPage struct { - fn func(context.Context, DedicatedHostListResult) (DedicatedHostListResult, error) - dhlr DedicatedHostListResult +// CapacityReservationListResultPage contains a page of CapacityReservation values. +type CapacityReservationListResultPage struct { + fn func(context.Context, CapacityReservationListResult) (CapacityReservationListResult, error) + crlr CapacityReservationListResult } // NextWithContext advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. 
-func (page *DedicatedHostListResultPage) NextWithContext(ctx context.Context) (err error) { +func (page *CapacityReservationListResultPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostListResultPage.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationListResultPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { @@ -1356,11 +1191,11 @@ func (page *DedicatedHostListResultPage) NextWithContext(ctx context.Context) (e }() } for { - next, err := page.fn(ctx, page.dhlr) + next, err := page.fn(ctx, page.crlr) if err != nil { return err } - page.dhlr = next + page.crlr = next if !next.hasNextLink() || !next.IsEmpty() { break } @@ -1371,82 +1206,73 @@ func (page *DedicatedHostListResultPage) NextWithContext(ctx context.Context) (e // Next advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (page *DedicatedHostListResultPage) Next() error { +func (page *CapacityReservationListResultPage) Next() error { return page.NextWithContext(context.Background()) } // NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DedicatedHostListResultPage) NotDone() bool { - return !page.dhlr.IsEmpty() +func (page CapacityReservationListResultPage) NotDone() bool { + return !page.crlr.IsEmpty() } // Response returns the raw server response from the last page request. -func (page DedicatedHostListResultPage) Response() DedicatedHostListResult { - return page.dhlr +func (page CapacityReservationListResultPage) Response() CapacityReservationListResult { + return page.crlr } // Values returns the slice of values for the current page or nil if there are no values. -func (page DedicatedHostListResultPage) Values() []DedicatedHost { - if page.dhlr.IsEmpty() { +func (page CapacityReservationListResultPage) Values() []CapacityReservation { + if page.crlr.IsEmpty() { return nil } - return *page.dhlr.Value + return *page.crlr.Value } -// Creates a new instance of the DedicatedHostListResultPage type. -func NewDedicatedHostListResultPage(cur DedicatedHostListResult, getNextPage func(context.Context, DedicatedHostListResult) (DedicatedHostListResult, error)) DedicatedHostListResultPage { - return DedicatedHostListResultPage{ +// Creates a new instance of the CapacityReservationListResultPage type. +func NewCapacityReservationListResultPage(cur CapacityReservationListResult, getNextPage func(context.Context, CapacityReservationListResult) (CapacityReservationListResult, error)) CapacityReservationListResultPage { + return CapacityReservationListResultPage{ fn: getNextPage, - dhlr: cur, + crlr: cur, } } -// DedicatedHostProperties properties of the dedicated host. -type DedicatedHostProperties struct { - // PlatformFaultDomain - Fault domain of the dedicated host within a dedicated host group. - PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty"` - // AutoReplaceOnFailure - Specifies whether the dedicated host should be replaced automatically in case of a failure. The value is defaulted to 'true' when not provided. - AutoReplaceOnFailure *bool `json:"autoReplaceOnFailure,omitempty"` - // HostID - READ-ONLY; A unique id generated and assigned to the dedicated host by the platform.

Does not change throughout the lifetime of the host. - HostID *string `json:"hostId,omitempty"` - // VirtualMachines - READ-ONLY; A list of references to all virtual machines in the Dedicated Host. - VirtualMachines *[]SubResourceReadOnly `json:"virtualMachines,omitempty"` - // LicenseType - Specifies the software license type that will be applied to the VMs deployed on the dedicated host.

Possible values are:

**None**

**Windows_Server_Hybrid**

**Windows_Server_Perpetual**

Default: **None**. Possible values include: 'DedicatedHostLicenseTypesNone', 'DedicatedHostLicenseTypesWindowsServerHybrid', 'DedicatedHostLicenseTypesWindowsServerPerpetual' - LicenseType DedicatedHostLicenseTypes `json:"licenseType,omitempty"` - // ProvisioningTime - READ-ONLY; The date when the host was first provisioned. +// CapacityReservationProfile the parameters of a capacity reservation Profile. +type CapacityReservationProfile struct { + // CapacityReservationGroup - Specifies the capacity reservation group resource id that should be used for allocating the virtual machine or scaleset vm instances provided enough capacity has been reserved. Please refer to https://aka.ms/CapacityReservation for more details. + CapacityReservationGroup *SubResource `json:"capacityReservationGroup,omitempty"` +} + +// CapacityReservationProperties properties of the Capacity reservation. +type CapacityReservationProperties struct { + // ReservationID - READ-ONLY; A unique id generated and assigned to the capacity reservation by the platform which does not change throughout the lifetime of the resource. + ReservationID *string `json:"reservationId,omitempty"` + // VirtualMachinesAssociated - READ-ONLY; A list of all virtual machine resource ids that are associated with the capacity reservation. + VirtualMachinesAssociated *[]SubResourceReadOnly `json:"virtualMachinesAssociated,omitempty"` + // ProvisioningTime - READ-ONLY; The date time when the capacity reservation was last updated. ProvisioningTime *date.Time `json:"provisioningTime,omitempty"` // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. ProvisioningState *string `json:"provisioningState,omitempty"` - // InstanceView - READ-ONLY; The dedicated host instance view. - InstanceView *DedicatedHostInstanceView `json:"instanceView,omitempty"` + // InstanceView - READ-ONLY; The Capacity reservation instance view. + InstanceView *CapacityReservationInstanceView `json:"instanceView,omitempty"` } -// MarshalJSON is the custom marshaler for DedicatedHostProperties. -func (dhp DedicatedHostProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for CapacityReservationProperties. +func (crp CapacityReservationProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if dhp.PlatformFaultDomain != nil { - objectMap["platformFaultDomain"] = dhp.PlatformFaultDomain - } - if dhp.AutoReplaceOnFailure != nil { - objectMap["autoReplaceOnFailure"] = dhp.AutoReplaceOnFailure - } - if dhp.LicenseType != "" { - objectMap["licenseType"] = dhp.LicenseType - } return json.Marshal(objectMap) } -// DedicatedHostsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// CapacityReservationsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a // long-running operation. -type DedicatedHostsCreateOrUpdateFuture struct { +type CapacityReservationsCreateOrUpdateFuture struct { azure.FutureAPI // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. - Result func(DedicatedHostsClient) (DedicatedHost, error) + Result func(CapacityReservationsClient) (CapacityReservation, error) } // UnmarshalJSON is the custom unmarshaller for CreateFuture. 
-func (future *DedicatedHostsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { +func (future *CapacityReservationsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { var azFuture azure.Future if err := json.Unmarshal(body, &azFuture); err != nil { return err @@ -1456,40 +1282,40 @@ func (future *DedicatedHostsCreateOrUpdateFuture) UnmarshalJSON(body []byte) err return nil } -// result is the default implementation for DedicatedHostsCreateOrUpdateFuture.Result. -func (future *DedicatedHostsCreateOrUpdateFuture) result(client DedicatedHostsClient) (dh DedicatedHost, err error) { +// result is the default implementation for CapacityReservationsCreateOrUpdateFuture.Result. +func (future *CapacityReservationsCreateOrUpdateFuture) result(client CapacityReservationsClient) (cr CapacityReservation, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { - err = autorest.NewErrorWithError(err, "compute.DedicatedHostsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + err = autorest.NewErrorWithError(err, "compute.CapacityReservationsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") return } if !done { - dh.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.DedicatedHostsCreateOrUpdateFuture") + cr.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.CapacityReservationsCreateOrUpdateFuture") return } sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if dh.Response.Response, err = future.GetResult(sender); err == nil && dh.Response.Response.StatusCode != http.StatusNoContent { - dh, err = client.CreateOrUpdateResponder(dh.Response.Response) + if cr.Response.Response, err = future.GetResult(sender); err == nil && cr.Response.Response.StatusCode != http.StatusNoContent { + cr, err = client.CreateOrUpdateResponder(cr.Response.Response) if err != nil { - err = autorest.NewErrorWithError(err, "compute.DedicatedHostsCreateOrUpdateFuture", "Result", dh.Response.Response, "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.CapacityReservationsCreateOrUpdateFuture", "Result", cr.Response.Response, "Failure responding to request") } } return } -// DedicatedHostsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type DedicatedHostsDeleteFuture struct { +// CapacityReservationsDeleteFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type CapacityReservationsDeleteFuture struct { azure.FutureAPI // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. - Result func(DedicatedHostsClient) (autorest.Response, error) + Result func(CapacityReservationsClient) (autorest.Response, error) } // UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *DedicatedHostsDeleteFuture) UnmarshalJSON(body []byte) error { +func (future *CapacityReservationsDeleteFuture) UnmarshalJSON(body []byte) error { var azFuture azure.Future if err := json.Unmarshal(body, &azFuture); err != nil { return err @@ -1499,34 +1325,34 @@ func (future *DedicatedHostsDeleteFuture) UnmarshalJSON(body []byte) error { return nil } -// result is the default implementation for DedicatedHostsDeleteFuture.Result. 
-func (future *DedicatedHostsDeleteFuture) result(client DedicatedHostsClient) (ar autorest.Response, err error) { +// result is the default implementation for CapacityReservationsDeleteFuture.Result. +func (future *CapacityReservationsDeleteFuture) result(client CapacityReservationsClient) (ar autorest.Response, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { - err = autorest.NewErrorWithError(err, "compute.DedicatedHostsDeleteFuture", "Result", future.Response(), "Polling failure") + err = autorest.NewErrorWithError(err, "compute.CapacityReservationsDeleteFuture", "Result", future.Response(), "Polling failure") return } if !done { ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.DedicatedHostsDeleteFuture") + err = azure.NewAsyncOpIncompleteError("compute.CapacityReservationsDeleteFuture") return } ar.Response = future.Response() return } -// DedicatedHostsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type DedicatedHostsUpdateFuture struct { +// CapacityReservationsUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type CapacityReservationsUpdateFuture struct { azure.FutureAPI // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. - Result func(DedicatedHostsClient) (DedicatedHost, error) + Result func(CapacityReservationsClient) (CapacityReservation, error) } // UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *DedicatedHostsUpdateFuture) UnmarshalJSON(body []byte) error { +func (future *CapacityReservationsUpdateFuture) UnmarshalJSON(body []byte) error { var azFuture azure.Future if err := json.Unmarshal(body, &azFuture); err != nil { return err @@ -1536,51 +1362,56 @@ func (future *DedicatedHostsUpdateFuture) UnmarshalJSON(body []byte) error { return nil } -// result is the default implementation for DedicatedHostsUpdateFuture.Result. -func (future *DedicatedHostsUpdateFuture) result(client DedicatedHostsClient) (dh DedicatedHost, err error) { +// result is the default implementation for CapacityReservationsUpdateFuture.Result. 
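These *Future types wrap the service's long-running operations. A hedged sketch of the usual consumption pattern, not part of the generated code: block on WaitForCompletionRef (exposed via the embedded azure.FutureAPI), then extract the typed result. It assumes CapacityReservationsClient follows the standard BaseClient layout so that client.Client is the embedded autorest.Client; the helper name is hypothetical.

func waitForCapacityReservation(ctx context.Context, client CapacityReservationsClient, future CapacityReservationsCreateOrUpdateFuture) (CapacityReservation, error) {
	// Poll until the operation reaches a terminal state, then decode the final payload.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return CapacityReservation{}, err
	}
	return future.Result(client)
}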
+func (future *CapacityReservationsUpdateFuture) result(client CapacityReservationsClient) (cr CapacityReservation, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { - err = autorest.NewErrorWithError(err, "compute.DedicatedHostsUpdateFuture", "Result", future.Response(), "Polling failure") + err = autorest.NewErrorWithError(err, "compute.CapacityReservationsUpdateFuture", "Result", future.Response(), "Polling failure") return } if !done { - dh.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.DedicatedHostsUpdateFuture") + cr.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.CapacityReservationsUpdateFuture") return } sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if dh.Response.Response, err = future.GetResult(sender); err == nil && dh.Response.Response.StatusCode != http.StatusNoContent { - dh, err = client.UpdateResponder(dh.Response.Response) + if cr.Response.Response, err = future.GetResult(sender); err == nil && cr.Response.Response.StatusCode != http.StatusNoContent { + cr, err = client.UpdateResponder(cr.Response.Response) if err != nil { - err = autorest.NewErrorWithError(err, "compute.DedicatedHostsUpdateFuture", "Result", dh.Response.Response, "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.CapacityReservationsUpdateFuture", "Result", cr.Response.Response, "Failure responding to request") } } return } -// DedicatedHostUpdate specifies information about the dedicated host. Only tags, autoReplaceOnFailure and -// licenseType may be updated. -type DedicatedHostUpdate struct { - *DedicatedHostProperties `json:"properties,omitempty"` +// CapacityReservationUpdate specifies information about the capacity reservation. Only tags and +// sku.capacity can be updated. +type CapacityReservationUpdate struct { + *CapacityReservationProperties `json:"properties,omitempty"` + // Sku - SKU of the resource for which capacity needs be reserved. The SKU name and capacity is required to be set. Currently VM Skus with the capability called 'CapacityReservationSupported' set to true are supported. Refer to List Microsoft.Compute SKUs in a region (https://docs.microsoft.com/rest/api/compute/resourceskus/list) for supported values. + Sku *Sku `json:"sku,omitempty"` // Tags - Resource tags Tags map[string]*string `json:"tags"` } -// MarshalJSON is the custom marshaler for DedicatedHostUpdate. -func (dhu DedicatedHostUpdate) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for CapacityReservationUpdate. +func (cru CapacityReservationUpdate) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if dhu.DedicatedHostProperties != nil { - objectMap["properties"] = dhu.DedicatedHostProperties + if cru.CapacityReservationProperties != nil { + objectMap["properties"] = cru.CapacityReservationProperties } - if dhu.Tags != nil { - objectMap["tags"] = dhu.Tags + if cru.Sku != nil { + objectMap["sku"] = cru.Sku + } + if cru.Tags != nil { + objectMap["tags"] = cru.Tags } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for DedicatedHostUpdate struct. -func (dhu *DedicatedHostUpdate) UnmarshalJSON(body []byte) error { +// UnmarshalJSON is the custom unmarshaler for CapacityReservationUpdate struct. 
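Because only tags and sku.capacity are mutable on an existing reservation, an update payload stays small. A minimal sketch under stated assumptions (the package-level Sku type with Name/Capacity fields, and the to.StringPtr/to.Int64Ptr helpers from github.com/Azure/go-autorest/autorest/to); the function name and literal values are placeholders, not part of the SDK.

// exampleCapacityReservationUpdate builds the small payload the API accepts on
// update: a new sku.capacity (with the SKU name set alongside it) plus tags.
func exampleCapacityReservationUpdate() CapacityReservationUpdate {
	return CapacityReservationUpdate{
		Sku: &Sku{
			Name:     to.StringPtr("Standard_DS1_v2"), // placeholder SKU name
			Capacity: to.Int64Ptr(5),                  // new reserved instance count
		},
		Tags: map[string]*string{
			"environment": to.StringPtr("test"),
		},
	}
}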
+func (cru *CapacityReservationUpdate) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { @@ -1590,12 +1421,21 @@ func (dhu *DedicatedHostUpdate) UnmarshalJSON(body []byte) error { switch k { case "properties": if v != nil { - var dedicatedHostProperties DedicatedHostProperties - err = json.Unmarshal(*v, &dedicatedHostProperties) + var capacityReservationProperties CapacityReservationProperties + err = json.Unmarshal(*v, &capacityReservationProperties) if err != nil { return err } - dhu.DedicatedHostProperties = &dedicatedHostProperties + cru.CapacityReservationProperties = &capacityReservationProperties + } + case "sku": + if v != nil { + var sku Sku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + cru.Sku = &sku } case "tags": if v != nil { @@ -1604,7 +1444,7 @@ func (dhu *DedicatedHostUpdate) UnmarshalJSON(body []byte) error { if err != nil { return err } - dhu.Tags = tags + cru.Tags = tags } } } @@ -1612,20 +1452,2516 @@ func (dhu *DedicatedHostUpdate) UnmarshalJSON(body []byte) error { return nil } -// DiagnosticsProfile specifies the boot diagnostic settings state.

Minimum api-version: -// 2015-06-15. -type DiagnosticsProfile struct { - // BootDiagnostics - Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status.

You can easily view the output of your console log.

Azure also enables you to see a screenshot of the VM from the hypervisor. - BootDiagnostics *BootDiagnostics `json:"bootDiagnostics,omitempty"` +// CapacityReservationUtilization represents the capacity reservation utilization in terms of resources +// allocated. +type CapacityReservationUtilization struct { + // VirtualMachinesAllocated - READ-ONLY; A list of all virtual machines resource ids allocated against the capacity reservation. + VirtualMachinesAllocated *[]SubResourceReadOnly `json:"virtualMachinesAllocated,omitempty"` } -// DiffDiskSettings describes the parameters of ephemeral disk settings that can be specified for operating -// system disk.

NOTE: The ephemeral disk settings can only be specified for managed disk. -type DiffDiskSettings struct { - // Option - Specifies the ephemeral disk settings for operating system disk. Possible values include: 'DiffDiskOptionsLocal' - Option DiffDiskOptions `json:"option,omitempty"` - // Placement - Specifies the ephemeral disk placement for operating system disk.

Possible values are:

**CacheDisk**

**ResourceDisk**

Default: **CacheDisk** if one is configured for the VM size otherwise **ResourceDisk** is used.

Refer to VM size documentation for Windows VM at https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes and Linux VM at https://docs.microsoft.com/en-us/azure/virtual-machines/linux/sizes to check which VM sizes exposes a cache disk. Possible values include: 'DiffDiskPlacementCacheDisk', 'DiffDiskPlacementResourceDisk' - Placement DiffDiskPlacement `json:"placement,omitempty"` +// MarshalJSON is the custom marshaler for CapacityReservationUtilization. +func (cru CapacityReservationUtilization) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// CloudError an error response from the Compute service. +type CloudError struct { + Error *APIError `json:"error,omitempty"` +} + +// CloudService describes the cloud service. +type CloudService struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; Resource Id. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type. + Type *string `json:"type,omitempty"` + // Location - Resource location. + Location *string `json:"location,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` + Properties *CloudServiceProperties `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for CloudService. +func (cs CloudService) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if cs.Location != nil { + objectMap["location"] = cs.Location + } + if cs.Tags != nil { + objectMap["tags"] = cs.Tags + } + if cs.Properties != nil { + objectMap["properties"] = cs.Properties + } + return json.Marshal(objectMap) +} + +// CloudServiceExtensionProfile describes a cloud service extension profile. +type CloudServiceExtensionProfile struct { + // Extensions - List of extensions for the cloud service. + Extensions *[]Extension `json:"extensions,omitempty"` +} + +// CloudServiceExtensionProperties extension Properties. +type CloudServiceExtensionProperties struct { + // Publisher - The name of the extension handler publisher. + Publisher *string `json:"publisher,omitempty"` + // Type - Specifies the type of the extension. + Type *string `json:"type,omitempty"` + // TypeHandlerVersion - Specifies the version of the extension. Specifies the version of the extension. If this element is not specified or an asterisk (*) is used as the value, the latest version of the extension is used. If the value is specified with a major version number and an asterisk as the minor version number (X.), the latest minor version of the specified major version is selected. If a major version number and a minor version number are specified (X.Y), the specific extension version is selected. If a version is specified, an auto-upgrade is performed on the role instance. + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` + // AutoUpgradeMinorVersion - Explicitly specify whether platform can automatically upgrade typeHandlerVersion to higher minor versions when they become available. + AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty"` + // Settings - Public settings for the extension. For JSON extensions, this is the JSON settings for the extension. For XML Extension (like RDP), this is the XML setting for the extension. + Settings *string `json:"settings,omitempty"` + // ProtectedSettings - Protected settings for the extension which are encrypted before sent to the role instance. 
+ ProtectedSettings *string `json:"protectedSettings,omitempty"` + ProtectedSettingsFromKeyVault *CloudServiceVaultAndSecretReference `json:"protectedSettingsFromKeyVault,omitempty"` + // ForceUpdateTag - Tag to force apply the provided public and protected settings. + // Changing the tag value allows for re-running the extension without changing any of the public or protected settings. + // If forceUpdateTag is not changed, updates to public or protected settings would still be applied by the handler. + // If neither forceUpdateTag nor any of public or protected settings change, extension would flow to the role instance with the same sequence-number, and + // it is up to handler implementation whether to re-run it or not + ForceUpdateTag *string `json:"forceUpdateTag,omitempty"` + // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. + ProvisioningState *string `json:"provisioningState,omitempty"` + // RolesAppliedTo - Optional list of roles to apply this extension. If property is not specified or '*' is specified, extension is applied to all roles in the cloud service. + RolesAppliedTo *[]string `json:"rolesAppliedTo,omitempty"` +} + +// MarshalJSON is the custom marshaler for CloudServiceExtensionProperties. +func (csep CloudServiceExtensionProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if csep.Publisher != nil { + objectMap["publisher"] = csep.Publisher + } + if csep.Type != nil { + objectMap["type"] = csep.Type + } + if csep.TypeHandlerVersion != nil { + objectMap["typeHandlerVersion"] = csep.TypeHandlerVersion + } + if csep.AutoUpgradeMinorVersion != nil { + objectMap["autoUpgradeMinorVersion"] = csep.AutoUpgradeMinorVersion + } + if csep.Settings != nil { + objectMap["settings"] = csep.Settings + } + if csep.ProtectedSettings != nil { + objectMap["protectedSettings"] = csep.ProtectedSettings + } + if csep.ProtectedSettingsFromKeyVault != nil { + objectMap["protectedSettingsFromKeyVault"] = csep.ProtectedSettingsFromKeyVault + } + if csep.ForceUpdateTag != nil { + objectMap["forceUpdateTag"] = csep.ForceUpdateTag + } + if csep.RolesAppliedTo != nil { + objectMap["rolesAppliedTo"] = csep.RolesAppliedTo + } + return json.Marshal(objectMap) +} + +// CloudServiceInstanceView instanceView of CloudService as a whole +type CloudServiceInstanceView struct { + autorest.Response `json:"-"` + RoleInstance *InstanceViewStatusesSummary `json:"roleInstance,omitempty"` + // SdkVersion - READ-ONLY; The version of the SDK that was used to generate the package for the cloud service. + SdkVersion *string `json:"sdkVersion,omitempty"` + // PrivateIds - READ-ONLY; Specifies a list of unique identifiers generated internally for the cloud service.

NOTE: If you are using Azure Diagnostics extension, this property can be used as 'DeploymentId' for querying details. + PrivateIds *[]string `json:"privateIds,omitempty"` + // Statuses - READ-ONLY + Statuses *[]ResourceInstanceViewStatus `json:"statuses,omitempty"` +} + +// MarshalJSON is the custom marshaler for CloudServiceInstanceView. +func (csiv CloudServiceInstanceView) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if csiv.RoleInstance != nil { + objectMap["roleInstance"] = csiv.RoleInstance + } + return json.Marshal(objectMap) +} + +// CloudServiceListResult ... +type CloudServiceListResult struct { + autorest.Response `json:"-"` + Value *[]CloudService `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// CloudServiceListResultIterator provides access to a complete listing of CloudService values. +type CloudServiceListResultIterator struct { + i int + page CloudServiceListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *CloudServiceListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *CloudServiceListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter CloudServiceListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter CloudServiceListResultIterator) Response() CloudServiceListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter CloudServiceListResultIterator) Value() CloudService { + if !iter.page.NotDone() { + return CloudService{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the CloudServiceListResultIterator type. +func NewCloudServiceListResultIterator(page CloudServiceListResultPage) CloudServiceListResultIterator { + return CloudServiceListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (cslr CloudServiceListResult) IsEmpty() bool { + return cslr.Value == nil || len(*cslr.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (cslr CloudServiceListResult) hasNextLink() bool { + return cslr.NextLink != nil && len(*cslr.NextLink) != 0 +} + +// cloudServiceListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. 
+func (cslr CloudServiceListResult) cloudServiceListResultPreparer(ctx context.Context) (*http.Request, error) { + if !cslr.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(cslr.NextLink))) +} + +// CloudServiceListResultPage contains a page of CloudService values. +type CloudServiceListResultPage struct { + fn func(context.Context, CloudServiceListResult) (CloudServiceListResult, error) + cslr CloudServiceListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *CloudServiceListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.cslr) + if err != nil { + return err + } + page.cslr = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *CloudServiceListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page CloudServiceListResultPage) NotDone() bool { + return !page.cslr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page CloudServiceListResultPage) Response() CloudServiceListResult { + return page.cslr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page CloudServiceListResultPage) Values() []CloudService { + if page.cslr.IsEmpty() { + return nil + } + return *page.cslr.Value +} + +// Creates a new instance of the CloudServiceListResultPage type. +func NewCloudServiceListResultPage(cur CloudServiceListResult, getNextPage func(context.Context, CloudServiceListResult) (CloudServiceListResult, error)) CloudServiceListResultPage { + return CloudServiceListResultPage{ + fn: getNextPage, + cslr: cur, + } +} + +// CloudServiceNetworkProfile network Profile for the cloud service. +type CloudServiceNetworkProfile struct { + // LoadBalancerConfigurations - List of Load balancer configurations. Cloud service can have up to two load balancer configurations, corresponding to a Public Load Balancer and an Internal Load Balancer. + LoadBalancerConfigurations *[]LoadBalancerConfiguration `json:"loadBalancerConfigurations,omitempty"` + // SwappableCloudService - The id reference of the cloud service containing the target IP with which the subject cloud service can perform a swap. This property cannot be updated once it is set. The swappable cloud service referred by this id must be present otherwise an error will be thrown. + SwappableCloudService *SubResource `json:"swappableCloudService,omitempty"` +} + +// CloudServiceOsProfile describes the OS profile for the cloud service. +type CloudServiceOsProfile struct { + // Secrets - Specifies set of certificates that should be installed onto the role instances. 
+ Secrets *[]CloudServiceVaultSecretGroup `json:"secrets,omitempty"` +} + +// CloudServiceProperties cloud service properties +type CloudServiceProperties struct { + // PackageURL - Specifies a URL that refers to the location of the service package in the Blob service. The service package URL can be Shared Access Signature (SAS) URI from any storage account. + // This is a write-only property and is not returned in GET calls. + PackageURL *string `json:"packageUrl,omitempty"` + // Configuration - Specifies the XML service configuration (.cscfg) for the cloud service. + Configuration *string `json:"configuration,omitempty"` + // ConfigurationURL - Specifies a URL that refers to the location of the service configuration in the Blob service. The service package URL can be Shared Access Signature (SAS) URI from any storage account. + // This is a write-only property and is not returned in GET calls. + ConfigurationURL *string `json:"configurationUrl,omitempty"` + // StartCloudService - (Optional) Indicates whether to start the cloud service immediately after it is created. The default value is `true`. + // If false, the service model is still deployed, but the code is not run immediately. Instead, the service is PoweredOff until you call Start, at which time the service will be started. A deployed service still incurs charges, even if it is poweredoff. + StartCloudService *bool `json:"startCloudService,omitempty"` + // AllowModelOverride - (Optional) Indicates whether the role sku properties (roleProfile.roles.sku) specified in the model/template should override the role instance count and vm size specified in the .cscfg and .csdef respectively. + // The default value is `false`. + AllowModelOverride *bool `json:"allowModelOverride,omitempty"` + // UpgradeMode - Possible values include: 'CloudServiceUpgradeModeAuto', 'CloudServiceUpgradeModeManual', 'CloudServiceUpgradeModeSimultaneous' + UpgradeMode CloudServiceUpgradeMode `json:"upgradeMode,omitempty"` + RoleProfile *CloudServiceRoleProfile `json:"roleProfile,omitempty"` + OsProfile *CloudServiceOsProfile `json:"osProfile,omitempty"` + NetworkProfile *CloudServiceNetworkProfile `json:"networkProfile,omitempty"` + ExtensionProfile *CloudServiceExtensionProfile `json:"extensionProfile,omitempty"` + // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. + ProvisioningState *string `json:"provisioningState,omitempty"` + // UniqueID - READ-ONLY; The unique identifier for the cloud service. + UniqueID *string `json:"uniqueId,omitempty"` +} + +// MarshalJSON is the custom marshaler for CloudServiceProperties. 
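For orientation only, not part of the generated code: a minimal CloudServiceProperties value. PackageURL and ConfigurationURL are write-only SAS URIs that are not echoed back in GET calls; the URLs below are placeholders, the function name is hypothetical, and to.StringPtr/to.BoolPtr come from github.com/Azure/go-autorest/autorest/to.

// exampleCloudServiceProperties sketches the fields most deployments set when
// creating a cloud service from a packaged .cspkg/.cscfg pair in blob storage.
func exampleCloudServiceProperties() CloudServiceProperties {
	return CloudServiceProperties{
		PackageURL:        to.StringPtr("https://example.blob.core.windows.net/pkgs/app.cspkg?sv=..."), // placeholder SAS URI
		ConfigurationURL:  to.StringPtr("https://example.blob.core.windows.net/pkgs/app.cscfg?sv=..."), // placeholder SAS URI
		StartCloudService: to.BoolPtr(true),
		UpgradeMode:       CloudServiceUpgradeModeAuto,
	}
}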
+func (csp CloudServiceProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if csp.PackageURL != nil { + objectMap["packageUrl"] = csp.PackageURL + } + if csp.Configuration != nil { + objectMap["configuration"] = csp.Configuration + } + if csp.ConfigurationURL != nil { + objectMap["configurationUrl"] = csp.ConfigurationURL + } + if csp.StartCloudService != nil { + objectMap["startCloudService"] = csp.StartCloudService + } + if csp.AllowModelOverride != nil { + objectMap["allowModelOverride"] = csp.AllowModelOverride + } + if csp.UpgradeMode != "" { + objectMap["upgradeMode"] = csp.UpgradeMode + } + if csp.RoleProfile != nil { + objectMap["roleProfile"] = csp.RoleProfile + } + if csp.OsProfile != nil { + objectMap["osProfile"] = csp.OsProfile + } + if csp.NetworkProfile != nil { + objectMap["networkProfile"] = csp.NetworkProfile + } + if csp.ExtensionProfile != nil { + objectMap["extensionProfile"] = csp.ExtensionProfile + } + return json.Marshal(objectMap) +} + +// CloudServiceRole describes a role of the cloud service. +type CloudServiceRole struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; Resource id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - READ-ONLY; Resource location + Location *string `json:"location,omitempty"` + Sku *CloudServiceRoleSku `json:"sku,omitempty"` + Properties *CloudServiceRoleProperties `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for CloudServiceRole. +func (csr CloudServiceRole) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if csr.Sku != nil { + objectMap["sku"] = csr.Sku + } + if csr.Properties != nil { + objectMap["properties"] = csr.Properties + } + return json.Marshal(objectMap) +} + +// CloudServiceRoleInstancesDeleteFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type CloudServiceRoleInstancesDeleteFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(CloudServiceRoleInstancesClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *CloudServiceRoleInstancesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for CloudServiceRoleInstancesDeleteFuture.Result. +func (future *CloudServiceRoleInstancesDeleteFuture) result(client CloudServiceRoleInstancesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.CloudServiceRoleInstancesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// CloudServiceRoleInstancesRebuildFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. 
+type CloudServiceRoleInstancesRebuildFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(CloudServiceRoleInstancesClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *CloudServiceRoleInstancesRebuildFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for CloudServiceRoleInstancesRebuildFuture.Result. +func (future *CloudServiceRoleInstancesRebuildFuture) result(client CloudServiceRoleInstancesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesRebuildFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.CloudServiceRoleInstancesRebuildFuture") + return + } + ar.Response = future.Response() + return +} + +// CloudServiceRoleInstancesReimageFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type CloudServiceRoleInstancesReimageFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(CloudServiceRoleInstancesClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *CloudServiceRoleInstancesReimageFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for CloudServiceRoleInstancesReimageFuture.Result. +func (future *CloudServiceRoleInstancesReimageFuture) result(client CloudServiceRoleInstancesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesReimageFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.CloudServiceRoleInstancesReimageFuture") + return + } + ar.Response = future.Response() + return +} + +// CloudServiceRoleInstancesRestartFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type CloudServiceRoleInstancesRestartFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(CloudServiceRoleInstancesClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *CloudServiceRoleInstancesRestartFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for CloudServiceRoleInstancesRestartFuture.Result. +func (future *CloudServiceRoleInstancesRestartFuture) result(client CloudServiceRoleInstancesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesRestartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.CloudServiceRoleInstancesRestartFuture") + return + } + ar.Response = future.Response() + return +} + +// CloudServiceRoleListResult ... +type CloudServiceRoleListResult struct { + autorest.Response `json:"-"` + Value *[]CloudServiceRole `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// CloudServiceRoleListResultIterator provides access to a complete listing of CloudServiceRole values. +type CloudServiceRoleListResultIterator struct { + i int + page CloudServiceRoleListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *CloudServiceRoleListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRoleListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *CloudServiceRoleListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter CloudServiceRoleListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter CloudServiceRoleListResultIterator) Response() CloudServiceRoleListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter CloudServiceRoleListResultIterator) Value() CloudServiceRole { + if !iter.page.NotDone() { + return CloudServiceRole{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the CloudServiceRoleListResultIterator type. +func NewCloudServiceRoleListResultIterator(page CloudServiceRoleListResultPage) CloudServiceRoleListResultIterator { + return CloudServiceRoleListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. 
+func (csrlr CloudServiceRoleListResult) IsEmpty() bool { + return csrlr.Value == nil || len(*csrlr.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (csrlr CloudServiceRoleListResult) hasNextLink() bool { + return csrlr.NextLink != nil && len(*csrlr.NextLink) != 0 +} + +// cloudServiceRoleListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (csrlr CloudServiceRoleListResult) cloudServiceRoleListResultPreparer(ctx context.Context) (*http.Request, error) { + if !csrlr.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(csrlr.NextLink))) +} + +// CloudServiceRoleListResultPage contains a page of CloudServiceRole values. +type CloudServiceRoleListResultPage struct { + fn func(context.Context, CloudServiceRoleListResult) (CloudServiceRoleListResult, error) + csrlr CloudServiceRoleListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *CloudServiceRoleListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRoleListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.csrlr) + if err != nil { + return err + } + page.csrlr = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *CloudServiceRoleListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page CloudServiceRoleListResultPage) NotDone() bool { + return !page.csrlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page CloudServiceRoleListResultPage) Response() CloudServiceRoleListResult { + return page.csrlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page CloudServiceRoleListResultPage) Values() []CloudServiceRole { + if page.csrlr.IsEmpty() { + return nil + } + return *page.csrlr.Value +} + +// Creates a new instance of the CloudServiceRoleListResultPage type. +func NewCloudServiceRoleListResultPage(cur CloudServiceRoleListResult, getNextPage func(context.Context, CloudServiceRoleListResult) (CloudServiceRoleListResult, error)) CloudServiceRoleListResultPage { + return CloudServiceRoleListResultPage{ + fn: getNextPage, + csrlr: cur, + } +} + +// CloudServiceRoleProfile describes the role profile for the cloud service. +type CloudServiceRoleProfile struct { + // Roles - List of roles for the cloud service. + Roles *[]CloudServiceRoleProfileProperties `json:"roles,omitempty"` +} + +// CloudServiceRoleProfileProperties describes the role properties. +type CloudServiceRoleProfileProperties struct { + // Name - Resource name. + Name *string `json:"name,omitempty"` + Sku *CloudServiceRoleSku `json:"sku,omitempty"` +} + +// CloudServiceRoleProperties ... 
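The `ListResult`/`ListResultPage`/`ListResultIterator` trios above all follow the same generated autorest paging pattern: `hasNextLink` decides whether another request gets prepared, `NotDone` and `Values` expose the current page, and `NextWithContext` follows `nextLink` until it runs out. Below is a minimal sketch of how a caller might drain every page of `CloudServiceRole` values; the vendored import path and the `CloudServiceRolesClient.List` call that would produce the first page are assumptions, not part of this diff.

```go
package example

import (
	"context"

	// Assumed vendored path for the generated package in this diff; it may differ.
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

// allCloudServiceRoles walks the pager defined above and collects every role.
// The initial page is assumed to come from a List call on CloudServiceRolesClient.
func allCloudServiceRoles(ctx context.Context, page compute.CloudServiceRoleListResultPage) ([]compute.CloudServiceRole, error) {
	var roles []compute.CloudServiceRole
	for page.NotDone() {
		// Values returns the roles on the current page (nil for an empty page).
		roles = append(roles, page.Values()...)
		// NextWithContext follows nextLink; on error the page does not advance.
		if err := page.NextWithContext(ctx); err != nil {
			return nil, err
		}
	}
	return roles, nil
}
```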
+type CloudServiceRoleProperties struct { + // UniqueID - READ-ONLY; Specifies the ID which uniquely identifies a cloud service role. + UniqueID *string `json:"uniqueId,omitempty"` +} + +// MarshalJSON is the custom marshaler for CloudServiceRoleProperties. +func (csrp CloudServiceRoleProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// CloudServiceRoleSku describes the cloud service role sku. +type CloudServiceRoleSku struct { + // Name - The sku name. NOTE: If the new SKU is not supported on the hardware the cloud service is currently on, you need to delete and recreate the cloud service or move back to the old sku. + Name *string `json:"name,omitempty"` + // Tier - Specifies the tier of the cloud service. Possible Values are
**Standard**
**Basic** + Tier *string `json:"tier,omitempty"` + // Capacity - Specifies the number of role instances in the cloud service. + Capacity *int64 `json:"capacity,omitempty"` +} + +// CloudServicesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type CloudServicesCreateOrUpdateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(CloudServicesClient) (CloudService, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *CloudServicesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for CloudServicesCreateOrUpdateFuture.Result. +func (future *CloudServicesCreateOrUpdateFuture) result(client CloudServicesClient) (cs CloudService, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + cs.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.CloudServicesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if cs.Response.Response, err = future.GetResult(sender); err == nil && cs.Response.Response.StatusCode != http.StatusNoContent { + cs, err = client.CreateOrUpdateResponder(cs.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesCreateOrUpdateFuture", "Result", cs.Response.Response, "Failure responding to request") + } + } + return +} + +// CloudServicesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type CloudServicesDeleteFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(CloudServicesClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *CloudServicesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for CloudServicesDeleteFuture.Result. +func (future *CloudServicesDeleteFuture) result(client CloudServicesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.CloudServicesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// CloudServicesDeleteInstancesFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. 
+type CloudServicesDeleteInstancesFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(CloudServicesClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *CloudServicesDeleteInstancesFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for CloudServicesDeleteInstancesFuture.Result. +func (future *CloudServicesDeleteInstancesFuture) result(client CloudServicesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesDeleteInstancesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.CloudServicesDeleteInstancesFuture") + return + } + ar.Response = future.Response() + return +} + +// CloudServicesPowerOffFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type CloudServicesPowerOffFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(CloudServicesClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *CloudServicesPowerOffFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for CloudServicesPowerOffFuture.Result. +func (future *CloudServicesPowerOffFuture) result(client CloudServicesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesPowerOffFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.CloudServicesPowerOffFuture") + return + } + ar.Response = future.Response() + return +} + +// CloudServicesRebuildFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type CloudServicesRebuildFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(CloudServicesClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *CloudServicesRebuildFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for CloudServicesRebuildFuture.Result. 
+func (future *CloudServicesRebuildFuture) result(client CloudServicesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesRebuildFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.CloudServicesRebuildFuture") + return + } + ar.Response = future.Response() + return +} + +// CloudServicesReimageFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type CloudServicesReimageFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(CloudServicesClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *CloudServicesReimageFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for CloudServicesReimageFuture.Result. +func (future *CloudServicesReimageFuture) result(client CloudServicesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesReimageFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.CloudServicesReimageFuture") + return + } + ar.Response = future.Response() + return +} + +// CloudServicesRestartFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type CloudServicesRestartFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(CloudServicesClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *CloudServicesRestartFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for CloudServicesRestartFuture.Result. +func (future *CloudServicesRestartFuture) result(client CloudServicesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesRestartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.CloudServicesRestartFuture") + return + } + ar.Response = future.Response() + return +} + +// CloudServicesStartFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type CloudServicesStartFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. 
+ Result func(CloudServicesClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *CloudServicesStartFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for CloudServicesStartFuture.Result. +func (future *CloudServicesStartFuture) result(client CloudServicesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesStartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.CloudServicesStartFuture") + return + } + ar.Response = future.Response() + return +} + +// CloudServicesUpdateDomainWalkUpdateDomainFuture an abstraction for monitoring and retrieving the results +// of a long-running operation. +type CloudServicesUpdateDomainWalkUpdateDomainFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(CloudServicesUpdateDomainClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *CloudServicesUpdateDomainWalkUpdateDomainFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for CloudServicesUpdateDomainWalkUpdateDomainFuture.Result. +func (future *CloudServicesUpdateDomainWalkUpdateDomainFuture) result(client CloudServicesUpdateDomainClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesUpdateDomainWalkUpdateDomainFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.CloudServicesUpdateDomainWalkUpdateDomainFuture") + return + } + ar.Response = future.Response() + return +} + +// CloudServicesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type CloudServicesUpdateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(CloudServicesClient) (CloudService, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *CloudServicesUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for CloudServicesUpdateFuture.Result. 
+func (future *CloudServicesUpdateFuture) result(client CloudServicesClient) (cs CloudService, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + cs.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.CloudServicesUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if cs.Response.Response, err = future.GetResult(sender); err == nil && cs.Response.Response.StatusCode != http.StatusNoContent { + cs, err = client.UpdateResponder(cs.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.CloudServicesUpdateFuture", "Result", cs.Response.Response, "Failure responding to request") + } + } + return +} + +// CloudServiceUpdate ... +type CloudServiceUpdate struct { + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for CloudServiceUpdate. +func (csu CloudServiceUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if csu.Tags != nil { + objectMap["tags"] = csu.Tags + } + return json.Marshal(objectMap) +} + +// CloudServiceVaultAndSecretReference ... +type CloudServiceVaultAndSecretReference struct { + SourceVault *SubResource `json:"sourceVault,omitempty"` + SecretURL *string `json:"secretUrl,omitempty"` +} + +// CloudServiceVaultCertificate describes a single certificate reference in a Key Vault, and where the +// certificate should reside on the role instance. +type CloudServiceVaultCertificate struct { + // CertificateURL - This is the URL of a certificate that has been uploaded to Key Vault as a secret. + CertificateURL *string `json:"certificateUrl,omitempty"` +} + +// CloudServiceVaultSecretGroup describes a set of certificates which are all in the same Key Vault. +type CloudServiceVaultSecretGroup struct { + // SourceVault - The relative URL of the Key Vault containing all of the certificates in VaultCertificates. + SourceVault *SubResource `json:"sourceVault,omitempty"` + // VaultCertificates - The list of key vault references in SourceVault which contain certificates. + VaultCertificates *[]CloudServiceVaultCertificate `json:"vaultCertificates,omitempty"` +} + +// CommunityGallery specifies information about the Community Gallery that you want to create or update. +type CommunityGallery struct { + autorest.Response `json:"-"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Location - READ-ONLY; Resource location + Location *string `json:"location,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + *CommunityGalleryIdentifier `json:"identifier,omitempty"` +} + +// MarshalJSON is the custom marshaler for CommunityGallery. +func (cg CommunityGallery) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if cg.CommunityGalleryIdentifier != nil { + objectMap["identifier"] = cg.CommunityGalleryIdentifier + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for CommunityGallery struct. 
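Every `CloudServices*Future` above is the same long-running-operation wrapper: the generated sender stores the initial response in the embedded `azure.FutureAPI`, `UnmarshalJSON` re-wires `Result` when a future is rehydrated, and `result` polls and then decodes the final response. A hedged sketch of how such a future is typically consumed follows (same imports as the paging sketch above; the helper name and the idea that the future came from `CloudServicesClient.CreateOrUpdate` are assumptions, not shown in this diff).

```go
// waitForCloudService blocks until the create/update operation finishes and
// returns the final CloudService decoded by the future's Result hook.
func waitForCloudService(ctx context.Context, client compute.CloudServicesClient, future compute.CloudServicesCreateOrUpdateFuture) (compute.CloudService, error) {
	// Poll until the operation reaches a terminal state, reusing the client's
	// embedded autorest.Client for retries and authorization.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return compute.CloudService{}, err
	}
	// Result fetches and decodes the final resource; it errors if the
	// operation has not completed yet.
	return future.Result(client)
}
```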
+func (cg *CommunityGallery) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + cg.Name = &name + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + cg.Location = &location + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + cg.Type = &typeVar + } + case "identifier": + if v != nil { + var communityGalleryIdentifier CommunityGalleryIdentifier + err = json.Unmarshal(*v, &communityGalleryIdentifier) + if err != nil { + return err + } + cg.CommunityGalleryIdentifier = &communityGalleryIdentifier + } + } + } + + return nil +} + +// CommunityGalleryIdentifier the identifier information of community gallery. +type CommunityGalleryIdentifier struct { + // UniqueID - The unique id of this community gallery. + UniqueID *string `json:"uniqueId,omitempty"` +} + +// CommunityGalleryImage specifies information about the gallery image definition that you want to create +// or update. +type CommunityGalleryImage struct { + autorest.Response `json:"-"` + *CommunityGalleryImageProperties `json:"properties,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Location - READ-ONLY; Resource location + Location *string `json:"location,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + *CommunityGalleryIdentifier `json:"identifier,omitempty"` +} + +// MarshalJSON is the custom marshaler for CommunityGalleryImage. +func (cgiVar CommunityGalleryImage) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if cgiVar.CommunityGalleryImageProperties != nil { + objectMap["properties"] = cgiVar.CommunityGalleryImageProperties + } + if cgiVar.CommunityGalleryIdentifier != nil { + objectMap["identifier"] = cgiVar.CommunityGalleryIdentifier + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for CommunityGalleryImage struct. 
+func (cgiVar *CommunityGalleryImage) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var communityGalleryImageProperties CommunityGalleryImageProperties + err = json.Unmarshal(*v, &communityGalleryImageProperties) + if err != nil { + return err + } + cgiVar.CommunityGalleryImageProperties = &communityGalleryImageProperties + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + cgiVar.Name = &name + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + cgiVar.Location = &location + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + cgiVar.Type = &typeVar + } + case "identifier": + if v != nil { + var communityGalleryIdentifier CommunityGalleryIdentifier + err = json.Unmarshal(*v, &communityGalleryIdentifier) + if err != nil { + return err + } + cgiVar.CommunityGalleryIdentifier = &communityGalleryIdentifier + } + } + } + + return nil +} + +// CommunityGalleryImageProperties describes the properties of a gallery image definition. +type CommunityGalleryImageProperties struct { + // OsType - This property allows you to specify the type of the OS that is included in the disk when creating a VM from a managed image.
Possible values are: **Windows**,
**Linux**. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // OsState - This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'. Possible values include: 'OperatingSystemStateTypesGeneralized', 'OperatingSystemStateTypesSpecialized' + OsState OperatingSystemStateTypes `json:"osState,omitempty"` + // EndOfLifeDate - The end of life date of the gallery image definition. This property can be used for decommissioning purposes. This property is updatable. + EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"` + Identifier *GalleryImageIdentifier `json:"identifier,omitempty"` + Recommended *RecommendedMachineConfiguration `json:"recommended,omitempty"` + Disallowed *Disallowed `json:"disallowed,omitempty"` + // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'HyperVGenerationV1', 'HyperVGenerationV2' + HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"` + // Features - A list of gallery image features. + Features *[]GalleryImageFeature `json:"features,omitempty"` + PurchasePlan *ImagePurchasePlan `json:"purchasePlan,omitempty"` +} + +// CommunityGalleryImageVersion specifies information about the gallery image version that you want to +// create or update. +type CommunityGalleryImageVersion struct { + autorest.Response `json:"-"` + *CommunityGalleryImageVersionProperties `json:"properties,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Location - READ-ONLY; Resource location + Location *string `json:"location,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + *CommunityGalleryIdentifier `json:"identifier,omitempty"` +} + +// MarshalJSON is the custom marshaler for CommunityGalleryImageVersion. +func (cgiv CommunityGalleryImageVersion) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if cgiv.CommunityGalleryImageVersionProperties != nil { + objectMap["properties"] = cgiv.CommunityGalleryImageVersionProperties + } + if cgiv.CommunityGalleryIdentifier != nil { + objectMap["identifier"] = cgiv.CommunityGalleryIdentifier + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for CommunityGalleryImageVersion struct. 
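The `CommunityGallery*` types flatten their `properties` and `identifier` envelopes into embedded fields, which is why each one carries a hand-written `UnmarshalJSON`. A small sketch of the effect (same imports as the paging sketch above plus `encoding/json`; the JSON literal is purely illustrative):

```go
// decodeCommunityGalleryImage shows the nested wire format being flattened onto
// the embedded *Properties and *Identifier fields by the custom UnmarshalJSON.
func decodeCommunityGalleryImage() (compute.CommunityGalleryImage, error) {
	raw := []byte(`{
		"name": "exampleImage",
		"location": "westeurope",
		"identifier": {"uniqueId": "/CommunityGalleries/exampleGallery/Images/exampleImage"},
		"properties": {"osType": "Linux", "osState": "Generalized"}
	}`)
	var img compute.CommunityGalleryImage
	if err := json.Unmarshal(raw, &img); err != nil {
		return compute.CommunityGalleryImage{}, err
	}
	// After decoding, img.CommunityGalleryIdentifier.UniqueID and
	// img.CommunityGalleryImageProperties.OsType are populated from the envelopes.
	return img, nil
}
```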
+func (cgiv *CommunityGalleryImageVersion) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var communityGalleryImageVersionProperties CommunityGalleryImageVersionProperties + err = json.Unmarshal(*v, &communityGalleryImageVersionProperties) + if err != nil { + return err + } + cgiv.CommunityGalleryImageVersionProperties = &communityGalleryImageVersionProperties + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + cgiv.Name = &name + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + cgiv.Location = &location + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + cgiv.Type = &typeVar + } + case "identifier": + if v != nil { + var communityGalleryIdentifier CommunityGalleryIdentifier + err = json.Unmarshal(*v, &communityGalleryIdentifier) + if err != nil { + return err + } + cgiv.CommunityGalleryIdentifier = &communityGalleryIdentifier + } + } + } + + return nil +} + +// CommunityGalleryImageVersionProperties describes the properties of a gallery image version. +type CommunityGalleryImageVersionProperties struct { + // PublishedDate - The published date of the gallery image version Definition. This property can be used for decommissioning purposes. This property is updatable. + PublishedDate *date.Time `json:"publishedDate,omitempty"` + // EndOfLifeDate - The end of life date of the gallery image version Definition. This property can be used for decommissioning purposes. This property is updatable. + EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"` +} + +// CreationData data used when creating a disk. +type CreationData struct { + // CreateOption - This enumerates the possible sources of a disk's creation. Possible values include: 'DiskCreateOptionEmpty', 'DiskCreateOptionAttach', 'DiskCreateOptionFromImage', 'DiskCreateOptionImport', 'DiskCreateOptionCopy', 'DiskCreateOptionRestore', 'DiskCreateOptionUpload', 'DiskCreateOptionCopyStart' + CreateOption DiskCreateOption `json:"createOption,omitempty"` + // StorageAccountID - Required if createOption is Import. The Azure Resource Manager identifier of the storage account containing the blob to import as a disk. + StorageAccountID *string `json:"storageAccountId,omitempty"` + // ImageReference - Disk source information. + ImageReference *ImageDiskReference `json:"imageReference,omitempty"` + // GalleryImageReference - Required if creating from a Gallery Image. The id of the ImageDiskReference will be the ARM id of the shared galley image version from which to create a disk. + GalleryImageReference *ImageDiskReference `json:"galleryImageReference,omitempty"` + // SourceURI - If createOption is Import, this is the URI of a blob to be imported into a managed disk. + SourceURI *string `json:"sourceUri,omitempty"` + // SourceResourceID - If createOption is Copy, this is the ARM id of the source snapshot or disk. + SourceResourceID *string `json:"sourceResourceId,omitempty"` + // SourceUniqueID - READ-ONLY; If this field is set, this is the unique id identifying the source of this resource. 
+ SourceUniqueID *string `json:"sourceUniqueId,omitempty"` + // UploadSizeBytes - If createOption is Upload, this is the size of the contents of the upload including the VHD footer. This value should be between 20972032 (20 MiB + 512 bytes for the VHD footer) and 35183298347520 bytes (32 TiB + 512 bytes for the VHD footer). + UploadSizeBytes *int64 `json:"uploadSizeBytes,omitempty"` + // LogicalSectorSize - Logical sector size in bytes for Ultra disks. Supported values are 512 ad 4096. 4096 is the default. + LogicalSectorSize *int32 `json:"logicalSectorSize,omitempty"` +} + +// MarshalJSON is the custom marshaler for CreationData. +func (cd CreationData) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if cd.CreateOption != "" { + objectMap["createOption"] = cd.CreateOption + } + if cd.StorageAccountID != nil { + objectMap["storageAccountId"] = cd.StorageAccountID + } + if cd.ImageReference != nil { + objectMap["imageReference"] = cd.ImageReference + } + if cd.GalleryImageReference != nil { + objectMap["galleryImageReference"] = cd.GalleryImageReference + } + if cd.SourceURI != nil { + objectMap["sourceUri"] = cd.SourceURI + } + if cd.SourceResourceID != nil { + objectMap["sourceResourceId"] = cd.SourceResourceID + } + if cd.UploadSizeBytes != nil { + objectMap["uploadSizeBytes"] = cd.UploadSizeBytes + } + if cd.LogicalSectorSize != nil { + objectMap["logicalSectorSize"] = cd.LogicalSectorSize + } + return json.Marshal(objectMap) +} + +// DataDisk describes a data disk. +type DataDisk struct { + // Lun - Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. + Lun *int32 `json:"lun,omitempty"` + // Name - The disk name. + Name *string `json:"name,omitempty"` + // Vhd - The virtual hard disk. + Vhd *VirtualHardDisk `json:"vhd,omitempty"` + // Image - The source user image virtual hard disk. The virtual hard disk will be copied before being attached to the virtual machine. If SourceImage is provided, the destination virtual hard drive must not exist. + Image *VirtualHardDisk `json:"image,omitempty"` + // Caching - Specifies the caching requirements.
Possible values are: **None**, **ReadOnly**, **ReadWrite**.
Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' + Caching CachingTypes `json:"caching,omitempty"` + // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` + // CreateOption - Specifies how the virtual machine should be created.
Possible values are: **Attach** – This value is used when you are using a specialized disk to create the virtual machine.
**FromImage** \u2013 This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach' + CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` + // DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image.
This value cannot be larger than 1023 GB + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // ManagedDisk - The managed disk parameters. + ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"` + // ToBeDetached - Specifies whether the data disk is in process of detachment from the VirtualMachine/VirtualMachineScaleset + ToBeDetached *bool `json:"toBeDetached,omitempty"` + // DiskIOPSReadWrite - READ-ONLY; Specifies the Read-Write IOPS for the managed disk when StorageAccountType is UltraSSD_LRS. Returned only for VirtualMachine ScaleSet VM disks. Can be updated only via updates to the VirtualMachine Scale Set. + DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"` + // DiskMBpsReadWrite - READ-ONLY; Specifies the bandwidth in MB per second for the managed disk when StorageAccountType is UltraSSD_LRS. Returned only for VirtualMachine ScaleSet VM disks. Can be updated only via updates to the VirtualMachine Scale Set. + DiskMBpsReadWrite *int64 `json:"diskMBpsReadWrite,omitempty"` + // DetachOption - Specifies the detach behavior to be used while detaching a disk or which is already in the process of detachment from the virtual machine. Supported values: **ForceDetach**.
detachOption: **ForceDetach** is applicable only for managed data disks. If a previous detachment attempt of the data disk did not complete due to an unexpected failure from the virtual machine and the disk is still not released, then use force-detach as a last resort option to detach the disk forcibly from the VM. All writes might not have been flushed when using this detach behavior.
This feature is still in preview mode and is not supported for VirtualMachineScaleSet. To force-detach a data disk update toBeDetached to 'true' along with setting detachOption: 'ForceDetach'. Possible values include: 'DiskDetachOptionTypesForceDetach' + DetachOption DiskDetachOptionTypes `json:"detachOption,omitempty"` + // DeleteOption - Specifies whether data disk should be deleted or detached upon VM deletion.
Possible values: **Delete** If this value is used, the data disk is deleted when VM is deleted. **Detach** If this value is used, the data disk is retained after VM is deleted.
The default value is set to **detach**. Possible values include: 'DiskDeleteOptionTypesDelete', 'DiskDeleteOptionTypesDetach' + DeleteOption DiskDeleteOptionTypes `json:"deleteOption,omitempty"` +} + +// MarshalJSON is the custom marshaler for DataDisk. +func (dd DataDisk) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dd.Lun != nil { + objectMap["lun"] = dd.Lun + } + if dd.Name != nil { + objectMap["name"] = dd.Name + } + if dd.Vhd != nil { + objectMap["vhd"] = dd.Vhd + } + if dd.Image != nil { + objectMap["image"] = dd.Image + } + if dd.Caching != "" { + objectMap["caching"] = dd.Caching + } + if dd.WriteAcceleratorEnabled != nil { + objectMap["writeAcceleratorEnabled"] = dd.WriteAcceleratorEnabled + } + if dd.CreateOption != "" { + objectMap["createOption"] = dd.CreateOption + } + if dd.DiskSizeGB != nil { + objectMap["diskSizeGB"] = dd.DiskSizeGB + } + if dd.ManagedDisk != nil { + objectMap["managedDisk"] = dd.ManagedDisk + } + if dd.ToBeDetached != nil { + objectMap["toBeDetached"] = dd.ToBeDetached + } + if dd.DetachOption != "" { + objectMap["detachOption"] = dd.DetachOption + } + if dd.DeleteOption != "" { + objectMap["deleteOption"] = dd.DeleteOption + } + return json.Marshal(objectMap) +} + +// DataDiskImage contains the data disk images information. +type DataDiskImage struct { + // Lun - READ-ONLY; Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. + Lun *int32 `json:"lun,omitempty"` +} + +// MarshalJSON is the custom marshaler for DataDiskImage. +func (ddi DataDiskImage) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// DataDiskImageEncryption contains encryption settings for a data disk image. +type DataDiskImageEncryption struct { + // Lun - This property specifies the logical unit number of the data disk. This value is used to identify data disks within the Virtual Machine and therefore must be unique for each data disk attached to the Virtual Machine. + Lun *int32 `json:"lun,omitempty"` + // DiskEncryptionSetID - A relative URI containing the resource ID of the disk encryption set. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty"` +} + +// DedicatedHost specifies information about the Dedicated host. +type DedicatedHost struct { + autorest.Response `json:"-"` + *DedicatedHostProperties `json:"properties,omitempty"` + // Sku - SKU of the dedicated host for Hardware Generation and VM family. Only name is required to be set. List Microsoft.Compute SKUs for a list of possible values. + Sku *Sku `json:"sku,omitempty"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for DedicatedHost. 
+func (dh DedicatedHost) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dh.DedicatedHostProperties != nil { + objectMap["properties"] = dh.DedicatedHostProperties + } + if dh.Sku != nil { + objectMap["sku"] = dh.Sku + } + if dh.Location != nil { + objectMap["location"] = dh.Location + } + if dh.Tags != nil { + objectMap["tags"] = dh.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for DedicatedHost struct. +func (dh *DedicatedHost) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var dedicatedHostProperties DedicatedHostProperties + err = json.Unmarshal(*v, &dedicatedHostProperties) + if err != nil { + return err + } + dh.DedicatedHostProperties = &dedicatedHostProperties + } + case "sku": + if v != nil { + var sku Sku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + dh.Sku = &sku + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + dh.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + dh.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + dh.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + dh.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + dh.Tags = tags + } + } + } + + return nil +} + +// DedicatedHostAllocatableVM represents the dedicated host unutilized capacity in terms of a specific VM +// size. +type DedicatedHostAllocatableVM struct { + // VMSize - VM size in terms of which the unutilized capacity is represented. + VMSize *string `json:"vmSize,omitempty"` + // Count - Maximum number of VMs of size vmSize that can fit in the dedicated host's remaining capacity. + Count *float64 `json:"count,omitempty"` +} + +// DedicatedHostAvailableCapacity dedicated host unutilized capacity. +type DedicatedHostAvailableCapacity struct { + // AllocatableVMs - The unutilized capacity of the dedicated host represented in terms of each VM size that is allowed to be deployed to the dedicated host. + AllocatableVMs *[]DedicatedHostAllocatableVM `json:"allocatableVMs,omitempty"` +} + +// DedicatedHostGroup specifies information about the dedicated host group that the dedicated hosts should +// be assigned to.
Currently, a dedicated host can only be added to a dedicated host group at +// creation time. An existing dedicated host cannot be added to another dedicated host group. +type DedicatedHostGroup struct { + autorest.Response `json:"-"` + *DedicatedHostGroupProperties `json:"properties,omitempty"` + // Zones - Availability Zone to use for this host group. Only single zone is supported. The zone can be assigned only during creation. If not provided, the group supports all zones in the region. If provided, enforces each host in the group to be in the same zone. + Zones *[]string `json:"zones,omitempty"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for DedicatedHostGroup. +func (dhg DedicatedHostGroup) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dhg.DedicatedHostGroupProperties != nil { + objectMap["properties"] = dhg.DedicatedHostGroupProperties + } + if dhg.Zones != nil { + objectMap["zones"] = dhg.Zones + } + if dhg.Location != nil { + objectMap["location"] = dhg.Location + } + if dhg.Tags != nil { + objectMap["tags"] = dhg.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for DedicatedHostGroup struct. +func (dhg *DedicatedHostGroup) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var dedicatedHostGroupProperties DedicatedHostGroupProperties + err = json.Unmarshal(*v, &dedicatedHostGroupProperties) + if err != nil { + return err + } + dhg.DedicatedHostGroupProperties = &dedicatedHostGroupProperties + } + case "zones": + if v != nil { + var zones []string + err = json.Unmarshal(*v, &zones) + if err != nil { + return err + } + dhg.Zones = &zones + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + dhg.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + dhg.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + dhg.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + dhg.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + dhg.Tags = tags + } + } + } + + return nil +} + +// DedicatedHostGroupInstanceView ... +type DedicatedHostGroupInstanceView struct { + // Hosts - List of instance view of the dedicated hosts under the dedicated host group. + Hosts *[]DedicatedHostInstanceViewWithName `json:"hosts,omitempty"` +} + +// DedicatedHostGroupListResult the List Dedicated Host Group with resource group response. +type DedicatedHostGroupListResult struct { + autorest.Response `json:"-"` + // Value - The list of dedicated host groups + Value *[]DedicatedHostGroup `json:"value,omitempty"` + // NextLink - The URI to fetch the next page of Dedicated Host Groups. 
Call ListNext() with this URI to fetch the next page of Dedicated Host Groups. + NextLink *string `json:"nextLink,omitempty"` +} + +// DedicatedHostGroupListResultIterator provides access to a complete listing of DedicatedHostGroup values. +type DedicatedHostGroupListResultIterator struct { + i int + page DedicatedHostGroupListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *DedicatedHostGroupListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostGroupListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *DedicatedHostGroupListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter DedicatedHostGroupListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter DedicatedHostGroupListResultIterator) Response() DedicatedHostGroupListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter DedicatedHostGroupListResultIterator) Value() DedicatedHostGroup { + if !iter.page.NotDone() { + return DedicatedHostGroup{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the DedicatedHostGroupListResultIterator type. +func NewDedicatedHostGroupListResultIterator(page DedicatedHostGroupListResultPage) DedicatedHostGroupListResultIterator { + return DedicatedHostGroupListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (dhglr DedicatedHostGroupListResult) IsEmpty() bool { + return dhglr.Value == nil || len(*dhglr.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (dhglr DedicatedHostGroupListResult) hasNextLink() bool { + return dhglr.NextLink != nil && len(*dhglr.NextLink) != 0 +} + +// dedicatedHostGroupListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (dhglr DedicatedHostGroupListResult) dedicatedHostGroupListResultPreparer(ctx context.Context) (*http.Request, error) { + if !dhglr.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(dhglr.NextLink))) +} + +// DedicatedHostGroupListResultPage contains a page of DedicatedHostGroup values. 
+type DedicatedHostGroupListResultPage struct { + fn func(context.Context, DedicatedHostGroupListResult) (DedicatedHostGroupListResult, error) + dhglr DedicatedHostGroupListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *DedicatedHostGroupListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostGroupListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.dhglr) + if err != nil { + return err + } + page.dhglr = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *DedicatedHostGroupListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DedicatedHostGroupListResultPage) NotDone() bool { + return !page.dhglr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page DedicatedHostGroupListResultPage) Response() DedicatedHostGroupListResult { + return page.dhglr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DedicatedHostGroupListResultPage) Values() []DedicatedHostGroup { + if page.dhglr.IsEmpty() { + return nil + } + return *page.dhglr.Value +} + +// Creates a new instance of the DedicatedHostGroupListResultPage type. +func NewDedicatedHostGroupListResultPage(cur DedicatedHostGroupListResult, getNextPage func(context.Context, DedicatedHostGroupListResult) (DedicatedHostGroupListResult, error)) DedicatedHostGroupListResultPage { + return DedicatedHostGroupListResultPage{ + fn: getNextPage, + dhglr: cur, + } +} + +// DedicatedHostGroupProperties dedicated Host Group Properties. +type DedicatedHostGroupProperties struct { + // PlatformFaultDomainCount - Number of fault domains that the host group can span. + PlatformFaultDomainCount *int32 `json:"platformFaultDomainCount,omitempty"` + // Hosts - READ-ONLY; A list of references to all dedicated hosts in the dedicated host group. + Hosts *[]SubResourceReadOnly `json:"hosts,omitempty"` + // InstanceView - READ-ONLY; The dedicated host group instance view, which has the list of instance view of the dedicated hosts under the dedicated host group. + InstanceView *DedicatedHostGroupInstanceView `json:"instanceView,omitempty"` + // SupportAutomaticPlacement - Specifies whether virtual machines or virtual machine scale sets can be placed automatically on the dedicated host group. Automatic placement means resources are allocated on dedicated hosts, that are chosen by Azure, under the dedicated host group. The value is defaulted to 'false' when not provided.

Minimum api-version: 2020-06-01. + SupportAutomaticPlacement *bool `json:"supportAutomaticPlacement,omitempty"` +} + +// MarshalJSON is the custom marshaler for DedicatedHostGroupProperties. +func (dhgp DedicatedHostGroupProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dhgp.PlatformFaultDomainCount != nil { + objectMap["platformFaultDomainCount"] = dhgp.PlatformFaultDomainCount + } + if dhgp.SupportAutomaticPlacement != nil { + objectMap["supportAutomaticPlacement"] = dhgp.SupportAutomaticPlacement + } + return json.Marshal(objectMap) +} + +// DedicatedHostGroupUpdate specifies information about the dedicated host group that the dedicated host +// should be assigned to. Only tags may be updated. +type DedicatedHostGroupUpdate struct { + *DedicatedHostGroupProperties `json:"properties,omitempty"` + // Zones - Availability Zone to use for this host group. Only single zone is supported. The zone can be assigned only during creation. If not provided, the group supports all zones in the region. If provided, enforces each host in the group to be in the same zone. + Zones *[]string `json:"zones,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for DedicatedHostGroupUpdate. +func (dhgu DedicatedHostGroupUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dhgu.DedicatedHostGroupProperties != nil { + objectMap["properties"] = dhgu.DedicatedHostGroupProperties + } + if dhgu.Zones != nil { + objectMap["zones"] = dhgu.Zones + } + if dhgu.Tags != nil { + objectMap["tags"] = dhgu.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for DedicatedHostGroupUpdate struct. +func (dhgu *DedicatedHostGroupUpdate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var dedicatedHostGroupProperties DedicatedHostGroupProperties + err = json.Unmarshal(*v, &dedicatedHostGroupProperties) + if err != nil { + return err + } + dhgu.DedicatedHostGroupProperties = &dedicatedHostGroupProperties + } + case "zones": + if v != nil { + var zones []string + err = json.Unmarshal(*v, &zones) + if err != nil { + return err + } + dhgu.Zones = &zones + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + dhgu.Tags = tags + } + } + } + + return nil +} + +// DedicatedHostInstanceView the instance view of a dedicated host. +type DedicatedHostInstanceView struct { + // AssetID - READ-ONLY; Specifies the unique id of the dedicated physical machine on which the dedicated host resides. + AssetID *string `json:"assetId,omitempty"` + // AvailableCapacity - Unutilized capacity of the dedicated host. + AvailableCapacity *DedicatedHostAvailableCapacity `json:"availableCapacity,omitempty"` + // Statuses - The resource status information. + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// MarshalJSON is the custom marshaler for DedicatedHostInstanceView. 
+func (dhiv DedicatedHostInstanceView) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dhiv.AvailableCapacity != nil { + objectMap["availableCapacity"] = dhiv.AvailableCapacity + } + if dhiv.Statuses != nil { + objectMap["statuses"] = dhiv.Statuses + } + return json.Marshal(objectMap) +} + +// DedicatedHostInstanceViewWithName the instance view of a dedicated host that includes the name of the +// dedicated host. It is used for the response to the instance view of a dedicated host group. +type DedicatedHostInstanceViewWithName struct { + // Name - READ-ONLY; The name of the dedicated host. + Name *string `json:"name,omitempty"` + // AssetID - READ-ONLY; Specifies the unique id of the dedicated physical machine on which the dedicated host resides. + AssetID *string `json:"assetId,omitempty"` + // AvailableCapacity - Unutilized capacity of the dedicated host. + AvailableCapacity *DedicatedHostAvailableCapacity `json:"availableCapacity,omitempty"` + // Statuses - The resource status information. + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// MarshalJSON is the custom marshaler for DedicatedHostInstanceViewWithName. +func (dhivwn DedicatedHostInstanceViewWithName) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dhivwn.AvailableCapacity != nil { + objectMap["availableCapacity"] = dhivwn.AvailableCapacity + } + if dhivwn.Statuses != nil { + objectMap["statuses"] = dhivwn.Statuses + } + return json.Marshal(objectMap) +} + +// DedicatedHostListResult the list dedicated host operation response. +type DedicatedHostListResult struct { + autorest.Response `json:"-"` + // Value - The list of dedicated hosts + Value *[]DedicatedHost `json:"value,omitempty"` + // NextLink - The URI to fetch the next page of dedicated hosts. Call ListNext() with this URI to fetch the next page of dedicated hosts. + NextLink *string `json:"nextLink,omitempty"` +} + +// DedicatedHostListResultIterator provides access to a complete listing of DedicatedHost values. +type DedicatedHostListResultIterator struct { + i int + page DedicatedHostListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *DedicatedHostListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *DedicatedHostListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter DedicatedHostListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. 
+func (iter DedicatedHostListResultIterator) Response() DedicatedHostListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter DedicatedHostListResultIterator) Value() DedicatedHost { + if !iter.page.NotDone() { + return DedicatedHost{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the DedicatedHostListResultIterator type. +func NewDedicatedHostListResultIterator(page DedicatedHostListResultPage) DedicatedHostListResultIterator { + return DedicatedHostListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (dhlr DedicatedHostListResult) IsEmpty() bool { + return dhlr.Value == nil || len(*dhlr.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (dhlr DedicatedHostListResult) hasNextLink() bool { + return dhlr.NextLink != nil && len(*dhlr.NextLink) != 0 +} + +// dedicatedHostListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (dhlr DedicatedHostListResult) dedicatedHostListResultPreparer(ctx context.Context) (*http.Request, error) { + if !dhlr.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(dhlr.NextLink))) +} + +// DedicatedHostListResultPage contains a page of DedicatedHost values. +type DedicatedHostListResultPage struct { + fn func(context.Context, DedicatedHostListResult) (DedicatedHostListResult, error) + dhlr DedicatedHostListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *DedicatedHostListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.dhlr) + if err != nil { + return err + } + page.dhlr = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *DedicatedHostListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DedicatedHostListResultPage) NotDone() bool { + return !page.dhlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page DedicatedHostListResultPage) Response() DedicatedHostListResult { + return page.dhlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DedicatedHostListResultPage) Values() []DedicatedHost { + if page.dhlr.IsEmpty() { + return nil + } + return *page.dhlr.Value +} + +// Creates a new instance of the DedicatedHostListResultPage type. 
+func NewDedicatedHostListResultPage(cur DedicatedHostListResult, getNextPage func(context.Context, DedicatedHostListResult) (DedicatedHostListResult, error)) DedicatedHostListResultPage { + return DedicatedHostListResultPage{ + fn: getNextPage, + dhlr: cur, + } +} + +// DedicatedHostProperties properties of the dedicated host. +type DedicatedHostProperties struct { + // PlatformFaultDomain - Fault domain of the dedicated host within a dedicated host group. + PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty"` + // AutoReplaceOnFailure - Specifies whether the dedicated host should be replaced automatically in case of a failure. The value is defaulted to 'true' when not provided. + AutoReplaceOnFailure *bool `json:"autoReplaceOnFailure,omitempty"` + // HostID - READ-ONLY; A unique id generated and assigned to the dedicated host by the platform.

Does not change throughout the lifetime of the host. + HostID *string `json:"hostId,omitempty"` + // VirtualMachines - READ-ONLY; A list of references to all virtual machines in the Dedicated Host. + VirtualMachines *[]SubResourceReadOnly `json:"virtualMachines,omitempty"` + // LicenseType - Specifies the software license type that will be applied to the VMs deployed on the dedicated host.

Possible values are: **None**, **Windows_Server_Hybrid**, **Windows_Server_Perpetual**.

Default: **None**. Possible values include: 'DedicatedHostLicenseTypesNone', 'DedicatedHostLicenseTypesWindowsServerHybrid', 'DedicatedHostLicenseTypesWindowsServerPerpetual' + LicenseType DedicatedHostLicenseTypes `json:"licenseType,omitempty"` + // ProvisioningTime - READ-ONLY; The date when the host was first provisioned. + ProvisioningTime *date.Time `json:"provisioningTime,omitempty"` + // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. + ProvisioningState *string `json:"provisioningState,omitempty"` + // InstanceView - READ-ONLY; The dedicated host instance view. + InstanceView *DedicatedHostInstanceView `json:"instanceView,omitempty"` +} + +// MarshalJSON is the custom marshaler for DedicatedHostProperties. +func (dhp DedicatedHostProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dhp.PlatformFaultDomain != nil { + objectMap["platformFaultDomain"] = dhp.PlatformFaultDomain + } + if dhp.AutoReplaceOnFailure != nil { + objectMap["autoReplaceOnFailure"] = dhp.AutoReplaceOnFailure + } + if dhp.LicenseType != "" { + objectMap["licenseType"] = dhp.LicenseType + } + return json.Marshal(objectMap) +} + +// DedicatedHostsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type DedicatedHostsCreateOrUpdateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(DedicatedHostsClient) (DedicatedHost, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DedicatedHostsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DedicatedHostsCreateOrUpdateFuture.Result. +func (future *DedicatedHostsCreateOrUpdateFuture) result(client DedicatedHostsClient) (dh DedicatedHost, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DedicatedHostsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + dh.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.DedicatedHostsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if dh.Response.Response, err = future.GetResult(sender); err == nil && dh.Response.Response.StatusCode != http.StatusNoContent { + dh, err = client.CreateOrUpdateResponder(dh.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DedicatedHostsCreateOrUpdateFuture", "Result", dh.Response.Response, "Failure responding to request") + } + } + return +} + +// DedicatedHostsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type DedicatedHostsDeleteFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(DedicatedHostsClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *DedicatedHostsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DedicatedHostsDeleteFuture.Result. +func (future *DedicatedHostsDeleteFuture) result(client DedicatedHostsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DedicatedHostsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.DedicatedHostsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// DedicatedHostsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type DedicatedHostsUpdateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(DedicatedHostsClient) (DedicatedHost, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DedicatedHostsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DedicatedHostsUpdateFuture.Result. +func (future *DedicatedHostsUpdateFuture) result(client DedicatedHostsClient) (dh DedicatedHost, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DedicatedHostsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + dh.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.DedicatedHostsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if dh.Response.Response, err = future.GetResult(sender); err == nil && dh.Response.Response.StatusCode != http.StatusNoContent { + dh, err = client.UpdateResponder(dh.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DedicatedHostsUpdateFuture", "Result", dh.Response.Response, "Failure responding to request") + } + } + return +} + +// DedicatedHostUpdate specifies information about the dedicated host. Only tags, autoReplaceOnFailure and +// licenseType may be updated. +type DedicatedHostUpdate struct { + *DedicatedHostProperties `json:"properties,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for DedicatedHostUpdate. +func (dhu DedicatedHostUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dhu.DedicatedHostProperties != nil { + objectMap["properties"] = dhu.DedicatedHostProperties + } + if dhu.Tags != nil { + objectMap["tags"] = dhu.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for DedicatedHostUpdate struct. 
+func (dhu *DedicatedHostUpdate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var dedicatedHostProperties DedicatedHostProperties + err = json.Unmarshal(*v, &dedicatedHostProperties) + if err != nil { + return err + } + dhu.DedicatedHostProperties = &dedicatedHostProperties + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + dhu.Tags = tags + } + } + } + + return nil +} + +// DiagnosticsProfile specifies the boot diagnostic settings state.

Minimum api-version: +// 2015-06-15. +type DiagnosticsProfile struct { + // BootDiagnostics - Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status.

You can easily view the output of your console log.

Azure also enables you to see a screenshot of the VM from the hypervisor. + BootDiagnostics *BootDiagnostics `json:"bootDiagnostics,omitempty"` +} + +// DiffDiskSettings describes the parameters of ephemeral disk settings that can be specified for operating +// system disk.

NOTE: The ephemeral disk settings can only be specified for managed disk. +type DiffDiskSettings struct { + // Option - Specifies the ephemeral disk settings for operating system disk. Possible values include: 'DiffDiskOptionsLocal' + Option DiffDiskOptions `json:"option,omitempty"` + // Placement - Specifies the ephemeral disk placement for operating system disk.

Possible values are: **CacheDisk**, **ResourceDisk**. Default: **CacheDisk** if one is configured for the VM size, otherwise **ResourceDisk** is used.

Refer to VM size documentation for Windows VM at https://docs.microsoft.com/azure/virtual-machines/windows/sizes and Linux VM at https://docs.microsoft.com/azure/virtual-machines/linux/sizes to check which VM sizes exposes a cache disk. Possible values include: 'DiffDiskPlacementCacheDisk', 'DiffDiskPlacementResourceDisk' + Placement DiffDiskPlacement `json:"placement,omitempty"` } // Disallowed describes the disallowed disk types. @@ -1634,63 +3970,2988 @@ type Disallowed struct { DiskTypes *[]string `json:"diskTypes,omitempty"` } -// DisallowedConfiguration specifies the disallowed configuration for a virtual machine image. -type DisallowedConfiguration struct { - // VMDiskType - VM disk types which are disallowed. Possible values include: 'VMDiskTypesNone', 'VMDiskTypesUnmanaged' - VMDiskType VMDiskTypes `json:"vmDiskType,omitempty"` +// DisallowedConfiguration specifies the disallowed configuration for a virtual machine image. +type DisallowedConfiguration struct { + // VMDiskType - VM disk types which are disallowed. Possible values include: 'VMDiskTypesNone', 'VMDiskTypesUnmanaged' + VMDiskType VMDiskTypes `json:"vmDiskType,omitempty"` +} + +// Disk disk resource. +type Disk struct { + autorest.Response `json:"-"` + // ManagedBy - READ-ONLY; A relative URI containing the ID of the VM that has the disk attached. + ManagedBy *string `json:"managedBy,omitempty"` + // ManagedByExtended - READ-ONLY; List of relative URIs containing the IDs of the VMs that have the disk attached. maxShares should be set to a value greater than one for disks to allow attaching them to multiple VMs. + ManagedByExtended *[]string `json:"managedByExtended,omitempty"` + Sku *DiskSku `json:"sku,omitempty"` + // Zones - The Logical zone list for Disk. + Zones *[]string `json:"zones,omitempty"` + // ExtendedLocation - The extended location where the disk will be created. Extended location cannot be changed. + ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"` + *DiskProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Disk. +func (d Disk) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if d.Sku != nil { + objectMap["sku"] = d.Sku + } + if d.Zones != nil { + objectMap["zones"] = d.Zones + } + if d.ExtendedLocation != nil { + objectMap["extendedLocation"] = d.ExtendedLocation + } + if d.DiskProperties != nil { + objectMap["properties"] = d.DiskProperties + } + if d.Location != nil { + objectMap["location"] = d.Location + } + if d.Tags != nil { + objectMap["tags"] = d.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Disk struct. 
+func (d *Disk) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "managedBy": + if v != nil { + var managedBy string + err = json.Unmarshal(*v, &managedBy) + if err != nil { + return err + } + d.ManagedBy = &managedBy + } + case "managedByExtended": + if v != nil { + var managedByExtended []string + err = json.Unmarshal(*v, &managedByExtended) + if err != nil { + return err + } + d.ManagedByExtended = &managedByExtended + } + case "sku": + if v != nil { + var sku DiskSku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + d.Sku = &sku + } + case "zones": + if v != nil { + var zones []string + err = json.Unmarshal(*v, &zones) + if err != nil { + return err + } + d.Zones = &zones + } + case "extendedLocation": + if v != nil { + var extendedLocation ExtendedLocation + err = json.Unmarshal(*v, &extendedLocation) + if err != nil { + return err + } + d.ExtendedLocation = &extendedLocation + } + case "properties": + if v != nil { + var diskProperties DiskProperties + err = json.Unmarshal(*v, &diskProperties) + if err != nil { + return err + } + d.DiskProperties = &diskProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + d.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + d.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + d.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + d.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + d.Tags = tags + } + } + } + + return nil +} + +// DiskAccess disk access resource. +type DiskAccess struct { + autorest.Response `json:"-"` + *DiskAccessProperties `json:"properties,omitempty"` + // ExtendedLocation - The extended location where the disk access will be created. Extended location cannot be changed. + ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for DiskAccess. +func (da DiskAccess) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if da.DiskAccessProperties != nil { + objectMap["properties"] = da.DiskAccessProperties + } + if da.ExtendedLocation != nil { + objectMap["extendedLocation"] = da.ExtendedLocation + } + if da.Location != nil { + objectMap["location"] = da.Location + } + if da.Tags != nil { + objectMap["tags"] = da.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for DiskAccess struct. 
+func (da *DiskAccess) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var diskAccessProperties DiskAccessProperties + err = json.Unmarshal(*v, &diskAccessProperties) + if err != nil { + return err + } + da.DiskAccessProperties = &diskAccessProperties + } + case "extendedLocation": + if v != nil { + var extendedLocation ExtendedLocation + err = json.Unmarshal(*v, &extendedLocation) + if err != nil { + return err + } + da.ExtendedLocation = &extendedLocation + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + da.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + da.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + da.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + da.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + da.Tags = tags + } + } + } + + return nil +} + +// DiskAccessesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type DiskAccessesCreateOrUpdateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(DiskAccessesClient) (DiskAccess, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DiskAccessesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DiskAccessesCreateOrUpdateFuture.Result. +func (future *DiskAccessesCreateOrUpdateFuture) result(client DiskAccessesClient) (da DiskAccess, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskAccessesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + da.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.DiskAccessesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if da.Response.Response, err = future.GetResult(sender); err == nil && da.Response.Response.StatusCode != http.StatusNoContent { + da, err = client.CreateOrUpdateResponder(da.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskAccessesCreateOrUpdateFuture", "Result", da.Response.Response, "Failure responding to request") + } + } + return +} + +// DiskAccessesDeleteAPrivateEndpointConnectionFuture an abstraction for monitoring and retrieving the +// results of a long-running operation. +type DiskAccessesDeleteAPrivateEndpointConnectionFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. 
+ // If the operation has not completed it will return an error. + Result func(DiskAccessesClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DiskAccessesDeleteAPrivateEndpointConnectionFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DiskAccessesDeleteAPrivateEndpointConnectionFuture.Result. +func (future *DiskAccessesDeleteAPrivateEndpointConnectionFuture) result(client DiskAccessesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskAccessesDeleteAPrivateEndpointConnectionFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.DiskAccessesDeleteAPrivateEndpointConnectionFuture") + return + } + ar.Response = future.Response() + return +} + +// DiskAccessesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type DiskAccessesDeleteFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(DiskAccessesClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DiskAccessesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DiskAccessesDeleteFuture.Result. +func (future *DiskAccessesDeleteFuture) result(client DiskAccessesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskAccessesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.DiskAccessesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// DiskAccessesUpdateAPrivateEndpointConnectionFuture an abstraction for monitoring and retrieving the +// results of a long-running operation. +type DiskAccessesUpdateAPrivateEndpointConnectionFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(DiskAccessesClient) (PrivateEndpointConnection, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DiskAccessesUpdateAPrivateEndpointConnectionFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DiskAccessesUpdateAPrivateEndpointConnectionFuture.Result. 
+func (future *DiskAccessesUpdateAPrivateEndpointConnectionFuture) result(client DiskAccessesClient) (pec PrivateEndpointConnection, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskAccessesUpdateAPrivateEndpointConnectionFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + pec.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.DiskAccessesUpdateAPrivateEndpointConnectionFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pec.Response.Response, err = future.GetResult(sender); err == nil && pec.Response.Response.StatusCode != http.StatusNoContent { + pec, err = client.UpdateAPrivateEndpointConnectionResponder(pec.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskAccessesUpdateAPrivateEndpointConnectionFuture", "Result", pec.Response.Response, "Failure responding to request") + } + } + return +} + +// DiskAccessesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type DiskAccessesUpdateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(DiskAccessesClient) (DiskAccess, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DiskAccessesUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DiskAccessesUpdateFuture.Result. +func (future *DiskAccessesUpdateFuture) result(client DiskAccessesClient) (da DiskAccess, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskAccessesUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + da.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.DiskAccessesUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if da.Response.Response, err = future.GetResult(sender); err == nil && da.Response.Response.StatusCode != http.StatusNoContent { + da, err = client.UpdateResponder(da.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskAccessesUpdateFuture", "Result", da.Response.Response, "Failure responding to request") + } + } + return +} + +// DiskAccessList the List disk access operation response. +type DiskAccessList struct { + autorest.Response `json:"-"` + // Value - A list of disk access resources. + Value *[]DiskAccess `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of disk access resources. Call ListNext() with this to fetch the next page of disk access resources. + NextLink *string `json:"nextLink,omitempty"` +} + +// DiskAccessListIterator provides access to a complete listing of DiskAccess values. 
+type DiskAccessListIterator struct { + i int + page DiskAccessListPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *DiskAccessListIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessListIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *DiskAccessListIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter DiskAccessListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter DiskAccessListIterator) Response() DiskAccessList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter DiskAccessListIterator) Value() DiskAccess { + if !iter.page.NotDone() { + return DiskAccess{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the DiskAccessListIterator type. +func NewDiskAccessListIterator(page DiskAccessListPage) DiskAccessListIterator { + return DiskAccessListIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (dal DiskAccessList) IsEmpty() bool { + return dal.Value == nil || len(*dal.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (dal DiskAccessList) hasNextLink() bool { + return dal.NextLink != nil && len(*dal.NextLink) != 0 +} + +// diskAccessListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (dal DiskAccessList) diskAccessListPreparer(ctx context.Context) (*http.Request, error) { + if !dal.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(dal.NextLink))) +} + +// DiskAccessListPage contains a page of DiskAccess values. +type DiskAccessListPage struct { + fn func(context.Context, DiskAccessList) (DiskAccessList, error) + dal DiskAccessList +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *DiskAccessListPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessListPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.dal) + if err != nil { + return err + } + page.dal = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *DiskAccessListPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DiskAccessListPage) NotDone() bool { + return !page.dal.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page DiskAccessListPage) Response() DiskAccessList { + return page.dal +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DiskAccessListPage) Values() []DiskAccess { + if page.dal.IsEmpty() { + return nil + } + return *page.dal.Value +} + +// Creates a new instance of the DiskAccessListPage type. +func NewDiskAccessListPage(cur DiskAccessList, getNextPage func(context.Context, DiskAccessList) (DiskAccessList, error)) DiskAccessListPage { + return DiskAccessListPage{ + fn: getNextPage, + dal: cur, + } +} + +// DiskAccessProperties ... +type DiskAccessProperties struct { + // PrivateEndpointConnections - READ-ONLY; A readonly collection of private endpoint connections created on the disk. Currently only one endpoint connection is supported. + PrivateEndpointConnections *[]PrivateEndpointConnection `json:"privateEndpointConnections,omitempty"` + // ProvisioningState - READ-ONLY; The disk access resource provisioning state. + ProvisioningState *string `json:"provisioningState,omitempty"` + // TimeCreated - READ-ONLY; The time when the disk access was created. + TimeCreated *date.Time `json:"timeCreated,omitempty"` +} + +// MarshalJSON is the custom marshaler for DiskAccessProperties. +func (dap DiskAccessProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// DiskAccessUpdate used for updating a disk access resource. +type DiskAccessUpdate struct { + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for DiskAccessUpdate. +func (dau DiskAccessUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dau.Tags != nil { + objectMap["tags"] = dau.Tags + } + return json.Marshal(objectMap) +} + +// DiskEncryptionSet disk encryption set resource. 
+type DiskEncryptionSet struct { + autorest.Response `json:"-"` + Identity *EncryptionSetIdentity `json:"identity,omitempty"` + *EncryptionSetProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for DiskEncryptionSet. +func (desVar DiskEncryptionSet) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if desVar.Identity != nil { + objectMap["identity"] = desVar.Identity + } + if desVar.EncryptionSetProperties != nil { + objectMap["properties"] = desVar.EncryptionSetProperties + } + if desVar.Location != nil { + objectMap["location"] = desVar.Location + } + if desVar.Tags != nil { + objectMap["tags"] = desVar.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for DiskEncryptionSet struct. +func (desVar *DiskEncryptionSet) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "identity": + if v != nil { + var identity EncryptionSetIdentity + err = json.Unmarshal(*v, &identity) + if err != nil { + return err + } + desVar.Identity = &identity + } + case "properties": + if v != nil { + var encryptionSetProperties EncryptionSetProperties + err = json.Unmarshal(*v, &encryptionSetProperties) + if err != nil { + return err + } + desVar.EncryptionSetProperties = &encryptionSetProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + desVar.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + desVar.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + desVar.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + desVar.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + desVar.Tags = tags + } + } + } + + return nil +} + +// DiskEncryptionSetList the List disk encryption set operation response. +type DiskEncryptionSetList struct { + autorest.Response `json:"-"` + // Value - A list of disk encryption sets. + Value *[]DiskEncryptionSet `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of disk encryption sets. Call ListNext() with this to fetch the next page of disk encryption sets. + NextLink *string `json:"nextLink,omitempty"` +} + +// DiskEncryptionSetListIterator provides access to a complete listing of DiskEncryptionSet values. +type DiskEncryptionSetListIterator struct { + i int + page DiskEncryptionSetListPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
+func (iter *DiskEncryptionSetListIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetListIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *DiskEncryptionSetListIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter DiskEncryptionSetListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter DiskEncryptionSetListIterator) Response() DiskEncryptionSetList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter DiskEncryptionSetListIterator) Value() DiskEncryptionSet { + if !iter.page.NotDone() { + return DiskEncryptionSet{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the DiskEncryptionSetListIterator type. +func NewDiskEncryptionSetListIterator(page DiskEncryptionSetListPage) DiskEncryptionSetListIterator { + return DiskEncryptionSetListIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (desl DiskEncryptionSetList) IsEmpty() bool { + return desl.Value == nil || len(*desl.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (desl DiskEncryptionSetList) hasNextLink() bool { + return desl.NextLink != nil && len(*desl.NextLink) != 0 +} + +// diskEncryptionSetListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (desl DiskEncryptionSetList) diskEncryptionSetListPreparer(ctx context.Context) (*http.Request, error) { + if !desl.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(desl.NextLink))) +} + +// DiskEncryptionSetListPage contains a page of DiskEncryptionSet values. +type DiskEncryptionSetListPage struct { + fn func(context.Context, DiskEncryptionSetList) (DiskEncryptionSetList, error) + desl DiskEncryptionSetList +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *DiskEncryptionSetListPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetListPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.desl) + if err != nil { + return err + } + page.desl = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *DiskEncryptionSetListPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DiskEncryptionSetListPage) NotDone() bool { + return !page.desl.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page DiskEncryptionSetListPage) Response() DiskEncryptionSetList { + return page.desl +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DiskEncryptionSetListPage) Values() []DiskEncryptionSet { + if page.desl.IsEmpty() { + return nil + } + return *page.desl.Value +} + +// Creates a new instance of the DiskEncryptionSetListPage type. +func NewDiskEncryptionSetListPage(cur DiskEncryptionSetList, getNextPage func(context.Context, DiskEncryptionSetList) (DiskEncryptionSetList, error)) DiskEncryptionSetListPage { + return DiskEncryptionSetListPage{ + fn: getNextPage, + desl: cur, + } +} + +// DiskEncryptionSetParameters describes the parameter of customer managed disk encryption set resource id +// that can be specified for disk.

NOTE: The disk encryption set resource id can only be specified +// for managed disk. Please refer https://aka.ms/mdssewithcmkoverview for more details. +type DiskEncryptionSetParameters struct { + // ID - Resource Id + ID *string `json:"id,omitempty"` +} + +// DiskEncryptionSetsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type DiskEncryptionSetsCreateOrUpdateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(DiskEncryptionSetsClient) (DiskEncryptionSet, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DiskEncryptionSetsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DiskEncryptionSetsCreateOrUpdateFuture.Result. +func (future *DiskEncryptionSetsCreateOrUpdateFuture) result(client DiskEncryptionSetsClient) (desVar DiskEncryptionSet, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + desVar.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.DiskEncryptionSetsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if desVar.Response.Response, err = future.GetResult(sender); err == nil && desVar.Response.Response.StatusCode != http.StatusNoContent { + desVar, err = client.CreateOrUpdateResponder(desVar.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsCreateOrUpdateFuture", "Result", desVar.Response.Response, "Failure responding to request") + } + } + return +} + +// DiskEncryptionSetsDeleteFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type DiskEncryptionSetsDeleteFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(DiskEncryptionSetsClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DiskEncryptionSetsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DiskEncryptionSetsDeleteFuture.Result. 
+func (future *DiskEncryptionSetsDeleteFuture) result(client DiskEncryptionSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.DiskEncryptionSetsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// DiskEncryptionSetsUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type DiskEncryptionSetsUpdateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(DiskEncryptionSetsClient) (DiskEncryptionSet, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DiskEncryptionSetsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DiskEncryptionSetsUpdateFuture.Result. +func (future *DiskEncryptionSetsUpdateFuture) result(client DiskEncryptionSetsClient) (desVar DiskEncryptionSet, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + desVar.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.DiskEncryptionSetsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if desVar.Response.Response, err = future.GetResult(sender); err == nil && desVar.Response.Response.StatusCode != http.StatusNoContent { + desVar, err = client.UpdateResponder(desVar.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsUpdateFuture", "Result", desVar.Response.Response, "Failure responding to request") + } + } + return +} + +// DiskEncryptionSettings describes a Encryption Settings for a Disk +type DiskEncryptionSettings struct { + // DiskEncryptionKey - Specifies the location of the disk encryption key, which is a Key Vault Secret. + DiskEncryptionKey *KeyVaultSecretReference `json:"diskEncryptionKey,omitempty"` + // KeyEncryptionKey - Specifies the location of the key encryption key in Key Vault. + KeyEncryptionKey *KeyVaultKeyReference `json:"keyEncryptionKey,omitempty"` + // Enabled - Specifies whether disk encryption should be enabled on the virtual machine. + Enabled *bool `json:"enabled,omitempty"` +} + +// DiskEncryptionSetUpdate disk encryption set update resource. +type DiskEncryptionSetUpdate struct { + *DiskEncryptionSetUpdateProperties `json:"properties,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` + Identity *EncryptionSetIdentity `json:"identity,omitempty"` +} + +// MarshalJSON is the custom marshaler for DiskEncryptionSetUpdate. 
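// Illustrative sketch, not part of this diff: the DiskEncryptionSetUpdate struct above embeds
// *DiskEncryptionSetUpdateProperties (defined just below), and the custom marshaler that follows
// nests it under a top-level "properties" key. A PATCH body that only turns on key auto-rotation
// could therefore be built roughly like this, using the json and autorest/to imports this file
// already relies on; everything else is defined in this file.
func exampleDiskEncryptionSetPatchBody() ([]byte, error) {
	update := DiskEncryptionSetUpdate{
		DiskEncryptionSetUpdateProperties: &DiskEncryptionSetUpdateProperties{
			RotationToLatestKeyVersionEnabled: to.BoolPtr(true),
		},
	}
	// Marshals as {"properties":{"rotationToLatestKeyVersionEnabled":true}}.
	return json.Marshal(update)
}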
+func (desu DiskEncryptionSetUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if desu.DiskEncryptionSetUpdateProperties != nil { + objectMap["properties"] = desu.DiskEncryptionSetUpdateProperties + } + if desu.Tags != nil { + objectMap["tags"] = desu.Tags + } + if desu.Identity != nil { + objectMap["identity"] = desu.Identity + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for DiskEncryptionSetUpdate struct. +func (desu *DiskEncryptionSetUpdate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var diskEncryptionSetUpdateProperties DiskEncryptionSetUpdateProperties + err = json.Unmarshal(*v, &diskEncryptionSetUpdateProperties) + if err != nil { + return err + } + desu.DiskEncryptionSetUpdateProperties = &diskEncryptionSetUpdateProperties + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + desu.Tags = tags + } + case "identity": + if v != nil { + var identity EncryptionSetIdentity + err = json.Unmarshal(*v, &identity) + if err != nil { + return err + } + desu.Identity = &identity + } + } + } + + return nil +} + +// DiskEncryptionSetUpdateProperties disk encryption set resource update properties. +type DiskEncryptionSetUpdateProperties struct { + // EncryptionType - Possible values include: 'DiskEncryptionSetTypeEncryptionAtRestWithCustomerKey', 'DiskEncryptionSetTypeEncryptionAtRestWithPlatformAndCustomerKeys' + EncryptionType DiskEncryptionSetType `json:"encryptionType,omitempty"` + ActiveKey *KeyForDiskEncryptionSet `json:"activeKey,omitempty"` + // RotationToLatestKeyVersionEnabled - Set this flag to true to enable auto-updating of this disk encryption set to the latest key version. + RotationToLatestKeyVersionEnabled *bool `json:"rotationToLatestKeyVersionEnabled,omitempty"` +} + +// DiskImageEncryption this is the disk image encryption base class. +type DiskImageEncryption struct { + // DiskEncryptionSetID - A relative URI containing the resource ID of the disk encryption set. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty"` +} + +// DiskInstanceView the instance view of the disk. +type DiskInstanceView struct { + // Name - The disk name. + Name *string `json:"name,omitempty"` + // EncryptionSettings - Specifies the encryption settings for the OS Disk.
Minimum api-version: 2015-06-15 + EncryptionSettings *[]DiskEncryptionSettings `json:"encryptionSettings,omitempty"` + // Statuses - The resource status information. + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// DiskList the List Disks operation response. +type DiskList struct { + autorest.Response `json:"-"` + // Value - A list of disks. + Value *[]Disk `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of disks. Call ListNext() with this to fetch the next page of disks. + NextLink *string `json:"nextLink,omitempty"` +} + +// DiskListIterator provides access to a complete listing of Disk values. +type DiskListIterator struct { + i int + page DiskListPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *DiskListIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskListIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *DiskListIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter DiskListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter DiskListIterator) Response() DiskList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter DiskListIterator) Value() Disk { + if !iter.page.NotDone() { + return Disk{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the DiskListIterator type. +func NewDiskListIterator(page DiskListPage) DiskListIterator { + return DiskListIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (dl DiskList) IsEmpty() bool { + return dl.Value == nil || len(*dl.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (dl DiskList) hasNextLink() bool { + return dl.NextLink != nil && len(*dl.NextLink) != 0 +} + +// diskListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (dl DiskList) diskListPreparer(ctx context.Context) (*http.Request, error) { + if !dl.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(dl.NextLink))) +} + +// DiskListPage contains a page of Disk values. +type DiskListPage struct { + fn func(context.Context, DiskList) (DiskList, error) + dl DiskList +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
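// Illustrative sketch, not part of this diff: DiskListIterator above is the usual way to walk
// every page of a List call. This assumes the generated DisksClient elsewhere in the package
// exposes ListComplete(ctx) returning a DiskListIterator (treat that method name as an
// assumption); the iterator methods used here are the ones defined above.
func allDiskNames(ctx context.Context, client DisksClient) ([]string, error) {
	var names []string
	iter, err := client.ListComplete(ctx)
	if err != nil {
		return nil, err
	}
	for ; iter.NotDone(); err = iter.NextWithContext(ctx) {
		if err != nil {
			// NextWithContext leaves the iterator in place on failure, so bail out here.
			return nil, err
		}
		if name := iter.Value().Name; name != nil {
			names = append(names, *name)
		}
	}
	return names, nil
}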
+func (page *DiskListPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskListPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.dl) + if err != nil { + return err + } + page.dl = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *DiskListPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DiskListPage) NotDone() bool { + return !page.dl.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page DiskListPage) Response() DiskList { + return page.dl +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DiskListPage) Values() []Disk { + if page.dl.IsEmpty() { + return nil + } + return *page.dl.Value +} + +// Creates a new instance of the DiskListPage type. +func NewDiskListPage(cur DiskList, getNextPage func(context.Context, DiskList) (DiskList, error)) DiskListPage { + return DiskListPage{ + fn: getNextPage, + dl: cur, + } +} + +// DiskProperties disk resource properties. +type DiskProperties struct { + // TimeCreated - READ-ONLY; The time when the disk was created. + TimeCreated *date.Time `json:"timeCreated,omitempty"` + // OsType - The Operating System type. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'HyperVGenerationV1', 'HyperVGenerationV2' + HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"` + // PurchasePlan - Purchase plan information for the the image from which the OS disk was created. E.g. - {name: 2019-Datacenter, publisher: MicrosoftWindowsServer, product: WindowsServer} + PurchasePlan *PurchasePlan `json:"purchasePlan,omitempty"` + // SupportedCapabilities - List of supported capabilities for the image from which the OS disk was created. + SupportedCapabilities *SupportedCapabilities `json:"supportedCapabilities,omitempty"` + // CreationData - Disk source information. CreationData information cannot be changed after the disk has been created. + CreationData *CreationData `json:"creationData,omitempty"` + // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the disk to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // DiskSizeBytes - READ-ONLY; The size of the disk in bytes. This field is read only. + DiskSizeBytes *int64 `json:"diskSizeBytes,omitempty"` + // UniqueID - READ-ONLY; Unique Guid identifying the resource. 
+ UniqueID *string `json:"uniqueId,omitempty"` + // EncryptionSettingsCollection - Encryption settings collection used for Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot. + EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"` + // ProvisioningState - READ-ONLY; The disk provisioning state. + ProvisioningState *string `json:"provisioningState,omitempty"` + // DiskIOPSReadWrite - The number of IOPS allowed for this disk; only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes. + DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"` + // DiskMBpsReadWrite - The bandwidth allowed for this disk; only settable for UltraSSD disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10. + DiskMBpsReadWrite *int64 `json:"diskMBpsReadWrite,omitempty"` + // DiskIOPSReadOnly - The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes. + DiskIOPSReadOnly *int64 `json:"diskIOPSReadOnly,omitempty"` + // DiskMBpsReadOnly - The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10. + DiskMBpsReadOnly *int64 `json:"diskMBpsReadOnly,omitempty"` + // DiskState - The state of the disk. Possible values include: 'DiskStateUnattached', 'DiskStateAttached', 'DiskStateReserved', 'DiskStateFrozen', 'DiskStateActiveSAS', 'DiskStateActiveSASFrozen', 'DiskStateReadyToUpload', 'DiskStateActiveUpload' + DiskState DiskState `json:"diskState,omitempty"` + // Encryption - Encryption property can be used to encrypt data at rest with customer managed keys or platform managed keys. + Encryption *Encryption `json:"encryption,omitempty"` + // MaxShares - The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time. + MaxShares *int32 `json:"maxShares,omitempty"` + // ShareInfo - READ-ONLY; Details of the list of all VMs that have the disk attached. maxShares should be set to a value greater than one for disks to allow attaching them to multiple VMs. + ShareInfo *[]ShareInfoElement `json:"shareInfo,omitempty"` + // NetworkAccessPolicy - Possible values include: 'NetworkAccessPolicyAllowAll', 'NetworkAccessPolicyAllowPrivate', 'NetworkAccessPolicyDenyAll' + NetworkAccessPolicy NetworkAccessPolicy `json:"networkAccessPolicy,omitempty"` + // DiskAccessID - ARM id of the DiskAccess resource for using private endpoints on disks. + DiskAccessID *string `json:"diskAccessId,omitempty"` + // Tier - Performance tier of the disk (e.g, P4, S10) as described here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/. Does not apply to Ultra disks. + Tier *string `json:"tier,omitempty"` + // BurstingEnabled - Set to true to enable bursting beyond the provisioned performance target of the disk. Bursting is disabled by default. Does not apply to Ultra disks. + BurstingEnabled *bool `json:"burstingEnabled,omitempty"` + // PropertyUpdatesInProgress - READ-ONLY; Properties of the disk for which update is pending. + PropertyUpdatesInProgress *PropertyUpdatesInProgress `json:"propertyUpdatesInProgress,omitempty"` + // SupportsHibernation - Indicates the OS on a disk supports hibernation. 
+ SupportsHibernation *bool `json:"supportsHibernation,omitempty"` + // SecurityProfile - Contains the security related information for the resource. + SecurityProfile *DiskSecurityProfile `json:"securityProfile,omitempty"` + // CompletionPercent - Percentage complete for the background copy when a resource is created via the CopyStart operation. + CompletionPercent *float64 `json:"completionPercent,omitempty"` + // PublicNetworkAccess - Possible values include: 'PublicNetworkAccessEnabled', 'PublicNetworkAccessDisabled' + PublicNetworkAccess PublicNetworkAccess `json:"publicNetworkAccess,omitempty"` +} + +// MarshalJSON is the custom marshaler for DiskProperties. +func (dp DiskProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dp.OsType != "" { + objectMap["osType"] = dp.OsType + } + if dp.HyperVGeneration != "" { + objectMap["hyperVGeneration"] = dp.HyperVGeneration + } + if dp.PurchasePlan != nil { + objectMap["purchasePlan"] = dp.PurchasePlan + } + if dp.SupportedCapabilities != nil { + objectMap["supportedCapabilities"] = dp.SupportedCapabilities + } + if dp.CreationData != nil { + objectMap["creationData"] = dp.CreationData + } + if dp.DiskSizeGB != nil { + objectMap["diskSizeGB"] = dp.DiskSizeGB + } + if dp.EncryptionSettingsCollection != nil { + objectMap["encryptionSettingsCollection"] = dp.EncryptionSettingsCollection + } + if dp.DiskIOPSReadWrite != nil { + objectMap["diskIOPSReadWrite"] = dp.DiskIOPSReadWrite + } + if dp.DiskMBpsReadWrite != nil { + objectMap["diskMBpsReadWrite"] = dp.DiskMBpsReadWrite + } + if dp.DiskIOPSReadOnly != nil { + objectMap["diskIOPSReadOnly"] = dp.DiskIOPSReadOnly + } + if dp.DiskMBpsReadOnly != nil { + objectMap["diskMBpsReadOnly"] = dp.DiskMBpsReadOnly + } + if dp.DiskState != "" { + objectMap["diskState"] = dp.DiskState + } + if dp.Encryption != nil { + objectMap["encryption"] = dp.Encryption + } + if dp.MaxShares != nil { + objectMap["maxShares"] = dp.MaxShares + } + if dp.NetworkAccessPolicy != "" { + objectMap["networkAccessPolicy"] = dp.NetworkAccessPolicy + } + if dp.DiskAccessID != nil { + objectMap["diskAccessId"] = dp.DiskAccessID + } + if dp.Tier != nil { + objectMap["tier"] = dp.Tier + } + if dp.BurstingEnabled != nil { + objectMap["burstingEnabled"] = dp.BurstingEnabled + } + if dp.SupportsHibernation != nil { + objectMap["supportsHibernation"] = dp.SupportsHibernation + } + if dp.SecurityProfile != nil { + objectMap["securityProfile"] = dp.SecurityProfile + } + if dp.CompletionPercent != nil { + objectMap["completionPercent"] = dp.CompletionPercent + } + if dp.PublicNetworkAccess != "" { + objectMap["publicNetworkAccess"] = dp.PublicNetworkAccess + } + return json.Marshal(objectMap) +} + +// DiskRestorePoint properties of disk restore point +type DiskRestorePoint struct { + autorest.Response `json:"-"` + *DiskRestorePointProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for DiskRestorePoint. +func (drp DiskRestorePoint) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if drp.DiskRestorePointProperties != nil { + objectMap["properties"] = drp.DiskRestorePointProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for DiskRestorePoint struct. 
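// Illustrative sketch, not part of this diff: the DiskProperties.MarshalJSON above skips every
// READ-ONLY field (timeCreated, diskSizeBytes, uniqueId, provisioningState, shareInfo,
// propertyUpdatesInProgress), so request bodies never echo server-owned state. A minimal "Empty"
// disk payload might look like this; CreationData and the DiskCreateOptionEmpty constant live
// elsewhere in this package and are assumed here.
func exampleEmptyDiskProperties() ([]byte, error) {
	props := DiskProperties{
		CreationData: &CreationData{CreateOption: DiskCreateOptionEmpty},
		// Per the DiskSizeGB comment above, the size is mandatory when createOption is Empty.
		DiskSizeGB: to.Int32Ptr(64),
	}
	return json.Marshal(props) // only creationData and diskSizeGB are emitted
}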
+func (drp *DiskRestorePoint) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var diskRestorePointProperties DiskRestorePointProperties + err = json.Unmarshal(*v, &diskRestorePointProperties) + if err != nil { + return err + } + drp.DiskRestorePointProperties = &diskRestorePointProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + drp.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + drp.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + drp.Type = &typeVar + } + } + } + + return nil +} + +// DiskRestorePointGrantAccessFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type DiskRestorePointGrantAccessFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(DiskRestorePointClient) (AccessURI, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DiskRestorePointGrantAccessFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DiskRestorePointGrantAccessFuture.Result. +func (future *DiskRestorePointGrantAccessFuture) result(client DiskRestorePointClient) (au AccessURI, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskRestorePointGrantAccessFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + au.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.DiskRestorePointGrantAccessFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if au.Response.Response, err = future.GetResult(sender); err == nil && au.Response.Response.StatusCode != http.StatusNoContent { + au, err = client.GrantAccessResponder(au.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskRestorePointGrantAccessFuture", "Result", au.Response.Response, "Failure responding to request") + } + } + return +} + +// DiskRestorePointList the List Disk Restore Points operation response. +type DiskRestorePointList struct { + autorest.Response `json:"-"` + // Value - A list of disk restore points. + Value *[]DiskRestorePoint `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of disk restore points. Call ListNext() with this to fetch the next page of disk restore points. + NextLink *string `json:"nextLink,omitempty"` +} + +// DiskRestorePointListIterator provides access to a complete listing of DiskRestorePoint values. +type DiskRestorePointListIterator struct { + i int + page DiskRestorePointListPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
+func (iter *DiskRestorePointListIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskRestorePointListIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *DiskRestorePointListIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter DiskRestorePointListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter DiskRestorePointListIterator) Response() DiskRestorePointList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter DiskRestorePointListIterator) Value() DiskRestorePoint { + if !iter.page.NotDone() { + return DiskRestorePoint{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the DiskRestorePointListIterator type. +func NewDiskRestorePointListIterator(page DiskRestorePointListPage) DiskRestorePointListIterator { + return DiskRestorePointListIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (drpl DiskRestorePointList) IsEmpty() bool { + return drpl.Value == nil || len(*drpl.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (drpl DiskRestorePointList) hasNextLink() bool { + return drpl.NextLink != nil && len(*drpl.NextLink) != 0 +} + +// diskRestorePointListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (drpl DiskRestorePointList) diskRestorePointListPreparer(ctx context.Context) (*http.Request, error) { + if !drpl.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(drpl.NextLink))) +} + +// DiskRestorePointListPage contains a page of DiskRestorePoint values. +type DiskRestorePointListPage struct { + fn func(context.Context, DiskRestorePointList) (DiskRestorePointList, error) + drpl DiskRestorePointList +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *DiskRestorePointListPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskRestorePointListPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.drpl) + if err != nil { + return err + } + page.drpl = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *DiskRestorePointListPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DiskRestorePointListPage) NotDone() bool { + return !page.drpl.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page DiskRestorePointListPage) Response() DiskRestorePointList { + return page.drpl +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DiskRestorePointListPage) Values() []DiskRestorePoint { + if page.drpl.IsEmpty() { + return nil + } + return *page.drpl.Value +} + +// Creates a new instance of the DiskRestorePointListPage type. +func NewDiskRestorePointListPage(cur DiskRestorePointList, getNextPage func(context.Context, DiskRestorePointList) (DiskRestorePointList, error)) DiskRestorePointListPage { + return DiskRestorePointListPage{ + fn: getNextPage, + drpl: cur, + } +} + +// DiskRestorePointProperties properties of an incremental disk restore point +type DiskRestorePointProperties struct { + // TimeCreated - READ-ONLY; The timestamp of restorePoint creation + TimeCreated *date.Time `json:"timeCreated,omitempty"` + // SourceResourceID - READ-ONLY; arm id of source disk + SourceResourceID *string `json:"sourceResourceId,omitempty"` + // OsType - READ-ONLY; The Operating System type. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'HyperVGenerationV1', 'HyperVGenerationV2' + HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"` + // PurchasePlan - Purchase plan information for the the image from which the OS disk was created. + PurchasePlan *PurchasePlan `json:"purchasePlan,omitempty"` + // SupportedCapabilities - List of supported capabilities (like accelerated networking) for the image from which the OS disk was created. + SupportedCapabilities *SupportedCapabilities `json:"supportedCapabilities,omitempty"` + // FamilyID - READ-ONLY; id of the backing snapshot's MIS family + FamilyID *string `json:"familyId,omitempty"` + // SourceUniqueID - READ-ONLY; unique incarnation id of the source disk + SourceUniqueID *string `json:"sourceUniqueId,omitempty"` + // Encryption - READ-ONLY; Encryption property can be used to encrypt data at rest with customer managed keys or platform managed keys. + Encryption *Encryption `json:"encryption,omitempty"` + // SupportsHibernation - Indicates the OS on a disk supports hibernation. 
+ SupportsHibernation *bool `json:"supportsHibernation,omitempty"` + // NetworkAccessPolicy - Possible values include: 'NetworkAccessPolicyAllowAll', 'NetworkAccessPolicyAllowPrivate', 'NetworkAccessPolicyDenyAll' + NetworkAccessPolicy NetworkAccessPolicy `json:"networkAccessPolicy,omitempty"` + // PublicNetworkAccess - Possible values include: 'PublicNetworkAccessEnabled', 'PublicNetworkAccessDisabled' + PublicNetworkAccess PublicNetworkAccess `json:"publicNetworkAccess,omitempty"` + // DiskAccessID - ARM id of the DiskAccess resource for using private endpoints on disks. + DiskAccessID *string `json:"diskAccessId,omitempty"` + // CompletionPercent - Percentage complete for the background copy when a resource is created via the CopyStart operation. + CompletionPercent *float64 `json:"completionPercent,omitempty"` +} + +// MarshalJSON is the custom marshaler for DiskRestorePointProperties. +func (drpp DiskRestorePointProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if drpp.HyperVGeneration != "" { + objectMap["hyperVGeneration"] = drpp.HyperVGeneration + } + if drpp.PurchasePlan != nil { + objectMap["purchasePlan"] = drpp.PurchasePlan + } + if drpp.SupportedCapabilities != nil { + objectMap["supportedCapabilities"] = drpp.SupportedCapabilities + } + if drpp.SupportsHibernation != nil { + objectMap["supportsHibernation"] = drpp.SupportsHibernation + } + if drpp.NetworkAccessPolicy != "" { + objectMap["networkAccessPolicy"] = drpp.NetworkAccessPolicy + } + if drpp.PublicNetworkAccess != "" { + objectMap["publicNetworkAccess"] = drpp.PublicNetworkAccess + } + if drpp.DiskAccessID != nil { + objectMap["diskAccessId"] = drpp.DiskAccessID + } + if drpp.CompletionPercent != nil { + objectMap["completionPercent"] = drpp.CompletionPercent + } + return json.Marshal(objectMap) +} + +// DiskRestorePointRevokeAccessFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type DiskRestorePointRevokeAccessFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(DiskRestorePointClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DiskRestorePointRevokeAccessFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DiskRestorePointRevokeAccessFuture.Result. +func (future *DiskRestorePointRevokeAccessFuture) result(client DiskRestorePointClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DiskRestorePointRevokeAccessFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.DiskRestorePointRevokeAccessFuture") + return + } + ar.Response = future.Response() + return +} + +// DisksCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type DisksCreateOrUpdateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. 
+ Result func(DisksClient) (Disk, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DisksCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DisksCreateOrUpdateFuture.Result. +func (future *DisksCreateOrUpdateFuture) result(client DisksClient) (d Disk, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + d.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.DisksCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent { + d, err = client.CreateOrUpdateResponder(d.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", d.Response.Response, "Failure responding to request") + } + } + return +} + +// DisksDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type DisksDeleteFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(DisksClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DisksDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DisksDeleteFuture.Result. +func (future *DisksDeleteFuture) result(client DisksClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.DisksDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// DiskSecurityProfile contains the security related information for the resource. +type DiskSecurityProfile struct { + // SecurityType - Possible values include: 'DiskSecurityTypesTrustedLaunch' + SecurityType DiskSecurityTypes `json:"securityType,omitempty"` +} + +// DisksGrantAccessFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type DisksGrantAccessFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(DisksClient) (AccessURI, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
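// Illustrative sketch, not part of this diff: callers normally block on one of these futures via
// WaitForCompletionRef (from the embedded azure.FutureAPI) and then invoke the Result func wired
// up by the UnmarshalJSON above. It assumes DisksClient.CreateOrUpdate returns a
// DisksCreateOrUpdateFuture, which is how the generated client in this package is shaped.
func createDiskAndWait(ctx context.Context, client DisksClient, resourceGroup, name string, disk Disk) (Disk, error) {
	future, err := client.CreateOrUpdate(ctx, resourceGroup, name, disk)
	if err != nil {
		return Disk{}, err
	}
	// client.Client is the embedded autorest.Client used for polling.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return Disk{}, err
	}
	return future.Result(client)
}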
+func (future *DisksGrantAccessFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DisksGrantAccessFuture.Result. +func (future *DisksGrantAccessFuture) result(client DisksClient) (au AccessURI, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + au.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.DisksGrantAccessFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if au.Response.Response, err = future.GetResult(sender); err == nil && au.Response.Response.StatusCode != http.StatusNoContent { + au, err = client.GrantAccessResponder(au.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", au.Response.Response, "Failure responding to request") + } + } + return +} + +// DiskSku the disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, UltraSSD_LRS, +// Premium_ZRS, or StandardSSD_ZRS. +type DiskSku struct { + // Name - The sku name. Possible values include: 'DiskStorageAccountTypesStandardLRS', 'DiskStorageAccountTypesPremiumLRS', 'DiskStorageAccountTypesStandardSSDLRS', 'DiskStorageAccountTypesUltraSSDLRS', 'DiskStorageAccountTypesPremiumZRS', 'DiskStorageAccountTypesStandardSSDZRS' + Name DiskStorageAccountTypes `json:"name,omitempty"` + // Tier - READ-ONLY; The sku tier. + Tier *string `json:"tier,omitempty"` +} + +// MarshalJSON is the custom marshaler for DiskSku. +func (ds DiskSku) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ds.Name != "" { + objectMap["name"] = ds.Name + } + return json.Marshal(objectMap) +} + +// DisksRevokeAccessFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type DisksRevokeAccessFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(DisksClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DisksRevokeAccessFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DisksRevokeAccessFuture.Result. +func (future *DisksRevokeAccessFuture) result(client DisksClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksRevokeAccessFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.DisksRevokeAccessFuture") + return + } + ar.Response = future.Response() + return +} + +// DisksUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. 
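// Illustrative sketch, not part of this diff: DiskSku.MarshalJSON above deliberately drops the
// READ-ONLY Tier, so only the SKU name crosses the wire. The constant name is taken from the
// "Possible values" comment on Name.
func exampleDiskSkuBody() ([]byte, error) {
	sku := DiskSku{Name: DiskStorageAccountTypesPremiumLRS}
	return json.Marshal(sku) // {"name":"Premium_LRS"}
}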
+type DisksUpdateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(DisksClient) (Disk, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *DisksUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for DisksUpdateFuture.Result. +func (future *DisksUpdateFuture) result(client DisksClient) (d Disk, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + d.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.DisksUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent { + d, err = client.UpdateResponder(d.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", d.Response.Response, "Failure responding to request") + } + } + return +} + +// DiskUpdate disk update resource. +type DiskUpdate struct { + *DiskUpdateProperties `json:"properties,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` + Sku *DiskSku `json:"sku,omitempty"` +} + +// MarshalJSON is the custom marshaler for DiskUpdate. +func (du DiskUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if du.DiskUpdateProperties != nil { + objectMap["properties"] = du.DiskUpdateProperties + } + if du.Tags != nil { + objectMap["tags"] = du.Tags + } + if du.Sku != nil { + objectMap["sku"] = du.Sku + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for DiskUpdate struct. +func (du *DiskUpdate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var diskUpdateProperties DiskUpdateProperties + err = json.Unmarshal(*v, &diskUpdateProperties) + if err != nil { + return err + } + du.DiskUpdateProperties = &diskUpdateProperties + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + du.Tags = tags + } + case "sku": + if v != nil { + var sku DiskSku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + du.Sku = &sku + } + } + } + + return nil +} + +// DiskUpdateProperties disk resource update properties. +type DiskUpdateProperties struct { + // OsType - the Operating System type. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the disk to create. If this field is present for updates or creation with other options, it indicates a resize. 
Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // EncryptionSettingsCollection - Encryption settings collection used be Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot. + EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"` + // DiskIOPSReadWrite - The number of IOPS allowed for this disk; only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes. + DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"` + // DiskMBpsReadWrite - The bandwidth allowed for this disk; only settable for UltraSSD disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10. + DiskMBpsReadWrite *int64 `json:"diskMBpsReadWrite,omitempty"` + // DiskIOPSReadOnly - The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes. + DiskIOPSReadOnly *int64 `json:"diskIOPSReadOnly,omitempty"` + // DiskMBpsReadOnly - The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10. + DiskMBpsReadOnly *int64 `json:"diskMBpsReadOnly,omitempty"` + // MaxShares - The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time. + MaxShares *int32 `json:"maxShares,omitempty"` + // Encryption - Encryption property can be used to encrypt data at rest with customer managed keys or platform managed keys. + Encryption *Encryption `json:"encryption,omitempty"` + // NetworkAccessPolicy - Possible values include: 'NetworkAccessPolicyAllowAll', 'NetworkAccessPolicyAllowPrivate', 'NetworkAccessPolicyDenyAll' + NetworkAccessPolicy NetworkAccessPolicy `json:"networkAccessPolicy,omitempty"` + // DiskAccessID - ARM id of the DiskAccess resource for using private endpoints on disks. + DiskAccessID *string `json:"diskAccessId,omitempty"` + // Tier - Performance tier of the disk (e.g, P4, S10) as described here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/. Does not apply to Ultra disks. + Tier *string `json:"tier,omitempty"` + // BurstingEnabled - Set to true to enable bursting beyond the provisioned performance target of the disk. Bursting is disabled by default. Does not apply to Ultra disks. + BurstingEnabled *bool `json:"burstingEnabled,omitempty"` + // PurchasePlan - Purchase plan information to be added on the OS disk + PurchasePlan *PurchasePlan `json:"purchasePlan,omitempty"` + // SupportedCapabilities - List of supported capabilities (like accelerated networking) to be added on the OS disk. + SupportedCapabilities *SupportedCapabilities `json:"supportedCapabilities,omitempty"` + // PropertyUpdatesInProgress - READ-ONLY; Properties of the disk for which update is pending. + PropertyUpdatesInProgress *PropertyUpdatesInProgress `json:"propertyUpdatesInProgress,omitempty"` + // SupportsHibernation - Indicates the OS on a disk supports hibernation. 
+ SupportsHibernation *bool `json:"supportsHibernation,omitempty"` + // PublicNetworkAccess - Possible values include: 'PublicNetworkAccessEnabled', 'PublicNetworkAccessDisabled' + PublicNetworkAccess PublicNetworkAccess `json:"publicNetworkAccess,omitempty"` +} + +// MarshalJSON is the custom marshaler for DiskUpdateProperties. +func (dup DiskUpdateProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dup.OsType != "" { + objectMap["osType"] = dup.OsType + } + if dup.DiskSizeGB != nil { + objectMap["diskSizeGB"] = dup.DiskSizeGB + } + if dup.EncryptionSettingsCollection != nil { + objectMap["encryptionSettingsCollection"] = dup.EncryptionSettingsCollection + } + if dup.DiskIOPSReadWrite != nil { + objectMap["diskIOPSReadWrite"] = dup.DiskIOPSReadWrite + } + if dup.DiskMBpsReadWrite != nil { + objectMap["diskMBpsReadWrite"] = dup.DiskMBpsReadWrite + } + if dup.DiskIOPSReadOnly != nil { + objectMap["diskIOPSReadOnly"] = dup.DiskIOPSReadOnly + } + if dup.DiskMBpsReadOnly != nil { + objectMap["diskMBpsReadOnly"] = dup.DiskMBpsReadOnly + } + if dup.MaxShares != nil { + objectMap["maxShares"] = dup.MaxShares + } + if dup.Encryption != nil { + objectMap["encryption"] = dup.Encryption + } + if dup.NetworkAccessPolicy != "" { + objectMap["networkAccessPolicy"] = dup.NetworkAccessPolicy + } + if dup.DiskAccessID != nil { + objectMap["diskAccessId"] = dup.DiskAccessID + } + if dup.Tier != nil { + objectMap["tier"] = dup.Tier + } + if dup.BurstingEnabled != nil { + objectMap["burstingEnabled"] = dup.BurstingEnabled + } + if dup.PurchasePlan != nil { + objectMap["purchasePlan"] = dup.PurchasePlan + } + if dup.SupportedCapabilities != nil { + objectMap["supportedCapabilities"] = dup.SupportedCapabilities + } + if dup.SupportsHibernation != nil { + objectMap["supportsHibernation"] = dup.SupportsHibernation + } + if dup.PublicNetworkAccess != "" { + objectMap["publicNetworkAccess"] = dup.PublicNetworkAccess + } + return json.Marshal(objectMap) +} + +// Encryption encryption at rest settings for disk or snapshot +type Encryption struct { + // DiskEncryptionSetID - ResourceId of the disk encryption set to use for enabling encryption at rest. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty"` + // Type - Possible values include: 'EncryptionTypeEncryptionAtRestWithPlatformKey', 'EncryptionTypeEncryptionAtRestWithCustomerKey', 'EncryptionTypeEncryptionAtRestWithPlatformAndCustomerKeys' + Type EncryptionType `json:"type,omitempty"` +} + +// EncryptionImages optional. Allows users to provide customer managed keys for encrypting the OS and data +// disks in the gallery artifact. +type EncryptionImages struct { + OsDiskImage *OSDiskImageEncryption `json:"osDiskImage,omitempty"` + // DataDiskImages - A list of encryption specifications for data disk images. + DataDiskImages *[]DataDiskImageEncryption `json:"dataDiskImages,omitempty"` +} + +// EncryptionSetIdentity the managed identity for the disk encryption set. It should be given permission on +// the key vault before it can be used to encrypt disks. +type EncryptionSetIdentity struct { + // Type - The type of Managed Identity used by the DiskEncryptionSet. Only SystemAssigned is supported for new creations. Disk Encryption Sets can be updated with Identity type None during migration of subscription to a new Azure Active Directory tenant; it will cause the encrypted resources to lose access to the keys. 
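// Illustrative sketch, not part of this diff: DiskUpdate and DiskUpdateProperties above combine
// into the usual flattened "properties" envelope, so a resize plus a move onto a disk encryption
// set can be expressed as below. The resource ID is a hypothetical placeholder; the EncryptionType
// constant matches the "Possible values" comment on Encryption above, and the to helpers are
// already imported by this file.
func exampleDiskPatchBody() ([]byte, error) {
	update := DiskUpdate{
		DiskUpdateProperties: &DiskUpdateProperties{
			DiskSizeGB: to.Int32Ptr(256),
			Encryption: &Encryption{
				DiskEncryptionSetID: to.StringPtr("/subscriptions/.../diskEncryptionSets/example-des"),
				Type:                EncryptionTypeEncryptionAtRestWithCustomerKey,
			},
		},
		Tags: map[string]*string{"environment": to.StringPtr("test")},
	}
	// Marshals as {"properties":{"diskSizeGB":256,"encryption":{...}},"tags":{"environment":"test"}}.
	return json.Marshal(update)
}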
Possible values include: 'DiskEncryptionSetIdentityTypeSystemAssigned', 'DiskEncryptionSetIdentityTypeNone' + Type DiskEncryptionSetIdentityType `json:"type,omitempty"` + // PrincipalID - READ-ONLY; The object id of the Managed Identity Resource. This will be sent to the RP from ARM via the x-ms-identity-principal-id header in the PUT request if the resource has a systemAssigned(implicit) identity + PrincipalID *string `json:"principalId,omitempty"` + // TenantID - READ-ONLY; The tenant id of the Managed Identity Resource. This will be sent to the RP from ARM via the x-ms-client-tenant-id header in the PUT request if the resource has a systemAssigned(implicit) identity + TenantID *string `json:"tenantId,omitempty"` +} + +// MarshalJSON is the custom marshaler for EncryptionSetIdentity. +func (esi EncryptionSetIdentity) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if esi.Type != "" { + objectMap["type"] = esi.Type + } + return json.Marshal(objectMap) +} + +// EncryptionSetProperties ... +type EncryptionSetProperties struct { + // EncryptionType - Possible values include: 'DiskEncryptionSetTypeEncryptionAtRestWithCustomerKey', 'DiskEncryptionSetTypeEncryptionAtRestWithPlatformAndCustomerKeys' + EncryptionType DiskEncryptionSetType `json:"encryptionType,omitempty"` + // ActiveKey - The key vault key which is currently used by this disk encryption set. + ActiveKey *KeyForDiskEncryptionSet `json:"activeKey,omitempty"` + // PreviousKeys - READ-ONLY; A readonly collection of key vault keys previously used by this disk encryption set while a key rotation is in progress. It will be empty if there is no ongoing key rotation. + PreviousKeys *[]KeyForDiskEncryptionSet `json:"previousKeys,omitempty"` + // ProvisioningState - READ-ONLY; The disk encryption set provisioning state. + ProvisioningState *string `json:"provisioningState,omitempty"` + // RotationToLatestKeyVersionEnabled - Set this flag to true to enable auto-updating of this disk encryption set to the latest key version. + RotationToLatestKeyVersionEnabled *bool `json:"rotationToLatestKeyVersionEnabled,omitempty"` + // LastKeyRotationTimestamp - READ-ONLY; The time when the active key of this disk encryption set was updated. + LastKeyRotationTimestamp *date.Time `json:"lastKeyRotationTimestamp,omitempty"` + // AutoKeyRotationError - READ-ONLY; The error that was encountered during auto-key rotation. If an error is present, then auto-key rotation will not be attempted until the error on this disk encryption set is fixed. + AutoKeyRotationError *APIError `json:"autoKeyRotationError,omitempty"` +} + +// MarshalJSON is the custom marshaler for EncryptionSetProperties. +func (esp EncryptionSetProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if esp.EncryptionType != "" { + objectMap["encryptionType"] = esp.EncryptionType + } + if esp.ActiveKey != nil { + objectMap["activeKey"] = esp.ActiveKey + } + if esp.RotationToLatestKeyVersionEnabled != nil { + objectMap["rotationToLatestKeyVersionEnabled"] = esp.RotationToLatestKeyVersionEnabled + } + return json.Marshal(objectMap) +} + +// EncryptionSettingsCollection encryption settings for disk or snapshot +type EncryptionSettingsCollection struct { + // Enabled - Set this flag to true and provide DiskEncryptionKey and optional KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and KeyEncryptionKey to disable encryption. 
If EncryptionSettings is null in the request object, the existing settings remain unchanged. + Enabled *bool `json:"enabled,omitempty"` + // EncryptionSettings - A collection of encryption settings, one for each disk volume. + EncryptionSettings *[]EncryptionSettingsElement `json:"encryptionSettings,omitempty"` + // EncryptionSettingsVersion - Describes what type of encryption is used for the disks. Once this field is set, it cannot be overwritten. '1.0' corresponds to Azure Disk Encryption with AAD app.'1.1' corresponds to Azure Disk Encryption. + EncryptionSettingsVersion *string `json:"encryptionSettingsVersion,omitempty"` +} + +// EncryptionSettingsElement encryption settings for one disk volume. +type EncryptionSettingsElement struct { + // DiskEncryptionKey - Key Vault Secret Url and vault id of the disk encryption key + DiskEncryptionKey *KeyVaultAndSecretReference `json:"diskEncryptionKey,omitempty"` + // KeyEncryptionKey - Key Vault Key Url and vault id of the key encryption key. KeyEncryptionKey is optional and when provided is used to unwrap the disk encryption key. + KeyEncryptionKey *KeyVaultAndKeyReference `json:"keyEncryptionKey,omitempty"` +} + +// ExtendedLocation the complex type of the extended location. +type ExtendedLocation struct { + // Name - The name of the extended location. + Name *string `json:"name,omitempty"` + // Type - The type of the extended location. Possible values include: 'ExtendedLocationTypesEdgeZone' + Type ExtendedLocationTypes `json:"type,omitempty"` +} + +// Extension describes a cloud service Extension. +type Extension struct { + // Name - The name of the extension. + Name *string `json:"name,omitempty"` + Properties *CloudServiceExtensionProperties `json:"properties,omitempty"` +} + +// GalleriesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type GalleriesCreateOrUpdateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(GalleriesClient) (Gallery, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleriesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleriesCreateOrUpdateFuture.Result. 
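// Illustrative sketch, not part of this diff: EncryptionSettingsCollection above is the Azure Disk
// Encryption shape (per-volume Key Vault references) rather than the customer-managed-key
// Encryption block. A minimal example using only the fields shown here; the per-volume
// EncryptionSettingsElement entries are omitted.
func exampleADECollection() *EncryptionSettingsCollection {
	return &EncryptionSettingsCollection{
		Enabled: to.BoolPtr(true),
		// "1.1" corresponds to Azure Disk Encryption without the AAD app, per the comment above.
		EncryptionSettingsVersion: to.StringPtr("1.1"),
	}
}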
+func (future *GalleriesCreateOrUpdateFuture) result(client GalleriesClient) (g Gallery, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + g.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.GalleriesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if g.Response.Response, err = future.GetResult(sender); err == nil && g.Response.Response.StatusCode != http.StatusNoContent { + g, err = client.CreateOrUpdateResponder(g.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", g.Response.Response, "Failure responding to request") + } + } + return +} + +// GalleriesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type GalleriesDeleteFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(GalleriesClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleriesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleriesDeleteFuture.Result. +func (future *GalleriesDeleteFuture) result(client GalleriesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.GalleriesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// GalleriesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type GalleriesUpdateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(GalleriesClient) (Gallery, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleriesUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleriesUpdateFuture.Result. 
+func (future *GalleriesUpdateFuture) result(client GalleriesClient) (g Gallery, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + g.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.GalleriesUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if g.Response.Response, err = future.GetResult(sender); err == nil && g.Response.Response.StatusCode != http.StatusNoContent { + g, err = client.UpdateResponder(g.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesUpdateFuture", "Result", g.Response.Response, "Failure responding to request") + } + } + return +} + +// Gallery specifies information about the Shared Image Gallery that you want to create or update. +type Gallery struct { + autorest.Response `json:"-"` + *GalleryProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Gallery. +func (g Gallery) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if g.GalleryProperties != nil { + objectMap["properties"] = g.GalleryProperties + } + if g.Location != nil { + objectMap["location"] = g.Location + } + if g.Tags != nil { + objectMap["tags"] = g.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Gallery struct. +func (g *Gallery) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var galleryProperties GalleryProperties + err = json.Unmarshal(*v, &galleryProperties) + if err != nil { + return err + } + g.GalleryProperties = &galleryProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + g.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + g.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + g.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + g.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + g.Tags = tags + } + } + } + + return nil +} + +// GalleryApplication specifies information about the gallery Application Definition that you want to +// create or update. 
+type GalleryApplication struct { + autorest.Response `json:"-"` + *GalleryApplicationProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for GalleryApplication. +func (ga GalleryApplication) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ga.GalleryApplicationProperties != nil { + objectMap["properties"] = ga.GalleryApplicationProperties + } + if ga.Location != nil { + objectMap["location"] = ga.Location + } + if ga.Tags != nil { + objectMap["tags"] = ga.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for GalleryApplication struct. +func (ga *GalleryApplication) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var galleryApplicationProperties GalleryApplicationProperties + err = json.Unmarshal(*v, &galleryApplicationProperties) + if err != nil { + return err + } + ga.GalleryApplicationProperties = &galleryApplicationProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + ga.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + ga.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + ga.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + ga.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + ga.Tags = tags + } + } + } + + return nil +} + +// GalleryApplicationList the List Gallery Applications operation response. +type GalleryApplicationList struct { + autorest.Response `json:"-"` + // Value - A list of Gallery Applications. + Value *[]GalleryApplication `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of Application Definitions in the Application Gallery. Call ListNext() with this to fetch the next page of gallery Application Definitions. + NextLink *string `json:"nextLink,omitempty"` +} + +// GalleryApplicationListIterator provides access to a complete listing of GalleryApplication values. +type GalleryApplicationListIterator struct { + i int + page GalleryApplicationListPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
+func (iter *GalleryApplicationListIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationListIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *GalleryApplicationListIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter GalleryApplicationListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter GalleryApplicationListIterator) Response() GalleryApplicationList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter GalleryApplicationListIterator) Value() GalleryApplication { + if !iter.page.NotDone() { + return GalleryApplication{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the GalleryApplicationListIterator type. +func NewGalleryApplicationListIterator(page GalleryApplicationListPage) GalleryApplicationListIterator { + return GalleryApplicationListIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (gal GalleryApplicationList) IsEmpty() bool { + return gal.Value == nil || len(*gal.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (gal GalleryApplicationList) hasNextLink() bool { + return gal.NextLink != nil && len(*gal.NextLink) != 0 +} + +// galleryApplicationListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (gal GalleryApplicationList) galleryApplicationListPreparer(ctx context.Context) (*http.Request, error) { + if !gal.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(gal.NextLink))) +} + +// GalleryApplicationListPage contains a page of GalleryApplication values. +type GalleryApplicationListPage struct { + fn func(context.Context, GalleryApplicationList) (GalleryApplicationList, error) + gal GalleryApplicationList +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *GalleryApplicationListPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationListPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.gal) + if err != nil { + return err + } + page.gal = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *GalleryApplicationListPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page GalleryApplicationListPage) NotDone() bool { + return !page.gal.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page GalleryApplicationListPage) Response() GalleryApplicationList { + return page.gal +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page GalleryApplicationListPage) Values() []GalleryApplication { + if page.gal.IsEmpty() { + return nil + } + return *page.gal.Value +} + +// Creates a new instance of the GalleryApplicationListPage type. +func NewGalleryApplicationListPage(cur GalleryApplicationList, getNextPage func(context.Context, GalleryApplicationList) (GalleryApplicationList, error)) GalleryApplicationListPage { + return GalleryApplicationListPage{ + fn: getNextPage, + gal: cur, + } +} + +// GalleryApplicationProperties describes the properties of a gallery Application Definition. +type GalleryApplicationProperties struct { + // Description - The description of this gallery Application Definition resource. This property is updatable. + Description *string `json:"description,omitempty"` + // Eula - The Eula agreement for the gallery Application Definition. + Eula *string `json:"eula,omitempty"` + // PrivacyStatementURI - The privacy statement uri. + PrivacyStatementURI *string `json:"privacyStatementUri,omitempty"` + // ReleaseNoteURI - The release note uri. + ReleaseNoteURI *string `json:"releaseNoteUri,omitempty"` + // EndOfLifeDate - The end of life date of the gallery Application Definition. This property can be used for decommissioning purposes. This property is updatable. + EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"` + // SupportedOSType - This property allows you to specify the supported type of the OS that application is built for.
Possible values are: **Windows**,
**Linux**. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux' + SupportedOSType OperatingSystemTypes `json:"supportedOSType,omitempty"` +} + +// GalleryApplicationsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type GalleryApplicationsCreateOrUpdateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(GalleryApplicationsClient) (GalleryApplication, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleryApplicationsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleryApplicationsCreateOrUpdateFuture.Result. +func (future *GalleryApplicationsCreateOrUpdateFuture) result(client GalleryApplicationsClient) (ga GalleryApplication, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ga.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ga.Response.Response, err = future.GetResult(sender); err == nil && ga.Response.Response.StatusCode != http.StatusNoContent { + ga, err = client.CreateOrUpdateResponder(ga.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsCreateOrUpdateFuture", "Result", ga.Response.Response, "Failure responding to request") + } + } + return } -// Disk disk resource. -type Disk struct { - autorest.Response `json:"-"` - // ManagedBy - READ-ONLY; A relative URI containing the ID of the VM that has the disk attached. - ManagedBy *string `json:"managedBy,omitempty"` - // ManagedByExtended - READ-ONLY; List of relative URIs containing the IDs of the VMs that have the disk attached. maxShares should be set to a value greater than one for disks to allow attaching them to multiple VMs. - ManagedByExtended *[]string `json:"managedByExtended,omitempty"` - Sku *DiskSku `json:"sku,omitempty"` - // Zones - The Logical zone list for Disk. - Zones *[]string `json:"zones,omitempty"` - // ExtendedLocation - The extended location where the disk will be created. Extended location cannot be changed. - ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"` - *DiskProperties `json:"properties,omitempty"` +// GalleryApplicationsDeleteFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type GalleryApplicationsDeleteFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(GalleryApplicationsClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *GalleryApplicationsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleryApplicationsDeleteFuture.Result. +func (future *GalleryApplicationsDeleteFuture) result(client GalleryApplicationsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// GalleryApplicationsUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type GalleryApplicationsUpdateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(GalleryApplicationsClient) (GalleryApplication, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleryApplicationsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleryApplicationsUpdateFuture.Result. +func (future *GalleryApplicationsUpdateFuture) result(client GalleryApplicationsClient) (ga GalleryApplication, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ga.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ga.Response.Response, err = future.GetResult(sender); err == nil && ga.Response.Response.StatusCode != http.StatusNoContent { + ga, err = client.UpdateResponder(ga.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsUpdateFuture", "Result", ga.Response.Response, "Failure responding to request") + } + } + return +} + +// GalleryApplicationUpdate specifies information about the gallery Application Definition that you want to +// update. +type GalleryApplicationUpdate struct { + *GalleryApplicationProperties `json:"properties,omitempty"` // ID - READ-ONLY; Resource Id ID *string `json:"id,omitempty"` // Name - READ-ONLY; Resource name Name *string `json:"name,omitempty"` // Type - READ-ONLY; Resource type Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` // Tags - Resource tags Tags map[string]*string `json:"tags"` } -// MarshalJSON is the custom marshaler for Disk. -func (d Disk) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for GalleryApplicationUpdate. 
+func (gau GalleryApplicationUpdate) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if d.Sku != nil { - objectMap["sku"] = d.Sku - } - if d.Zones != nil { - objectMap["zones"] = d.Zones - } - if d.ExtendedLocation != nil { - objectMap["extendedLocation"] = d.ExtendedLocation - } - if d.DiskProperties != nil { - objectMap["properties"] = d.DiskProperties - } - if d.Location != nil { - objectMap["location"] = d.Location + if gau.GalleryApplicationProperties != nil { + objectMap["properties"] = gau.GalleryApplicationProperties } - if d.Tags != nil { - objectMap["tags"] = d.Tags + if gau.Tags != nil { + objectMap["tags"] = gau.Tags } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for Disk struct. -func (d *Disk) UnmarshalJSON(body []byte) error { +// UnmarshalJSON is the custom unmarshaler for GalleryApplicationUpdate struct. +func (gau *GalleryApplicationUpdate) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { @@ -1698,59 +6959,14 @@ func (d *Disk) UnmarshalJSON(body []byte) error { } for k, v := range m { switch k { - case "managedBy": - if v != nil { - var managedBy string - err = json.Unmarshal(*v, &managedBy) - if err != nil { - return err - } - d.ManagedBy = &managedBy - } - case "managedByExtended": - if v != nil { - var managedByExtended []string - err = json.Unmarshal(*v, &managedByExtended) - if err != nil { - return err - } - d.ManagedByExtended = &managedByExtended - } - case "sku": - if v != nil { - var sku DiskSku - err = json.Unmarshal(*v, &sku) - if err != nil { - return err - } - d.Sku = &sku - } - case "zones": - if v != nil { - var zones []string - err = json.Unmarshal(*v, &zones) - if err != nil { - return err - } - d.Zones = &zones - } - case "extendedLocation": - if v != nil { - var extendedLocation ExtendedLocation - err = json.Unmarshal(*v, &extendedLocation) - if err != nil { - return err - } - d.ExtendedLocation = &extendedLocation - } case "properties": if v != nil { - var diskProperties DiskProperties - err = json.Unmarshal(*v, &diskProperties) + var galleryApplicationProperties GalleryApplicationProperties + err = json.Unmarshal(*v, &galleryApplicationProperties) if err != nil { return err } - d.DiskProperties = &diskProperties + gau.GalleryApplicationProperties = &galleryApplicationProperties } case "id": if v != nil { @@ -1759,7 +6975,7 @@ func (d *Disk) UnmarshalJSON(body []byte) error { if err != nil { return err } - d.ID = &ID + gau.ID = &ID } case "name": if v != nil { @@ -1768,7 +6984,7 @@ func (d *Disk) UnmarshalJSON(body []byte) error { if err != nil { return err } - d.Name = &name + gau.Name = &name } case "type": if v != nil { @@ -1777,16 +6993,7 @@ func (d *Disk) UnmarshalJSON(body []byte) error { if err != nil { return err } - d.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - d.Location = &location + gau.Type = &typeVar } case "tags": if v != nil { @@ -1795,7 +7002,7 @@ func (d *Disk) UnmarshalJSON(body []byte) error { if err != nil { return err } - d.Tags = tags + gau.Tags = tags } } } @@ -1803,10 +7010,11 @@ func (d *Disk) UnmarshalJSON(body []byte) error { return nil } -// DiskAccess disk access resource. 
-type DiskAccess struct { - autorest.Response `json:"-"` - *DiskAccessProperties `json:"properties,omitempty"` +// GalleryApplicationVersion specifies information about the gallery Application Version that you want to +// create or update. +type GalleryApplicationVersion struct { + autorest.Response `json:"-"` + *GalleryApplicationVersionProperties `json:"properties,omitempty"` // ID - READ-ONLY; Resource Id ID *string `json:"id,omitempty"` // Name - READ-ONLY; Resource name @@ -1819,23 +7027,23 @@ type DiskAccess struct { Tags map[string]*string `json:"tags"` } -// MarshalJSON is the custom marshaler for DiskAccess. -func (da DiskAccess) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for GalleryApplicationVersion. +func (gav GalleryApplicationVersion) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if da.DiskAccessProperties != nil { - objectMap["properties"] = da.DiskAccessProperties + if gav.GalleryApplicationVersionProperties != nil { + objectMap["properties"] = gav.GalleryApplicationVersionProperties } - if da.Location != nil { - objectMap["location"] = da.Location + if gav.Location != nil { + objectMap["location"] = gav.Location } - if da.Tags != nil { - objectMap["tags"] = da.Tags + if gav.Tags != nil { + objectMap["tags"] = gav.Tags } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for DiskAccess struct. -func (da *DiskAccess) UnmarshalJSON(body []byte) error { +// UnmarshalJSON is the custom unmarshaler for GalleryApplicationVersion struct. +func (gav *GalleryApplicationVersion) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { @@ -1845,12 +7053,12 @@ func (da *DiskAccess) UnmarshalJSON(body []byte) error { switch k { case "properties": if v != nil { - var diskAccessProperties DiskAccessProperties - err = json.Unmarshal(*v, &diskAccessProperties) + var galleryApplicationVersionProperties GalleryApplicationVersionProperties + err = json.Unmarshal(*v, &galleryApplicationVersionProperties) if err != nil { return err } - da.DiskAccessProperties = &diskAccessProperties + gav.GalleryApplicationVersionProperties = &galleryApplicationVersionProperties } case "id": if v != nil { @@ -1859,7 +7067,7 @@ func (da *DiskAccess) UnmarshalJSON(body []byte) error { if err != nil { return err } - da.ID = &ID + gav.ID = &ID } case "name": if v != nil { @@ -1868,7 +7076,7 @@ func (da *DiskAccess) UnmarshalJSON(body []byte) error { if err != nil { return err } - da.Name = &name + gav.Name = &name } case "type": if v != nil { @@ -1877,7 +7085,7 @@ func (da *DiskAccess) UnmarshalJSON(body []byte) error { if err != nil { return err } - da.Type = &typeVar + gav.Type = &typeVar } case "location": if v != nil { @@ -1886,7 +7094,7 @@ func (da *DiskAccess) UnmarshalJSON(body []byte) error { if err != nil { return err } - da.Location = &location + gav.Location = &location } case "tags": if v != nil { @@ -1895,7 +7103,7 @@ func (da *DiskAccess) UnmarshalJSON(body []byte) error { if err != nil { return err } - da.Tags = tags + gav.Tags = tags } } } @@ -1903,229 +7111,27 @@ func (da *DiskAccess) UnmarshalJSON(body []byte) error { return nil } -// DiskAccessesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type DiskAccessesCreateOrUpdateFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. 
- Result func(DiskAccessesClient) (DiskAccess, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *DiskAccessesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for DiskAccessesCreateOrUpdateFuture.Result. -func (future *DiskAccessesCreateOrUpdateFuture) result(client DiskAccessesClient) (da DiskAccess, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskAccessesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - da.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.DiskAccessesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if da.Response.Response, err = future.GetResult(sender); err == nil && da.Response.Response.StatusCode != http.StatusNoContent { - da, err = client.CreateOrUpdateResponder(da.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskAccessesCreateOrUpdateFuture", "Result", da.Response.Response, "Failure responding to request") - } - } - return -} - -// DiskAccessesDeleteAPrivateEndpointConnectionFuture an abstraction for monitoring and retrieving the -// results of a long-running operation. -type DiskAccessesDeleteAPrivateEndpointConnectionFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(DiskAccessesClient) (autorest.Response, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *DiskAccessesDeleteAPrivateEndpointConnectionFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for DiskAccessesDeleteAPrivateEndpointConnectionFuture.Result. -func (future *DiskAccessesDeleteAPrivateEndpointConnectionFuture) result(client DiskAccessesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskAccessesDeleteAPrivateEndpointConnectionFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.DiskAccessesDeleteAPrivateEndpointConnectionFuture") - return - } - ar.Response = future.Response() - return -} - -// DiskAccessesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type DiskAccessesDeleteFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(DiskAccessesClient) (autorest.Response, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
-func (future *DiskAccessesDeleteFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for DiskAccessesDeleteFuture.Result. -func (future *DiskAccessesDeleteFuture) result(client DiskAccessesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskAccessesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.DiskAccessesDeleteFuture") - return - } - ar.Response = future.Response() - return -} - -// DiskAccessesUpdateAPrivateEndpointConnectionFuture an abstraction for monitoring and retrieving the -// results of a long-running operation. -type DiskAccessesUpdateAPrivateEndpointConnectionFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(DiskAccessesClient) (PrivateEndpointConnection, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *DiskAccessesUpdateAPrivateEndpointConnectionFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for DiskAccessesUpdateAPrivateEndpointConnectionFuture.Result. -func (future *DiskAccessesUpdateAPrivateEndpointConnectionFuture) result(client DiskAccessesClient) (pec PrivateEndpointConnection, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskAccessesUpdateAPrivateEndpointConnectionFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - pec.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.DiskAccessesUpdateAPrivateEndpointConnectionFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if pec.Response.Response, err = future.GetResult(sender); err == nil && pec.Response.Response.StatusCode != http.StatusNoContent { - pec, err = client.UpdateAPrivateEndpointConnectionResponder(pec.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskAccessesUpdateAPrivateEndpointConnectionFuture", "Result", pec.Response.Response, "Failure responding to request") - } - } - return -} - -// DiskAccessesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type DiskAccessesUpdateFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(DiskAccessesClient) (DiskAccess, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
-func (future *DiskAccessesUpdateFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for DiskAccessesUpdateFuture.Result. -func (future *DiskAccessesUpdateFuture) result(client DiskAccessesClient) (da DiskAccess, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskAccessesUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - da.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.DiskAccessesUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if da.Response.Response, err = future.GetResult(sender); err == nil && da.Response.Response.StatusCode != http.StatusNoContent { - da, err = client.UpdateResponder(da.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskAccessesUpdateFuture", "Result", da.Response.Response, "Failure responding to request") - } - } - return -} - -// DiskAccessList the List disk access operation response. -type DiskAccessList struct { +// GalleryApplicationVersionList the List Gallery Application version operation response. +type GalleryApplicationVersionList struct { autorest.Response `json:"-"` - // Value - A list of disk access resources. - Value *[]DiskAccess `json:"value,omitempty"` - // NextLink - The uri to fetch the next page of disk access resources. Call ListNext() with this to fetch the next page of disk access resources. + // Value - A list of gallery Application Versions. + Value *[]GalleryApplicationVersion `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of gallery Application Versions. Call ListNext() with this to fetch the next page of gallery Application Versions. NextLink *string `json:"nextLink,omitempty"` } -// DiskAccessListIterator provides access to a complete listing of DiskAccess values. -type DiskAccessListIterator struct { +// GalleryApplicationVersionListIterator provides access to a complete listing of GalleryApplicationVersion +// values. +type GalleryApplicationVersionListIterator struct { i int - page DiskAccessListPage + page GalleryApplicationVersionListPage } // NextWithContext advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. -func (iter *DiskAccessListIterator) NextWithContext(ctx context.Context) (err error) { +func (iter *GalleryApplicationVersionListIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessListIterator.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionListIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { @@ -2150,67 +7156,67 @@ func (iter *DiskAccessListIterator) NextWithContext(ctx context.Context) (err er // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. 
-func (iter *DiskAccessListIterator) Next() error { +func (iter *GalleryApplicationVersionListIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DiskAccessListIterator) NotDone() bool { +func (iter GalleryApplicationVersionListIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. -func (iter DiskAccessListIterator) Response() DiskAccessList { +func (iter GalleryApplicationVersionListIterator) Response() GalleryApplicationVersionList { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. -func (iter DiskAccessListIterator) Value() DiskAccess { +func (iter GalleryApplicationVersionListIterator) Value() GalleryApplicationVersion { if !iter.page.NotDone() { - return DiskAccess{} + return GalleryApplicationVersion{} } return iter.page.Values()[iter.i] } - -// Creates a new instance of the DiskAccessListIterator type. -func NewDiskAccessListIterator(page DiskAccessListPage) DiskAccessListIterator { - return DiskAccessListIterator{page: page} + +// Creates a new instance of the GalleryApplicationVersionListIterator type. +func NewGalleryApplicationVersionListIterator(page GalleryApplicationVersionListPage) GalleryApplicationVersionListIterator { + return GalleryApplicationVersionListIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. -func (dal DiskAccessList) IsEmpty() bool { - return dal.Value == nil || len(*dal.Value) == 0 +func (gavl GalleryApplicationVersionList) IsEmpty() bool { + return gavl.Value == nil || len(*gavl.Value) == 0 } // hasNextLink returns true if the NextLink is not empty. -func (dal DiskAccessList) hasNextLink() bool { - return dal.NextLink != nil && len(*dal.NextLink) != 0 +func (gavl GalleryApplicationVersionList) hasNextLink() bool { + return gavl.NextLink != nil && len(*gavl.NextLink) != 0 } -// diskAccessListPreparer prepares a request to retrieve the next set of results. +// galleryApplicationVersionListPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. -func (dal DiskAccessList) diskAccessListPreparer(ctx context.Context) (*http.Request, error) { - if !dal.hasNextLink() { +func (gavl GalleryApplicationVersionList) galleryApplicationVersionListPreparer(ctx context.Context) (*http.Request, error) { + if !gavl.hasNextLink() { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), - autorest.WithBaseURL(to.String(dal.NextLink))) + autorest.WithBaseURL(to.String(gavl.NextLink))) } -// DiskAccessListPage contains a page of DiskAccess values. -type DiskAccessListPage struct { - fn func(context.Context, DiskAccessList) (DiskAccessList, error) - dal DiskAccessList +// GalleryApplicationVersionListPage contains a page of GalleryApplicationVersion values. +type GalleryApplicationVersionListPage struct { + fn func(context.Context, GalleryApplicationVersionList) (GalleryApplicationVersionList, error) + gavl GalleryApplicationVersionList } // NextWithContext advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. 
-func (page *DiskAccessListPage) NextWithContext(ctx context.Context) (err error) { +func (page *GalleryApplicationVersionListPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessListPage.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionListPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { @@ -2220,11 +7226,11 @@ func (page *DiskAccessListPage) NextWithContext(ctx context.Context) (err error) }() } for { - next, err := page.fn(ctx, page.dal) + next, err := page.fn(ctx, page.gavl) if err != nil { return err } - page.dal = next + page.gavl = next if !next.hasNextLink() || !next.IsEmpty() { break } @@ -2235,104 +7241,260 @@ func (page *DiskAccessListPage) NextWithContext(ctx context.Context) (err error) // Next advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (page *DiskAccessListPage) Next() error { +func (page *GalleryApplicationVersionListPage) Next() error { return page.NextWithContext(context.Background()) } // NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DiskAccessListPage) NotDone() bool { - return !page.dal.IsEmpty() +func (page GalleryApplicationVersionListPage) NotDone() bool { + return !page.gavl.IsEmpty() } // Response returns the raw server response from the last page request. -func (page DiskAccessListPage) Response() DiskAccessList { - return page.dal +func (page GalleryApplicationVersionListPage) Response() GalleryApplicationVersionList { + return page.gavl } // Values returns the slice of values for the current page or nil if there are no values. -func (page DiskAccessListPage) Values() []DiskAccess { - if page.dal.IsEmpty() { +func (page GalleryApplicationVersionListPage) Values() []GalleryApplicationVersion { + if page.gavl.IsEmpty() { return nil } - return *page.dal.Value + return *page.gavl.Value } -// Creates a new instance of the DiskAccessListPage type. -func NewDiskAccessListPage(cur DiskAccessList, getNextPage func(context.Context, DiskAccessList) (DiskAccessList, error)) DiskAccessListPage { - return DiskAccessListPage{ - fn: getNextPage, - dal: cur, +// Creates a new instance of the GalleryApplicationVersionListPage type. +func NewGalleryApplicationVersionListPage(cur GalleryApplicationVersionList, getNextPage func(context.Context, GalleryApplicationVersionList) (GalleryApplicationVersionList, error)) GalleryApplicationVersionListPage { + return GalleryApplicationVersionListPage{ + fn: getNextPage, + gavl: cur, } } -// DiskAccessProperties ... -type DiskAccessProperties struct { - // PrivateEndpointConnections - READ-ONLY; A readonly collection of private endpoint connections created on the disk. Currently only one endpoint connection is supported. - PrivateEndpointConnections *[]PrivateEndpointConnection `json:"privateEndpointConnections,omitempty"` - // ProvisioningState - READ-ONLY; The disk access resource provisioning state. - ProvisioningState *string `json:"provisioningState,omitempty"` - // TimeCreated - READ-ONLY; The time when the disk access was created. - TimeCreated *date.Time `json:"timeCreated,omitempty"` +// GalleryApplicationVersionProperties describes the properties of a gallery image version. 
+type GalleryApplicationVersionProperties struct { + PublishingProfile *GalleryApplicationVersionPublishingProfile `json:"publishingProfile,omitempty"` + // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. Possible values include: 'ProvisioningState1Creating', 'ProvisioningState1Updating', 'ProvisioningState1Failed', 'ProvisioningState1Succeeded', 'ProvisioningState1Deleting', 'ProvisioningState1Migrating' + ProvisioningState ProvisioningState1 `json:"provisioningState,omitempty"` + // ReplicationStatus - READ-ONLY + ReplicationStatus *ReplicationStatus `json:"replicationStatus,omitempty"` } -// MarshalJSON is the custom marshaler for DiskAccessProperties. -func (dap DiskAccessProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for GalleryApplicationVersionProperties. +func (gavp GalleryApplicationVersionProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) + if gavp.PublishingProfile != nil { + objectMap["publishingProfile"] = gavp.PublishingProfile + } return json.Marshal(objectMap) } -// DiskAccessUpdate used for updating a disk access resource. -type DiskAccessUpdate struct { - // Tags - Resource tags - Tags map[string]*string `json:"tags"` +// GalleryApplicationVersionPublishingProfile the publishing profile of a gallery image version. +type GalleryApplicationVersionPublishingProfile struct { + Source *UserArtifactSource `json:"source,omitempty"` + ManageActions *UserArtifactManage `json:"manageActions,omitempty"` + // EnableHealthCheck - Optional. Whether or not this application reports health. + EnableHealthCheck *bool `json:"enableHealthCheck,omitempty"` + // TargetRegions - The target regions where the Image Version is going to be replicated to. This property is updatable. + TargetRegions *[]TargetRegion `json:"targetRegions,omitempty"` + // ReplicaCount - The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable. + ReplicaCount *int32 `json:"replicaCount,omitempty"` + // ExcludeFromLatest - If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version. + ExcludeFromLatest *bool `json:"excludeFromLatest,omitempty"` + // PublishedDate - READ-ONLY; The timestamp for when the gallery image version is published. + PublishedDate *date.Time `json:"publishedDate,omitempty"` + // EndOfLifeDate - The end of life date of the gallery image version. This property can be used for decommissioning purposes. This property is updatable. + EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"` + // StorageAccountType - Specifies the storage account type to be used to store the image. This property is not updatable. Possible values include: 'StorageAccountTypeStandardLRS', 'StorageAccountTypeStandardZRS', 'StorageAccountTypePremiumLRS' + StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"` + // ReplicationMode - Optional parameter which specifies the mode to be used for replication. This property is not updatable. Possible values include: 'ReplicationModeFull', 'ReplicationModeShallow' + ReplicationMode ReplicationMode `json:"replicationMode,omitempty"` } -// MarshalJSON is the custom marshaler for DiskAccessUpdate. -func (dau DiskAccessUpdate) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for GalleryApplicationVersionPublishingProfile. 
+func (gavpp GalleryApplicationVersionPublishingProfile) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if dau.Tags != nil { - objectMap["tags"] = dau.Tags + if gavpp.Source != nil { + objectMap["source"] = gavpp.Source + } + if gavpp.ManageActions != nil { + objectMap["manageActions"] = gavpp.ManageActions + } + if gavpp.EnableHealthCheck != nil { + objectMap["enableHealthCheck"] = gavpp.EnableHealthCheck + } + if gavpp.TargetRegions != nil { + objectMap["targetRegions"] = gavpp.TargetRegions + } + if gavpp.ReplicaCount != nil { + objectMap["replicaCount"] = gavpp.ReplicaCount + } + if gavpp.ExcludeFromLatest != nil { + objectMap["excludeFromLatest"] = gavpp.ExcludeFromLatest + } + if gavpp.EndOfLifeDate != nil { + objectMap["endOfLifeDate"] = gavpp.EndOfLifeDate + } + if gavpp.StorageAccountType != "" { + objectMap["storageAccountType"] = gavpp.StorageAccountType + } + if gavpp.ReplicationMode != "" { + objectMap["replicationMode"] = gavpp.ReplicationMode + } + return json.Marshal(objectMap) +} + +// GalleryApplicationVersionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results +// of a long-running operation. +type GalleryApplicationVersionsCreateOrUpdateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(GalleryApplicationVersionsClient) (GalleryApplicationVersion, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleryApplicationVersionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleryApplicationVersionsCreateOrUpdateFuture.Result. +func (future *GalleryApplicationVersionsCreateOrUpdateFuture) result(client GalleryApplicationVersionsClient) (gav GalleryApplicationVersion, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + gav.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationVersionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gav.Response.Response, err = future.GetResult(sender); err == nil && gav.Response.Response.StatusCode != http.StatusNoContent { + gav, err = client.CreateOrUpdateResponder(gav.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsCreateOrUpdateFuture", "Result", gav.Response.Response, "Failure responding to request") + } + } + return +} + +// GalleryApplicationVersionsDeleteFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type GalleryApplicationVersionsDeleteFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(GalleryApplicationVersionsClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *GalleryApplicationVersionsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleryApplicationVersionsDeleteFuture.Result. +func (future *GalleryApplicationVersionsDeleteFuture) result(client GalleryApplicationVersionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationVersionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// GalleryApplicationVersionsUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type GalleryApplicationVersionsUpdateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(GalleryApplicationVersionsClient) (GalleryApplicationVersion, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleryApplicationVersionsUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleryApplicationVersionsUpdateFuture.Result. +func (future *GalleryApplicationVersionsUpdateFuture) result(client GalleryApplicationVersionsClient) (gav GalleryApplicationVersion, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + gav.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationVersionsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gav.Response.Response, err = future.GetResult(sender); err == nil && gav.Response.Response.StatusCode != http.StatusNoContent { + gav, err = client.UpdateResponder(gav.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsUpdateFuture", "Result", gav.Response.Response, "Failure responding to request") + } } - return json.Marshal(objectMap) + return } -// DiskEncryptionSet disk encryption set resource. -type DiskEncryptionSet struct { - autorest.Response `json:"-"` - Identity *EncryptionSetIdentity `json:"identity,omitempty"` - *EncryptionSetProperties `json:"properties,omitempty"` +// GalleryApplicationVersionUpdate specifies information about the gallery Application Version that you +// want to update. 
+type GalleryApplicationVersionUpdate struct { + *GalleryApplicationVersionProperties `json:"properties,omitempty"` // ID - READ-ONLY; Resource Id ID *string `json:"id,omitempty"` // Name - READ-ONLY; Resource name Name *string `json:"name,omitempty"` // Type - READ-ONLY; Resource type Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` // Tags - Resource tags Tags map[string]*string `json:"tags"` } -// MarshalJSON is the custom marshaler for DiskEncryptionSet. -func (desVar DiskEncryptionSet) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for GalleryApplicationVersionUpdate. +func (gavu GalleryApplicationVersionUpdate) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if desVar.Identity != nil { - objectMap["identity"] = desVar.Identity - } - if desVar.EncryptionSetProperties != nil { - objectMap["properties"] = desVar.EncryptionSetProperties - } - if desVar.Location != nil { - objectMap["location"] = desVar.Location + if gavu.GalleryApplicationVersionProperties != nil { + objectMap["properties"] = gavu.GalleryApplicationVersionProperties } - if desVar.Tags != nil { - objectMap["tags"] = desVar.Tags + if gavu.Tags != nil { + objectMap["tags"] = gavu.Tags } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for DiskEncryptionSet struct. -func (desVar *DiskEncryptionSet) UnmarshalJSON(body []byte) error { +// UnmarshalJSON is the custom unmarshaler for GalleryApplicationVersionUpdate struct. +func (gavu *GalleryApplicationVersionUpdate) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { @@ -2340,23 +7502,14 @@ func (desVar *DiskEncryptionSet) UnmarshalJSON(body []byte) error { } for k, v := range m { switch k { - case "identity": - if v != nil { - var identity EncryptionSetIdentity - err = json.Unmarshal(*v, &identity) - if err != nil { - return err - } - desVar.Identity = &identity - } case "properties": if v != nil { - var encryptionSetProperties EncryptionSetProperties - err = json.Unmarshal(*v, &encryptionSetProperties) + var galleryApplicationVersionProperties GalleryApplicationVersionProperties + err = json.Unmarshal(*v, &galleryApplicationVersionProperties) if err != nil { return err } - desVar.EncryptionSetProperties = &encryptionSetProperties + gavu.GalleryApplicationVersionProperties = &galleryApplicationVersionProperties } case "id": if v != nil { @@ -2365,7 +7518,7 @@ func (desVar *DiskEncryptionSet) UnmarshalJSON(body []byte) error { if err != nil { return err } - desVar.ID = &ID + gavu.ID = &ID } case "name": if v != nil { @@ -2374,7 +7527,7 @@ func (desVar *DiskEncryptionSet) UnmarshalJSON(body []byte) error { if err != nil { return err } - desVar.Name = &name + gavu.Name = &name } case "type": if v != nil { @@ -2383,16 +7536,7 @@ func (desVar *DiskEncryptionSet) UnmarshalJSON(body []byte) error { if err != nil { return err } - desVar.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - desVar.Location = &location + gavu.Type = &typeVar } case "tags": if v != nil { @@ -2401,339 +7545,161 @@ func (desVar *DiskEncryptionSet) UnmarshalJSON(body []byte) error { if err != nil { return err } - desVar.Tags = tags - } - } - } - - return nil -} - -// DiskEncryptionSetList the List disk encryption set operation response. 
-type DiskEncryptionSetList struct { - autorest.Response `json:"-"` - // Value - A list of disk encryption sets. - Value *[]DiskEncryptionSet `json:"value,omitempty"` - // NextLink - The uri to fetch the next page of disk encryption sets. Call ListNext() with this to fetch the next page of disk encryption sets. - NextLink *string `json:"nextLink,omitempty"` -} - -// DiskEncryptionSetListIterator provides access to a complete listing of DiskEncryptionSet values. -type DiskEncryptionSetListIterator struct { - i int - page DiskEncryptionSetListPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *DiskEncryptionSetListIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetListIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *DiskEncryptionSetListIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DiskEncryptionSetListIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter DiskEncryptionSetListIterator) Response() DiskEncryptionSetList { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter DiskEncryptionSetListIterator) Value() DiskEncryptionSet { - if !iter.page.NotDone() { - return DiskEncryptionSet{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the DiskEncryptionSetListIterator type. -func NewDiskEncryptionSetListIterator(page DiskEncryptionSetListPage) DiskEncryptionSetListIterator { - return DiskEncryptionSetListIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (desl DiskEncryptionSetList) IsEmpty() bool { - return desl.Value == nil || len(*desl.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (desl DiskEncryptionSetList) hasNextLink() bool { - return desl.NextLink != nil && len(*desl.NextLink) != 0 -} - -// diskEncryptionSetListPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (desl DiskEncryptionSetList) diskEncryptionSetListPreparer(ctx context.Context) (*http.Request, error) { - if !desl.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(desl.NextLink))) -} - -// DiskEncryptionSetListPage contains a page of DiskEncryptionSet values. 
-type DiskEncryptionSetListPage struct { - fn func(context.Context, DiskEncryptionSetList) (DiskEncryptionSetList, error) - desl DiskEncryptionSetList -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *DiskEncryptionSetListPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetListPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode + gavu.Tags = tags } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.desl) - if err != nil { - return err - } - page.desl = next - if !next.hasNextLink() || !next.IsEmpty() { - break } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *DiskEncryptionSetListPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DiskEncryptionSetListPage) NotDone() bool { - return !page.desl.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page DiskEncryptionSetListPage) Response() DiskEncryptionSetList { - return page.desl -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page DiskEncryptionSetListPage) Values() []DiskEncryptionSet { - if page.desl.IsEmpty() { - return nil - } - return *page.desl.Value -} - -// Creates a new instance of the DiskEncryptionSetListPage type. -func NewDiskEncryptionSetListPage(cur DiskEncryptionSetList, getNextPage func(context.Context, DiskEncryptionSetList) (DiskEncryptionSetList, error)) DiskEncryptionSetListPage { - return DiskEncryptionSetListPage{ - fn: getNextPage, - desl: cur, - } -} - -// DiskEncryptionSetParameters describes the parameter of customer managed disk encryption set resource id -// that can be specified for disk.
NOTE: The disk encryption set resource id can only be specified -// for managed disk. Please refer https://aka.ms/mdssewithcmkoverview for more details. -type DiskEncryptionSetParameters struct { - // ID - Resource Id - ID *string `json:"id,omitempty"` -} - -// DiskEncryptionSetsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type DiskEncryptionSetsCreateOrUpdateFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(DiskEncryptionSetsClient) (DiskEncryptionSet, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *DiskEncryptionSetsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result + } + return nil } -// result is the default implementation for DiskEncryptionSetsCreateOrUpdateFuture.Result. -func (future *DiskEncryptionSetsCreateOrUpdateFuture) result(client DiskEncryptionSetsClient) (desVar DiskEncryptionSet, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return +// GalleryArtifactPublishingProfileBase describes the basic gallery artifact publishing profile. +type GalleryArtifactPublishingProfileBase struct { + // TargetRegions - The target regions where the Image Version is going to be replicated to. This property is updatable. + TargetRegions *[]TargetRegion `json:"targetRegions,omitempty"` + // ReplicaCount - The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable. + ReplicaCount *int32 `json:"replicaCount,omitempty"` + // ExcludeFromLatest - If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version. + ExcludeFromLatest *bool `json:"excludeFromLatest,omitempty"` + // PublishedDate - READ-ONLY; The timestamp for when the gallery image version is published. + PublishedDate *date.Time `json:"publishedDate,omitempty"` + // EndOfLifeDate - The end of life date of the gallery image version. This property can be used for decommissioning purposes. This property is updatable. + EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"` + // StorageAccountType - Specifies the storage account type to be used to store the image. This property is not updatable. Possible values include: 'StorageAccountTypeStandardLRS', 'StorageAccountTypeStandardZRS', 'StorageAccountTypePremiumLRS' + StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"` + // ReplicationMode - Optional parameter which specifies the mode to be used for replication. This property is not updatable. Possible values include: 'ReplicationModeFull', 'ReplicationModeShallow' + ReplicationMode ReplicationMode `json:"replicationMode,omitempty"` +} + +// MarshalJSON is the custom marshaler for GalleryArtifactPublishingProfileBase. 
+func (gappb GalleryArtifactPublishingProfileBase) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if gappb.TargetRegions != nil { + objectMap["targetRegions"] = gappb.TargetRegions } - if !done { - desVar.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.DiskEncryptionSetsCreateOrUpdateFuture") - return + if gappb.ReplicaCount != nil { + objectMap["replicaCount"] = gappb.ReplicaCount } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if desVar.Response.Response, err = future.GetResult(sender); err == nil && desVar.Response.Response.StatusCode != http.StatusNoContent { - desVar, err = client.CreateOrUpdateResponder(desVar.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsCreateOrUpdateFuture", "Result", desVar.Response.Response, "Failure responding to request") - } + if gappb.ExcludeFromLatest != nil { + objectMap["excludeFromLatest"] = gappb.ExcludeFromLatest } - return + if gappb.EndOfLifeDate != nil { + objectMap["endOfLifeDate"] = gappb.EndOfLifeDate + } + if gappb.StorageAccountType != "" { + objectMap["storageAccountType"] = gappb.StorageAccountType + } + if gappb.ReplicationMode != "" { + objectMap["replicationMode"] = gappb.ReplicationMode + } + return json.Marshal(objectMap) } -// DiskEncryptionSetsDeleteFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type DiskEncryptionSetsDeleteFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(DiskEncryptionSetsClient) (autorest.Response, error) +// GalleryArtifactSource the source image from which the Image Version is going to be created. +type GalleryArtifactSource struct { + ManagedImage *ManagedArtifact `json:"managedImage,omitempty"` } -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *DiskEncryptionSetsDeleteFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil +// GalleryArtifactVersionSource the gallery artifact version source. +type GalleryArtifactVersionSource struct { + // ID - The id of the gallery artifact version source. Can specify a disk uri, snapshot uri, user image or storage account resource. + ID *string `json:"id,omitempty"` + // URI - The uri of the gallery artifact version source. Currently used to specify vhd/blob source. + URI *string `json:"uri,omitempty"` } -// result is the default implementation for DiskEncryptionSetsDeleteFuture.Result. -func (future *DiskEncryptionSetsDeleteFuture) result(client DiskEncryptionSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsDeleteFuture", "Result", future.Response(), "Polling failure") - return +// GalleryDataDiskImage this is the data disk image. +type GalleryDataDiskImage struct { + // Lun - This property specifies the logical unit number of the data disk. This value is used to identify data disks within the Virtual Machine and therefore must be unique for each data disk attached to the Virtual Machine. 
+ Lun *int32 `json:"lun,omitempty"` + // SizeInGB - READ-ONLY; This property indicates the size of the VHD to be created. + SizeInGB *int32 `json:"sizeInGB,omitempty"` + // HostCaching - The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'. Possible values include: 'HostCachingNone', 'HostCachingReadOnly', 'HostCachingReadWrite' + HostCaching HostCaching `json:"hostCaching,omitempty"` + Source *GalleryArtifactVersionSource `json:"source,omitempty"` +} + +// MarshalJSON is the custom marshaler for GalleryDataDiskImage. +func (gddi GalleryDataDiskImage) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if gddi.Lun != nil { + objectMap["lun"] = gddi.Lun } - if !done { - ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.DiskEncryptionSetsDeleteFuture") - return + if gddi.HostCaching != "" { + objectMap["hostCaching"] = gddi.HostCaching } - ar.Response = future.Response() - return + if gddi.Source != nil { + objectMap["source"] = gddi.Source + } + return json.Marshal(objectMap) } -// DiskEncryptionSetsUpdateFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type DiskEncryptionSetsUpdateFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(DiskEncryptionSetsClient) (DiskEncryptionSet, error) +// GalleryDiskImage this is the disk image base class. +type GalleryDiskImage struct { + // SizeInGB - READ-ONLY; This property indicates the size of the VHD to be created. + SizeInGB *int32 `json:"sizeInGB,omitempty"` + // HostCaching - The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'. Possible values include: 'HostCachingNone', 'HostCachingReadOnly', 'HostCachingReadWrite' + HostCaching HostCaching `json:"hostCaching,omitempty"` + Source *GalleryArtifactVersionSource `json:"source,omitempty"` } -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *DiskEncryptionSetsUpdateFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err +// MarshalJSON is the custom marshaler for GalleryDiskImage. +func (gdi GalleryDiskImage) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if gdi.HostCaching != "" { + objectMap["hostCaching"] = gdi.HostCaching } - future.FutureAPI = &azFuture - future.Result = future.result - return nil + if gdi.Source != nil { + objectMap["source"] = gdi.Source + } + return json.Marshal(objectMap) } -// result is the default implementation for DiskEncryptionSetsUpdateFuture.Result. 
-func (future *DiskEncryptionSetsUpdateFuture) result(client DiskEncryptionSetsClient) (desVar DiskEncryptionSet, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - desVar.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.DiskEncryptionSetsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if desVar.Response.Response, err = future.GetResult(sender); err == nil && desVar.Response.Response.StatusCode != http.StatusNoContent { - desVar, err = client.UpdateResponder(desVar.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsUpdateFuture", "Result", desVar.Response.Response, "Failure responding to request") - } - } - return +// GalleryIdentifier describes the gallery unique name. +type GalleryIdentifier struct { + // UniqueName - READ-ONLY; The unique name of the Shared Image Gallery. This name is generated automatically by Azure. + UniqueName *string `json:"uniqueName,omitempty"` } -// DiskEncryptionSettings describes a Encryption Settings for a Disk -type DiskEncryptionSettings struct { - // DiskEncryptionKey - Specifies the location of the disk encryption key, which is a Key Vault Secret. - DiskEncryptionKey *KeyVaultSecretReference `json:"diskEncryptionKey,omitempty"` - // KeyEncryptionKey - Specifies the location of the key encryption key in Key Vault. - KeyEncryptionKey *KeyVaultKeyReference `json:"keyEncryptionKey,omitempty"` - // Enabled - Specifies whether disk encryption should be enabled on the virtual machine. - Enabled *bool `json:"enabled,omitempty"` +// MarshalJSON is the custom marshaler for GalleryIdentifier. +func (gi GalleryIdentifier) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) } -// DiskEncryptionSetUpdate disk encryption set update resource. -type DiskEncryptionSetUpdate struct { - *DiskEncryptionSetUpdateProperties `json:"properties,omitempty"` +// GalleryImage specifies information about the gallery image definition that you want to create or update. +type GalleryImage struct { + autorest.Response `json:"-"` + *GalleryImageProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` // Tags - Resource tags - Tags map[string]*string `json:"tags"` - Identity *EncryptionSetIdentity `json:"identity,omitempty"` + Tags map[string]*string `json:"tags"` } -// MarshalJSON is the custom marshaler for DiskEncryptionSetUpdate. -func (desu DiskEncryptionSetUpdate) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for GalleryImage. 
+func (gi GalleryImage) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if desu.DiskEncryptionSetUpdateProperties != nil { - objectMap["properties"] = desu.DiskEncryptionSetUpdateProperties + if gi.GalleryImageProperties != nil { + objectMap["properties"] = gi.GalleryImageProperties } - if desu.Tags != nil { - objectMap["tags"] = desu.Tags + if gi.Location != nil { + objectMap["location"] = gi.Location } - if desu.Identity != nil { - objectMap["identity"] = desu.Identity + if gi.Tags != nil { + objectMap["tags"] = gi.Tags } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for DiskEncryptionSetUpdate struct. -func (desu *DiskEncryptionSetUpdate) UnmarshalJSON(body []byte) error { +// UnmarshalJSON is the custom unmarshaler for GalleryImage struct. +func (gi *GalleryImage) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { @@ -2743,30 +7709,57 @@ func (desu *DiskEncryptionSetUpdate) UnmarshalJSON(body []byte) error { switch k { case "properties": if v != nil { - var diskEncryptionSetUpdateProperties DiskEncryptionSetUpdateProperties - err = json.Unmarshal(*v, &diskEncryptionSetUpdateProperties) + var galleryImageProperties GalleryImageProperties + err = json.Unmarshal(*v, &galleryImageProperties) if err != nil { return err } - desu.DiskEncryptionSetUpdateProperties = &diskEncryptionSetUpdateProperties + gi.GalleryImageProperties = &galleryImageProperties } - case "tags": + case "id": if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) + var ID string + err = json.Unmarshal(*v, &ID) if err != nil { return err } - desu.Tags = tags + gi.ID = &ID } - case "identity": + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + gi.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + gi.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + gi.Location = &location + } + case "tags": if v != nil { - var identity EncryptionSetIdentity - err = json.Unmarshal(*v, &identity) + var tags map[string]*string + err = json.Unmarshal(*v, &tags) if err != nil { return err } - desu.Identity = &identity + gi.Tags = tags } } } @@ -2774,51 +7767,44 @@ func (desu *DiskEncryptionSetUpdate) UnmarshalJSON(body []byte) error { return nil } -// DiskEncryptionSetUpdateProperties disk encryption set resource update properties. -type DiskEncryptionSetUpdateProperties struct { - // EncryptionType - Possible values include: 'DiskEncryptionSetTypeEncryptionAtRestWithCustomerKey', 'DiskEncryptionSetTypeEncryptionAtRestWithPlatformAndCustomerKeys' - EncryptionType DiskEncryptionSetType `json:"encryptionType,omitempty"` - ActiveKey *KeyForDiskEncryptionSet `json:"activeKey,omitempty"` - // RotationToLatestKeyVersionEnabled - Set this flag to true to enable auto-updating of this disk encryption set to the latest key version. - RotationToLatestKeyVersionEnabled *bool `json:"rotationToLatestKeyVersionEnabled,omitempty"` -} - -// DiskImageEncryption this is the disk image encryption base class. -type DiskImageEncryption struct { - // DiskEncryptionSetID - A relative URI containing the resource ID of the disk encryption set. - DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty"` +// GalleryImageFeature a feature for gallery image. 
+type GalleryImageFeature struct { + // Name - The name of the gallery image feature. + Name *string `json:"name,omitempty"` + // Value - The value of the gallery image feature. + Value *string `json:"value,omitempty"` } -// DiskInstanceView the instance view of the disk. -type DiskInstanceView struct { - // Name - The disk name. - Name *string `json:"name,omitempty"` - // EncryptionSettings - Specifies the encryption settings for the OS Disk.
Minimum api-version: 2015-06-15 - EncryptionSettings *[]DiskEncryptionSettings `json:"encryptionSettings,omitempty"` - // Statuses - The resource status information. - Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +// GalleryImageIdentifier this is the gallery image definition identifier. +type GalleryImageIdentifier struct { + // Publisher - The name of the gallery image definition publisher. + Publisher *string `json:"publisher,omitempty"` + // Offer - The name of the gallery image definition offer. + Offer *string `json:"offer,omitempty"` + // Sku - The name of the gallery image definition SKU. + Sku *string `json:"sku,omitempty"` } -// DiskList the List Disks operation response. -type DiskList struct { +// GalleryImageList the List Gallery Images operation response. +type GalleryImageList struct { autorest.Response `json:"-"` - // Value - A list of disks. - Value *[]Disk `json:"value,omitempty"` - // NextLink - The uri to fetch the next page of disks. Call ListNext() with this to fetch the next page of disks. + // Value - A list of Shared Image Gallery images. + Value *[]GalleryImage `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of Image Definitions in the Shared Image Gallery. Call ListNext() with this to fetch the next page of gallery image definitions. NextLink *string `json:"nextLink,omitempty"` } -// DiskListIterator provides access to a complete listing of Disk values. -type DiskListIterator struct { +// GalleryImageListIterator provides access to a complete listing of GalleryImage values. +type GalleryImageListIterator struct { i int - page DiskListPage + page GalleryImageListPage } // NextWithContext advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. -func (iter *DiskListIterator) NextWithContext(ctx context.Context) (err error) { +func (iter *GalleryImageListIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DiskListIterator.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImageListIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { @@ -2843,67 +7829,67 @@ func (iter *DiskListIterator) NextWithContext(ctx context.Context) (err error) { // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (iter *DiskListIterator) Next() error { +func (iter *GalleryImageListIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DiskListIterator) NotDone() bool { +func (iter GalleryImageListIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. -func (iter DiskListIterator) Response() DiskList { +func (iter GalleryImageListIterator) Response() GalleryImageList { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. -func (iter DiskListIterator) Value() Disk { +func (iter GalleryImageListIterator) Value() GalleryImage { if !iter.page.NotDone() { - return Disk{} + return GalleryImage{} } return iter.page.Values()[iter.i] } -// Creates a new instance of the DiskListIterator type. 
-func NewDiskListIterator(page DiskListPage) DiskListIterator { - return DiskListIterator{page: page} +// Creates a new instance of the GalleryImageListIterator type. +func NewGalleryImageListIterator(page GalleryImageListPage) GalleryImageListIterator { + return GalleryImageListIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. -func (dl DiskList) IsEmpty() bool { - return dl.Value == nil || len(*dl.Value) == 0 +func (gil GalleryImageList) IsEmpty() bool { + return gil.Value == nil || len(*gil.Value) == 0 } // hasNextLink returns true if the NextLink is not empty. -func (dl DiskList) hasNextLink() bool { - return dl.NextLink != nil && len(*dl.NextLink) != 0 +func (gil GalleryImageList) hasNextLink() bool { + return gil.NextLink != nil && len(*gil.NextLink) != 0 } -// diskListPreparer prepares a request to retrieve the next set of results. +// galleryImageListPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. -func (dl DiskList) diskListPreparer(ctx context.Context) (*http.Request, error) { - if !dl.hasNextLink() { +func (gil GalleryImageList) galleryImageListPreparer(ctx context.Context) (*http.Request, error) { + if !gil.hasNextLink() { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), - autorest.WithBaseURL(to.String(dl.NextLink))) + autorest.WithBaseURL(to.String(gil.NextLink))) } -// DiskListPage contains a page of Disk values. -type DiskListPage struct { - fn func(context.Context, DiskList) (DiskList, error) - dl DiskList +// GalleryImageListPage contains a page of GalleryImage values. +type GalleryImageListPage struct { + fn func(context.Context, GalleryImageList) (GalleryImageList, error) + gil GalleryImageList } // NextWithContext advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. -func (page *DiskListPage) NextWithContext(ctx context.Context) (err error) { +func (page *GalleryImageListPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DiskListPage.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImageListPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { @@ -2913,11 +7899,11 @@ func (page *DiskListPage) NextWithContext(ctx context.Context) (err error) { }() } for { - next, err := page.fn(ctx, page.dl) + next, err := page.fn(ctx, page.gil) if err != nil { return err } - page.dl = next + page.gil = next if !next.hasNextLink() || !next.IsEmpty() { break } @@ -2928,176 +7914,259 @@ func (page *DiskListPage) NextWithContext(ctx context.Context) (err error) { // Next advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (page *DiskListPage) Next() error { +func (page *GalleryImageListPage) Next() error { return page.NextWithContext(context.Background()) } // NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DiskListPage) NotDone() bool { - return !page.dl.IsEmpty() +func (page GalleryImageListPage) NotDone() bool { + return !page.gil.IsEmpty() } // Response returns the raw server response from the last page request. 
-func (page DiskListPage) Response() DiskList { - return page.dl +func (page GalleryImageListPage) Response() GalleryImageList { + return page.gil } // Values returns the slice of values for the current page or nil if there are no values. -func (page DiskListPage) Values() []Disk { - if page.dl.IsEmpty() { +func (page GalleryImageListPage) Values() []GalleryImage { + if page.gil.IsEmpty() { return nil } - return *page.dl.Value + return *page.gil.Value } -// Creates a new instance of the DiskListPage type. -func NewDiskListPage(cur DiskList, getNextPage func(context.Context, DiskList) (DiskList, error)) DiskListPage { - return DiskListPage{ - fn: getNextPage, - dl: cur, +// Creates a new instance of the GalleryImageListPage type. +func NewGalleryImageListPage(cur GalleryImageList, getNextPage func(context.Context, GalleryImageList) (GalleryImageList, error)) GalleryImageListPage { + return GalleryImageListPage{ + fn: getNextPage, + gil: cur, } } -// DiskProperties disk resource properties. -type DiskProperties struct { - // TimeCreated - READ-ONLY; The time when the disk was created. - TimeCreated *date.Time `json:"timeCreated,omitempty"` - // OsType - The Operating System type. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux' +// GalleryImageProperties describes the properties of a gallery image definition. +type GalleryImageProperties struct { + // Description - The description of this gallery image definition resource. This property is updatable. + Description *string `json:"description,omitempty"` + // Eula - The Eula agreement for the gallery image definition. + Eula *string `json:"eula,omitempty"` + // PrivacyStatementURI - The privacy statement uri. + PrivacyStatementURI *string `json:"privacyStatementUri,omitempty"` + // ReleaseNoteURI - The release note uri. + ReleaseNoteURI *string `json:"releaseNoteUri,omitempty"` + // OsType - This property allows you to specify the type of the OS that is included in the disk when creating a VM from a managed image.
Possible values are: **Windows**,
**Linux**. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux' OsType OperatingSystemTypes `json:"osType,omitempty"` + // OsState - This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'. Possible values include: 'OperatingSystemStateTypesGeneralized', 'OperatingSystemStateTypesSpecialized' + OsState OperatingSystemStateTypes `json:"osState,omitempty"` // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'HyperVGenerationV1', 'HyperVGenerationV2' HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"` - // PurchasePlan - Purchase plan information for the the image from which the OS disk was created. E.g. - {name: 2019-Datacenter, publisher: MicrosoftWindowsServer, product: WindowsServer} - PurchasePlan *PurchasePlan `json:"purchasePlan,omitempty"` - // CreationData - Disk source information. CreationData information cannot be changed after the disk has been created. - CreationData *CreationData `json:"creationData,omitempty"` - // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the disk to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. - DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` - // DiskSizeBytes - READ-ONLY; The size of the disk in bytes. This field is read only. - DiskSizeBytes *int64 `json:"diskSizeBytes,omitempty"` - // UniqueID - READ-ONLY; Unique Guid identifying the resource. - UniqueID *string `json:"uniqueId,omitempty"` - // EncryptionSettingsCollection - Encryption settings collection used for Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot. - EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"` - // ProvisioningState - READ-ONLY; The disk provisioning state. - ProvisioningState *string `json:"provisioningState,omitempty"` - // DiskIOPSReadWrite - The number of IOPS allowed for this disk; only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes. - DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"` - // DiskMBpsReadWrite - The bandwidth allowed for this disk; only settable for UltraSSD disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10. - DiskMBpsReadWrite *int64 `json:"diskMBpsReadWrite,omitempty"` - // DiskIOPSReadOnly - The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes. - DiskIOPSReadOnly *int64 `json:"diskIOPSReadOnly,omitempty"` - // DiskMBpsReadOnly - The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10. - DiskMBpsReadOnly *int64 `json:"diskMBpsReadOnly,omitempty"` - // DiskState - The state of the disk. Possible values include: 'DiskStateUnattached', 'DiskStateAttached', 'DiskStateReserved', 'DiskStateActiveSAS', 'DiskStateReadyToUpload', 'DiskStateActiveUpload' - DiskState DiskState `json:"diskState,omitempty"` - // Encryption - Encryption property can be used to encrypt data at rest with customer managed keys or platform managed keys. 
- Encryption *Encryption `json:"encryption,omitempty"` - // MaxShares - The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time. - MaxShares *int32 `json:"maxShares,omitempty"` - // ShareInfo - READ-ONLY; Details of the list of all VMs that have the disk attached. maxShares should be set to a value greater than one for disks to allow attaching them to multiple VMs. - ShareInfo *[]ShareInfoElement `json:"shareInfo,omitempty"` - // NetworkAccessPolicy - Possible values include: 'NetworkAccessPolicyAllowAll', 'NetworkAccessPolicyAllowPrivate', 'NetworkAccessPolicyDenyAll' - NetworkAccessPolicy NetworkAccessPolicy `json:"networkAccessPolicy,omitempty"` - // DiskAccessID - ARM id of the DiskAccess resource for using private endpoints on disks. - DiskAccessID *string `json:"diskAccessId,omitempty"` - // Tier - Performance tier of the disk (e.g, P4, S10) as described here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/. Does not apply to Ultra disks. - Tier *string `json:"tier,omitempty"` - // BurstingEnabled - Set to true to enable bursting beyond the provisioned performance target of the disk. Bursting is disabled by default. Does not apply to Ultra disks. - BurstingEnabled *bool `json:"burstingEnabled,omitempty"` - // PropertyUpdatesInProgress - READ-ONLY; Properties of the disk for which update is pending. - PropertyUpdatesInProgress *PropertyUpdatesInProgress `json:"propertyUpdatesInProgress,omitempty"` - // SupportsHibernation - Indicates the OS on a disk supports hibernation. - SupportsHibernation *bool `json:"supportsHibernation,omitempty"` - // SecurityProfile - Contains the security related information for the resource. - SecurityProfile *DiskSecurityProfile `json:"securityProfile,omitempty"` + // EndOfLifeDate - The end of life date of the gallery image definition. This property can be used for decommissioning purposes. This property is updatable. + EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"` + Identifier *GalleryImageIdentifier `json:"identifier,omitempty"` + Recommended *RecommendedMachineConfiguration `json:"recommended,omitempty"` + Disallowed *Disallowed `json:"disallowed,omitempty"` + PurchasePlan *ImagePurchasePlan `json:"purchasePlan,omitempty"` + // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. Possible values include: 'ProvisioningState2Creating', 'ProvisioningState2Updating', 'ProvisioningState2Failed', 'ProvisioningState2Succeeded', 'ProvisioningState2Deleting', 'ProvisioningState2Migrating' + ProvisioningState ProvisioningState2 `json:"provisioningState,omitempty"` + // Features - A list of gallery image features. + Features *[]GalleryImageFeature `json:"features,omitempty"` } -// MarshalJSON is the custom marshaler for DiskProperties. -func (dp DiskProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for GalleryImageProperties. 
+func (gip GalleryImageProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if dp.OsType != "" { - objectMap["osType"] = dp.OsType + if gip.Description != nil { + objectMap["description"] = gip.Description } - if dp.HyperVGeneration != "" { - objectMap["hyperVGeneration"] = dp.HyperVGeneration + if gip.Eula != nil { + objectMap["eula"] = gip.Eula } - if dp.PurchasePlan != nil { - objectMap["purchasePlan"] = dp.PurchasePlan + if gip.PrivacyStatementURI != nil { + objectMap["privacyStatementUri"] = gip.PrivacyStatementURI } - if dp.CreationData != nil { - objectMap["creationData"] = dp.CreationData + if gip.ReleaseNoteURI != nil { + objectMap["releaseNoteUri"] = gip.ReleaseNoteURI } - if dp.DiskSizeGB != nil { - objectMap["diskSizeGB"] = dp.DiskSizeGB + if gip.OsType != "" { + objectMap["osType"] = gip.OsType } - if dp.EncryptionSettingsCollection != nil { - objectMap["encryptionSettingsCollection"] = dp.EncryptionSettingsCollection + if gip.OsState != "" { + objectMap["osState"] = gip.OsState } - if dp.DiskIOPSReadWrite != nil { - objectMap["diskIOPSReadWrite"] = dp.DiskIOPSReadWrite + if gip.HyperVGeneration != "" { + objectMap["hyperVGeneration"] = gip.HyperVGeneration } - if dp.DiskMBpsReadWrite != nil { - objectMap["diskMBpsReadWrite"] = dp.DiskMBpsReadWrite + if gip.EndOfLifeDate != nil { + objectMap["endOfLifeDate"] = gip.EndOfLifeDate } - if dp.DiskIOPSReadOnly != nil { - objectMap["diskIOPSReadOnly"] = dp.DiskIOPSReadOnly + if gip.Identifier != nil { + objectMap["identifier"] = gip.Identifier + } + if gip.Recommended != nil { + objectMap["recommended"] = gip.Recommended + } + if gip.Disallowed != nil { + objectMap["disallowed"] = gip.Disallowed + } + if gip.PurchasePlan != nil { + objectMap["purchasePlan"] = gip.PurchasePlan + } + if gip.Features != nil { + objectMap["features"] = gip.Features + } + return json.Marshal(objectMap) +} + +// GalleryImagesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type GalleryImagesCreateOrUpdateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(GalleryImagesClient) (GalleryImage, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleryImagesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err } - if dp.DiskMBpsReadOnly != nil { - objectMap["diskMBpsReadOnly"] = dp.DiskMBpsReadOnly + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleryImagesCreateOrUpdateFuture.Result. 
+func (future *GalleryImagesCreateOrUpdateFuture) result(client GalleryImagesClient) (gi GalleryImage, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return } - if dp.DiskState != "" { - objectMap["diskState"] = dp.DiskState + if !done { + gi.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesCreateOrUpdateFuture") + return } - if dp.Encryption != nil { - objectMap["encryption"] = dp.Encryption + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gi.Response.Response, err = future.GetResult(sender); err == nil && gi.Response.Response.StatusCode != http.StatusNoContent { + gi, err = client.CreateOrUpdateResponder(gi.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesCreateOrUpdateFuture", "Result", gi.Response.Response, "Failure responding to request") + } } - if dp.MaxShares != nil { - objectMap["maxShares"] = dp.MaxShares + return +} + +// GalleryImagesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type GalleryImagesDeleteFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(GalleryImagesClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *GalleryImagesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err } - if dp.NetworkAccessPolicy != "" { - objectMap["networkAccessPolicy"] = dp.NetworkAccessPolicy + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleryImagesDeleteFuture.Result. +func (future *GalleryImagesDeleteFuture) result(client GalleryImagesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesDeleteFuture", "Result", future.Response(), "Polling failure") + return } - if dp.DiskAccessID != nil { - objectMap["diskAccessId"] = dp.DiskAccessID + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesDeleteFuture") + return } - if dp.Tier != nil { - objectMap["tier"] = dp.Tier + ar.Response = future.Response() + return +} + +// GalleryImagesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type GalleryImagesUpdateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(GalleryImagesClient) (GalleryImage, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *GalleryImagesUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err } - if dp.BurstingEnabled != nil { - objectMap["burstingEnabled"] = dp.BurstingEnabled + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for GalleryImagesUpdateFuture.Result. +func (future *GalleryImagesUpdateFuture) result(client GalleryImagesClient) (gi GalleryImage, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesUpdateFuture", "Result", future.Response(), "Polling failure") + return } - if dp.SupportsHibernation != nil { - objectMap["supportsHibernation"] = dp.SupportsHibernation + if !done { + gi.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesUpdateFuture") + return } - if dp.SecurityProfile != nil { - objectMap["securityProfile"] = dp.SecurityProfile + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gi.Response.Response, err = future.GetResult(sender); err == nil && gi.Response.Response.StatusCode != http.StatusNoContent { + gi, err = client.UpdateResponder(gi.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesUpdateFuture", "Result", gi.Response.Response, "Failure responding to request") + } } - return json.Marshal(objectMap) + return } -// DiskRestorePoint properties of disk restore point -type DiskRestorePoint struct { - autorest.Response `json:"-"` - *DiskRestorePointProperties `json:"properties,omitempty"` +// GalleryImageUpdate specifies information about the gallery image definition that you want to update. +type GalleryImageUpdate struct { + *GalleryImageProperties `json:"properties,omitempty"` // ID - READ-ONLY; Resource Id ID *string `json:"id,omitempty"` // Name - READ-ONLY; Resource name Name *string `json:"name,omitempty"` // Type - READ-ONLY; Resource type Type *string `json:"type,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` } -// MarshalJSON is the custom marshaler for DiskRestorePoint. -func (drp DiskRestorePoint) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for GalleryImageUpdate. +func (giu GalleryImageUpdate) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if drp.DiskRestorePointProperties != nil { - objectMap["properties"] = drp.DiskRestorePointProperties + if giu.GalleryImageProperties != nil { + objectMap["properties"] = giu.GalleryImageProperties + } + if giu.Tags != nil { + objectMap["tags"] = giu.Tags } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for DiskRestorePoint struct. -func (drp *DiskRestorePoint) UnmarshalJSON(body []byte) error { +// UnmarshalJSON is the custom unmarshaler for GalleryImageUpdate struct. 
+func (giu *GalleryImageUpdate) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { @@ -3107,12 +8176,12 @@ func (drp *DiskRestorePoint) UnmarshalJSON(body []byte) error { switch k { case "properties": if v != nil { - var diskRestorePointProperties DiskRestorePointProperties - err = json.Unmarshal(*v, &diskRestorePointProperties) + var galleryImageProperties GalleryImageProperties + err = json.Unmarshal(*v, &galleryImageProperties) if err != nil { return err } - drp.DiskRestorePointProperties = &diskRestorePointProperties + giu.GalleryImageProperties = &galleryImageProperties } case "id": if v != nil { @@ -3121,7 +8190,7 @@ func (drp *DiskRestorePoint) UnmarshalJSON(body []byte) error { if err != nil { return err } - drp.ID = &ID + giu.ID = &ID } case "name": if v != nil { @@ -3130,7 +8199,7 @@ func (drp *DiskRestorePoint) UnmarshalJSON(body []byte) error { if err != nil { return err } - drp.Name = &name + giu.Name = &name } case "type": if v != nil { @@ -3139,7 +8208,16 @@ func (drp *DiskRestorePoint) UnmarshalJSON(body []byte) error { if err != nil { return err } - drp.Type = &typeVar + giu.Type = &typeVar + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + giu.Tags = tags } } } @@ -3147,69 +8225,127 @@ func (drp *DiskRestorePoint) UnmarshalJSON(body []byte) error { return nil } -// DiskRestorePointGrantAccessFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type DiskRestorePointGrantAccessFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(DiskRestorePointClient) (AccessURI, error) +// GalleryImageVersion specifies information about the gallery image version that you want to create or +// update. +type GalleryImageVersion struct { + autorest.Response `json:"-"` + *GalleryImageVersionProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` } -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *DiskRestorePointGrantAccessFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err +// MarshalJSON is the custom marshaler for GalleryImageVersion. +func (giv GalleryImageVersion) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if giv.GalleryImageVersionProperties != nil { + objectMap["properties"] = giv.GalleryImageVersionProperties } - future.FutureAPI = &azFuture - future.Result = future.result - return nil + if giv.Location != nil { + objectMap["location"] = giv.Location + } + if giv.Tags != nil { + objectMap["tags"] = giv.Tags + } + return json.Marshal(objectMap) } -// result is the default implementation for DiskRestorePointGrantAccessFuture.Result. 
-func (future *DiskRestorePointGrantAccessFuture) result(client DiskRestorePointClient) (au AccessURI, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) +// UnmarshalJSON is the custom unmarshaler for GalleryImageVersion struct. +func (giv *GalleryImageVersion) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskRestorePointGrantAccessFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - au.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.DiskRestorePointGrantAccessFuture") - return + return err } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if au.Response.Response, err = future.GetResult(sender); err == nil && au.Response.Response.StatusCode != http.StatusNoContent { - au, err = client.GrantAccessResponder(au.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskRestorePointGrantAccessFuture", "Result", au.Response.Response, "Failure responding to request") + for k, v := range m { + switch k { + case "properties": + if v != nil { + var galleryImageVersionProperties GalleryImageVersionProperties + err = json.Unmarshal(*v, &galleryImageVersionProperties) + if err != nil { + return err + } + giv.GalleryImageVersionProperties = &galleryImageVersionProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + giv.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + giv.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + giv.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + giv.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + giv.Tags = tags + } } } - return + + return nil } -// DiskRestorePointList the List Disk Restore Points operation response. -type DiskRestorePointList struct { +// GalleryImageVersionList the List Gallery Image version operation response. +type GalleryImageVersionList struct { autorest.Response `json:"-"` - // Value - A list of disk restore points. - Value *[]DiskRestorePoint `json:"value,omitempty"` - // NextLink - The uri to fetch the next page of disk restore points. Call ListNext() with this to fetch the next page of disk restore points. + // Value - A list of gallery image versions. + Value *[]GalleryImageVersion `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of gallery image versions. Call ListNext() with this to fetch the next page of gallery image versions. NextLink *string `json:"nextLink,omitempty"` } -// DiskRestorePointListIterator provides access to a complete listing of DiskRestorePoint values. -type DiskRestorePointListIterator struct { +// GalleryImageVersionListIterator provides access to a complete listing of GalleryImageVersion values. +type GalleryImageVersionListIterator struct { i int - page DiskRestorePointListPage + page GalleryImageVersionListPage } // NextWithContext advances to the next value. 
If there was an error making // the request the iterator does not advance and the error is returned. -func (iter *DiskRestorePointListIterator) NextWithContext(ctx context.Context) (err error) { +func (iter *GalleryImageVersionListIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DiskRestorePointListIterator.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImageVersionListIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { @@ -3234,67 +8370,67 @@ func (iter *DiskRestorePointListIterator) NextWithContext(ctx context.Context) ( // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (iter *DiskRestorePointListIterator) Next() error { +func (iter *GalleryImageVersionListIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DiskRestorePointListIterator) NotDone() bool { +func (iter GalleryImageVersionListIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. -func (iter DiskRestorePointListIterator) Response() DiskRestorePointList { +func (iter GalleryImageVersionListIterator) Response() GalleryImageVersionList { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. -func (iter DiskRestorePointListIterator) Value() DiskRestorePoint { +func (iter GalleryImageVersionListIterator) Value() GalleryImageVersion { if !iter.page.NotDone() { - return DiskRestorePoint{} + return GalleryImageVersion{} } return iter.page.Values()[iter.i] } -// Creates a new instance of the DiskRestorePointListIterator type. -func NewDiskRestorePointListIterator(page DiskRestorePointListPage) DiskRestorePointListIterator { - return DiskRestorePointListIterator{page: page} +// Creates a new instance of the GalleryImageVersionListIterator type. +func NewGalleryImageVersionListIterator(page GalleryImageVersionListPage) GalleryImageVersionListIterator { + return GalleryImageVersionListIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. -func (drpl DiskRestorePointList) IsEmpty() bool { - return drpl.Value == nil || len(*drpl.Value) == 0 +func (givl GalleryImageVersionList) IsEmpty() bool { + return givl.Value == nil || len(*givl.Value) == 0 } // hasNextLink returns true if the NextLink is not empty. -func (drpl DiskRestorePointList) hasNextLink() bool { - return drpl.NextLink != nil && len(*drpl.NextLink) != 0 +func (givl GalleryImageVersionList) hasNextLink() bool { + return givl.NextLink != nil && len(*givl.NextLink) != 0 } -// diskRestorePointListPreparer prepares a request to retrieve the next set of results. +// galleryImageVersionListPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. 
-func (drpl DiskRestorePointList) diskRestorePointListPreparer(ctx context.Context) (*http.Request, error) { - if !drpl.hasNextLink() { +func (givl GalleryImageVersionList) galleryImageVersionListPreparer(ctx context.Context) (*http.Request, error) { + if !givl.hasNextLink() { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), - autorest.WithBaseURL(to.String(drpl.NextLink))) + autorest.WithBaseURL(to.String(givl.NextLink))) } -// DiskRestorePointListPage contains a page of DiskRestorePoint values. -type DiskRestorePointListPage struct { - fn func(context.Context, DiskRestorePointList) (DiskRestorePointList, error) - drpl DiskRestorePointList +// GalleryImageVersionListPage contains a page of GalleryImageVersion values. +type GalleryImageVersionListPage struct { + fn func(context.Context, GalleryImageVersionList) (GalleryImageVersionList, error) + givl GalleryImageVersionList } // NextWithContext advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. -func (page *DiskRestorePointListPage) NextWithContext(ctx context.Context) (err error) { +func (page *GalleryImageVersionListPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DiskRestorePointListPage.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImageVersionListPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { @@ -3304,11 +8440,11 @@ func (page *DiskRestorePointListPage) NextWithContext(ctx context.Context) (err }() } for { - next, err := page.fn(ctx, page.drpl) + next, err := page.fn(ctx, page.givl) if err != nil { return err } - page.drpl = next + page.givl = next if !next.hasNextLink() || !next.IsEmpty() { break } @@ -3319,206 +8455,111 @@ func (page *DiskRestorePointListPage) NextWithContext(ctx context.Context) (err // Next advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (page *DiskRestorePointListPage) Next() error { +func (page *GalleryImageVersionListPage) Next() error { return page.NextWithContext(context.Background()) } // NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DiskRestorePointListPage) NotDone() bool { - return !page.drpl.IsEmpty() +func (page GalleryImageVersionListPage) NotDone() bool { + return !page.givl.IsEmpty() } // Response returns the raw server response from the last page request. -func (page DiskRestorePointListPage) Response() DiskRestorePointList { - return page.drpl +func (page GalleryImageVersionListPage) Response() GalleryImageVersionList { + return page.givl } // Values returns the slice of values for the current page or nil if there are no values. -func (page DiskRestorePointListPage) Values() []DiskRestorePoint { - if page.drpl.IsEmpty() { +func (page GalleryImageVersionListPage) Values() []GalleryImageVersion { + if page.givl.IsEmpty() { return nil } - return *page.drpl.Value + return *page.givl.Value } -// Creates a new instance of the DiskRestorePointListPage type. -func NewDiskRestorePointListPage(cur DiskRestorePointList, getNextPage func(context.Context, DiskRestorePointList) (DiskRestorePointList, error)) DiskRestorePointListPage { - return DiskRestorePointListPage{ +// Creates a new instance of the GalleryImageVersionListPage type. 
+func NewGalleryImageVersionListPage(cur GalleryImageVersionList, getNextPage func(context.Context, GalleryImageVersionList) (GalleryImageVersionList, error)) GalleryImageVersionListPage { + return GalleryImageVersionListPage{ fn: getNextPage, - drpl: cur, + givl: cur, } } -// DiskRestorePointProperties properties of an incremental disk restore point -type DiskRestorePointProperties struct { - // TimeCreated - READ-ONLY; The timestamp of restorePoint creation - TimeCreated *date.Time `json:"timeCreated,omitempty"` - // SourceResourceID - READ-ONLY; arm id of source disk - SourceResourceID *string `json:"sourceResourceId,omitempty"` - // OsType - READ-ONLY; The Operating System type. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux' - OsType OperatingSystemTypes `json:"osType,omitempty"` - // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'HyperVGenerationV1', 'HyperVGenerationV2' - HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"` - // PurchasePlan - Purchase plan information for the the image from which the OS disk was created. - PurchasePlan *PurchasePlan `json:"purchasePlan,omitempty"` - // FamilyID - READ-ONLY; id of the backing snapshot's MIS family - FamilyID *string `json:"familyId,omitempty"` - // SourceUniqueID - READ-ONLY; unique incarnation id of the source disk - SourceUniqueID *string `json:"sourceUniqueId,omitempty"` - // Encryption - READ-ONLY; Encryption property can be used to encrypt data at rest with customer managed keys or platform managed keys. - Encryption *Encryption `json:"encryption,omitempty"` - // SupportsHibernation - Indicates the OS on a disk supports hibernation. - SupportsHibernation *bool `json:"supportsHibernation,omitempty"` +// GalleryImageVersionProperties describes the properties of a gallery image version. +type GalleryImageVersionProperties struct { + PublishingProfile *GalleryImageVersionPublishingProfile `json:"publishingProfile,omitempty"` + // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. Possible values include: 'ProvisioningState3Creating', 'ProvisioningState3Updating', 'ProvisioningState3Failed', 'ProvisioningState3Succeeded', 'ProvisioningState3Deleting', 'ProvisioningState3Migrating' + ProvisioningState ProvisioningState3 `json:"provisioningState,omitempty"` + StorageProfile *GalleryImageVersionStorageProfile `json:"storageProfile,omitempty"` + // ReplicationStatus - READ-ONLY + ReplicationStatus *ReplicationStatus `json:"replicationStatus,omitempty"` } -// MarshalJSON is the custom marshaler for DiskRestorePointProperties. -func (drpp DiskRestorePointProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for GalleryImageVersionProperties. 
+func (givp GalleryImageVersionProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if drpp.HyperVGeneration != "" { - objectMap["hyperVGeneration"] = drpp.HyperVGeneration - } - if drpp.PurchasePlan != nil { - objectMap["purchasePlan"] = drpp.PurchasePlan + if givp.PublishingProfile != nil { + objectMap["publishingProfile"] = givp.PublishingProfile } - if drpp.SupportsHibernation != nil { - objectMap["supportsHibernation"] = drpp.SupportsHibernation + if givp.StorageProfile != nil { + objectMap["storageProfile"] = givp.StorageProfile } return json.Marshal(objectMap) } -// DiskRestorePointRevokeAccessFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type DiskRestorePointRevokeAccessFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(DiskRestorePointClient) (autorest.Response, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *DiskRestorePointRevokeAccessFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for DiskRestorePointRevokeAccessFuture.Result. -func (future *DiskRestorePointRevokeAccessFuture) result(client DiskRestorePointClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DiskRestorePointRevokeAccessFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.DiskRestorePointRevokeAccessFuture") - return - } - ar.Response = future.Response() - return -} - -// DisksCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type DisksCreateOrUpdateFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(DisksClient) (Disk, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *DisksCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for DisksCreateOrUpdateFuture.Result. 
-func (future *DisksCreateOrUpdateFuture) result(client DisksClient) (d Disk, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - d.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.DisksCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent { - d, err = client.CreateOrUpdateResponder(d.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", d.Response.Response, "Failure responding to request") - } - } - return -} - -// DisksDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. -type DisksDeleteFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(DisksClient) (autorest.Response, error) +// GalleryImageVersionPublishingProfile the publishing profile of a gallery image Version. +type GalleryImageVersionPublishingProfile struct { + // TargetRegions - The target regions where the Image Version is going to be replicated to. This property is updatable. + TargetRegions *[]TargetRegion `json:"targetRegions,omitempty"` + // ReplicaCount - The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable. + ReplicaCount *int32 `json:"replicaCount,omitempty"` + // ExcludeFromLatest - If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version. + ExcludeFromLatest *bool `json:"excludeFromLatest,omitempty"` + // PublishedDate - READ-ONLY; The timestamp for when the gallery image version is published. + PublishedDate *date.Time `json:"publishedDate,omitempty"` + // EndOfLifeDate - The end of life date of the gallery image version. This property can be used for decommissioning purposes. This property is updatable. + EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"` + // StorageAccountType - Specifies the storage account type to be used to store the image. This property is not updatable. Possible values include: 'StorageAccountTypeStandardLRS', 'StorageAccountTypeStandardZRS', 'StorageAccountTypePremiumLRS' + StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"` + // ReplicationMode - Optional parameter which specifies the mode to be used for replication. This property is not updatable. Possible values include: 'ReplicationModeFull', 'ReplicationModeShallow' + ReplicationMode ReplicationMode `json:"replicationMode,omitempty"` } -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *DisksDeleteFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err +// MarshalJSON is the custom marshaler for GalleryImageVersionPublishingProfile. 
+func (givpp GalleryImageVersionPublishingProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if givpp.TargetRegions != nil { + objectMap["targetRegions"] = givpp.TargetRegions } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for DisksDeleteFuture.Result. -func (future *DisksDeleteFuture) result(client DisksClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksDeleteFuture", "Result", future.Response(), "Polling failure") - return + if givpp.ReplicaCount != nil { + objectMap["replicaCount"] = givpp.ReplicaCount } - if !done { - ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.DisksDeleteFuture") - return + if givpp.ExcludeFromLatest != nil { + objectMap["excludeFromLatest"] = givpp.ExcludeFromLatest } - ar.Response = future.Response() - return -} - -// DiskSecurityProfile contains the security related information for the resource. -type DiskSecurityProfile struct { - // SecurityType - Possible values include: 'DiskSecurityTypesTrustedLaunch' - SecurityType DiskSecurityTypes `json:"securityType,omitempty"` + if givpp.EndOfLifeDate != nil { + objectMap["endOfLifeDate"] = givpp.EndOfLifeDate + } + if givpp.StorageAccountType != "" { + objectMap["storageAccountType"] = givpp.StorageAccountType + } + if givpp.ReplicationMode != "" { + objectMap["replicationMode"] = givpp.ReplicationMode + } + return json.Marshal(objectMap) } -// DisksGrantAccessFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type DisksGrantAccessFuture struct { +// GalleryImageVersionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type GalleryImageVersionsCreateOrUpdateFuture struct { azure.FutureAPI // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. - Result func(DisksClient) (AccessURI, error) + Result func(GalleryImageVersionsClient) (GalleryImageVersion, error) } // UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *DisksGrantAccessFuture) UnmarshalJSON(body []byte) error { +func (future *GalleryImageVersionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { var azFuture azure.Future if err := json.Unmarshal(body, &azFuture); err != nil { return err @@ -3528,58 +8569,40 @@ func (future *DisksGrantAccessFuture) UnmarshalJSON(body []byte) error { return nil } -// result is the default implementation for DisksGrantAccessFuture.Result. -func (future *DisksGrantAccessFuture) result(client DisksClient) (au AccessURI, err error) { +// result is the default implementation for GalleryImageVersionsCreateOrUpdateFuture.Result. 
+func (future *GalleryImageVersionsCreateOrUpdateFuture) result(client GalleryImageVersionsClient) (giv GalleryImageVersion, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", future.Response(), "Polling failure") + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") return } if !done { - au.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.DisksGrantAccessFuture") + giv.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsCreateOrUpdateFuture") return } sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if au.Response.Response, err = future.GetResult(sender); err == nil && au.Response.Response.StatusCode != http.StatusNoContent { - au, err = client.GrantAccessResponder(au.Response.Response) + if giv.Response.Response, err = future.GetResult(sender); err == nil && giv.Response.Response.StatusCode != http.StatusNoContent { + giv, err = client.CreateOrUpdateResponder(giv.Response.Response) if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", au.Response.Response, "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsCreateOrUpdateFuture", "Result", giv.Response.Response, "Failure responding to request") } } return } -// DiskSku the disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, UltraSSD_LRS, -// Premium_ZRS, or StandardSSD_ZRS. -type DiskSku struct { - // Name - The sku name. Possible values include: 'DiskStorageAccountTypesStandardLRS', 'DiskStorageAccountTypesPremiumLRS', 'DiskStorageAccountTypesStandardSSDLRS', 'DiskStorageAccountTypesUltraSSDLRS', 'DiskStorageAccountTypesPremiumZRS', 'DiskStorageAccountTypesStandardSSDZRS' - Name DiskStorageAccountTypes `json:"name,omitempty"` - // Tier - READ-ONLY; The sku tier. - Tier *string `json:"tier,omitempty"` -} - -// MarshalJSON is the custom marshaler for DiskSku. -func (ds DiskSku) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ds.Name != "" { - objectMap["name"] = ds.Name - } - return json.Marshal(objectMap) -} - -// DisksRevokeAccessFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type DisksRevokeAccessFuture struct { +// GalleryImageVersionsDeleteFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type GalleryImageVersionsDeleteFuture struct { azure.FutureAPI // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. - Result func(DisksClient) (autorest.Response, error) + Result func(GalleryImageVersionsClient) (autorest.Response, error) } // UnmarshalJSON is the custom unmarshaller for CreateFuture. 
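A rough sketch of the wait-then-Result pattern these long-running-operation futures support, assuming the usual generated CreateOrUpdate method on GalleryImageVersionsClient. The CreateOrUpdate signature, the GalleryArtifactVersionSource.ID field and the import paths are assumptions not shown in this excerpt; WaitForCompletionRef comes from the underlying azure.Future.

package sketch

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// createGalleryImageVersion starts the operation, blocks until it reaches a
// terminal state, then decodes the final response via the future's Result field.
func createGalleryImageVersion(ctx context.Context, client compute.GalleryImageVersionsClient, resourceGroup, gallery, image, version, sourceImageID string) (compute.GalleryImageVersion, error) {
	payload := compute.GalleryImageVersion{
		Location: to.StringPtr("westeurope"),
		GalleryImageVersionProperties: &compute.GalleryImageVersionProperties{
			PublishingProfile: &compute.GalleryImageVersionPublishingProfile{
				ReplicaCount:      to.Int32Ptr(1),
				ExcludeFromLatest: to.BoolPtr(false),
			},
			StorageProfile: &compute.GalleryImageVersionStorageProfile{
				Source: &compute.GalleryArtifactVersionSource{ID: to.StringPtr(sourceImageID)}, // assumed field name
			},
		},
	}

	future, err := client.CreateOrUpdate(ctx, resourceGroup, gallery, image, version, payload) // assumed signature
	if err != nil {
		return compute.GalleryImageVersion{}, err
	}
	// Poll until the LRO completes, then hand the final response to the Result closure above.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return compute.GalleryImageVersion{}, err
	}
	return future.Result(client)
}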
-func (future *DisksRevokeAccessFuture) UnmarshalJSON(body []byte) error { +func (future *GalleryImageVersionsDeleteFuture) UnmarshalJSON(body []byte) error { var azFuture azure.Future if err := json.Unmarshal(body, &azFuture); err != nil { return err @@ -3589,33 +8612,42 @@ func (future *DisksRevokeAccessFuture) UnmarshalJSON(body []byte) error { return nil } -// result is the default implementation for DisksRevokeAccessFuture.Result. -func (future *DisksRevokeAccessFuture) result(client DisksClient) (ar autorest.Response, err error) { +// result is the default implementation for GalleryImageVersionsDeleteFuture.Result. +func (future *GalleryImageVersionsDeleteFuture) result(client GalleryImageVersionsClient) (ar autorest.Response, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksRevokeAccessFuture", "Result", future.Response(), "Polling failure") + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsDeleteFuture", "Result", future.Response(), "Polling failure") return } if !done { ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.DisksRevokeAccessFuture") + err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsDeleteFuture") return } ar.Response = future.Response() return } -// DisksUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. -type DisksUpdateFuture struct { +// GalleryImageVersionStorageProfile this is the storage profile of a Gallery Image Version. +type GalleryImageVersionStorageProfile struct { + Source *GalleryArtifactVersionSource `json:"source,omitempty"` + OsDiskImage *GalleryOSDiskImage `json:"osDiskImage,omitempty"` + // DataDiskImages - A list of data disk images. + DataDiskImages *[]GalleryDataDiskImage `json:"dataDiskImages,omitempty"` +} + +// GalleryImageVersionsUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type GalleryImageVersionsUpdateFuture struct { azure.FutureAPI // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. - Result func(DisksClient) (Disk, error) + Result func(GalleryImageVersionsClient) (GalleryImageVersion, error) } // UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *DisksUpdateFuture) UnmarshalJSON(body []byte) error { +func (future *GalleryImageVersionsUpdateFuture) UnmarshalJSON(body []byte) error { var azFuture azure.Future if err := json.Unmarshal(body, &azFuture); err != nil { return err @@ -3625,54 +8657,56 @@ func (future *DisksUpdateFuture) UnmarshalJSON(body []byte) error { return nil } -// result is the default implementation for DisksUpdateFuture.Result. -func (future *DisksUpdateFuture) result(client DisksClient) (d Disk, err error) { +// result is the default implementation for GalleryImageVersionsUpdateFuture.Result. 
+func (future *GalleryImageVersionsUpdateFuture) result(client GalleryImageVersionsClient) (giv GalleryImageVersion, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", future.Response(), "Polling failure") + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsUpdateFuture", "Result", future.Response(), "Polling failure") return } if !done { - d.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.DisksUpdateFuture") + giv.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsUpdateFuture") return } sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent { - d, err = client.UpdateResponder(d.Response.Response) + if giv.Response.Response, err = future.GetResult(sender); err == nil && giv.Response.Response.StatusCode != http.StatusNoContent { + giv, err = client.UpdateResponder(giv.Response.Response) if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", d.Response.Response, "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsUpdateFuture", "Result", giv.Response.Response, "Failure responding to request") } } return } -// DiskUpdate disk update resource. -type DiskUpdate struct { - *DiskUpdateProperties `json:"properties,omitempty"` +// GalleryImageVersionUpdate specifies information about the gallery image version that you want to update. +type GalleryImageVersionUpdate struct { + *GalleryImageVersionProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` // Tags - Resource tags Tags map[string]*string `json:"tags"` - Sku *DiskSku `json:"sku,omitempty"` } -// MarshalJSON is the custom marshaler for DiskUpdate. -func (du DiskUpdate) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for GalleryImageVersionUpdate. +func (givu GalleryImageVersionUpdate) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if du.DiskUpdateProperties != nil { - objectMap["properties"] = du.DiskUpdateProperties - } - if du.Tags != nil { - objectMap["tags"] = du.Tags + if givu.GalleryImageVersionProperties != nil { + objectMap["properties"] = givu.GalleryImageVersionProperties } - if du.Sku != nil { - objectMap["sku"] = du.Sku + if givu.Tags != nil { + objectMap["tags"] = givu.Tags } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for DiskUpdate struct. -func (du *DiskUpdate) UnmarshalJSON(body []byte) error { +// UnmarshalJSON is the custom unmarshaler for GalleryImageVersionUpdate struct. 
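A small sketch of what the custom marshalers above put on the wire: the embedded *GalleryImageVersionProperties is nested under a top-level "properties" key and READ-ONLY fields (id, name, type, provisioningState, replicationStatus) are omitted, which follows from the MarshalJSON implementations shown. Only the import path is an assumption.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	update := compute.GalleryImageVersionUpdate{
		GalleryImageVersionProperties: &compute.GalleryImageVersionProperties{
			PublishingProfile: &compute.GalleryImageVersionPublishingProfile{
				ExcludeFromLatest: to.BoolPtr(true),
			},
		},
		Tags: map[string]*string{"env": to.StringPtr("test")},
	}

	body, err := json.Marshal(update)
	if err != nil {
		panic(err)
	}
	// Expected shape (READ-ONLY id/name/type are dropped by the custom marshaler):
	// {"properties":{"publishingProfile":{"excludeFromLatest":true}},"tags":{"env":"test"}}
	fmt.Println(string(body))
}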
+func (givu *GalleryImageVersionUpdate) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { @@ -3682,30 +8716,48 @@ func (du *DiskUpdate) UnmarshalJSON(body []byte) error { switch k { case "properties": if v != nil { - var diskUpdateProperties DiskUpdateProperties - err = json.Unmarshal(*v, &diskUpdateProperties) + var galleryImageVersionProperties GalleryImageVersionProperties + err = json.Unmarshal(*v, &galleryImageVersionProperties) if err != nil { return err } - du.DiskUpdateProperties = &diskUpdateProperties + givu.GalleryImageVersionProperties = &galleryImageVersionProperties } - case "tags": + case "id": if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) + var ID string + err = json.Unmarshal(*v, &ID) if err != nil { return err } - du.Tags = tags + givu.ID = &ID } - case "sku": + case "name": if v != nil { - var sku DiskSku - err = json.Unmarshal(*v, &sku) + var name string + err = json.Unmarshal(*v, &name) if err != nil { return err } - du.Sku = &sku + givu.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + givu.Type = &typeVar + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + givu.Tags = tags } } } @@ -3713,277 +8765,226 @@ func (du *DiskUpdate) UnmarshalJSON(body []byte) error { return nil } -// DiskUpdateProperties disk resource update properties. -type DiskUpdateProperties struct { - // OsType - the Operating System type. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux' - OsType OperatingSystemTypes `json:"osType,omitempty"` - // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the disk to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. - DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` - // EncryptionSettingsCollection - Encryption settings collection used be Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot. - EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"` - // DiskIOPSReadWrite - The number of IOPS allowed for this disk; only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes. - DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"` - // DiskMBpsReadWrite - The bandwidth allowed for this disk; only settable for UltraSSD disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10. - DiskMBpsReadWrite *int64 `json:"diskMBpsReadWrite,omitempty"` - // DiskIOPSReadOnly - The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes. - DiskIOPSReadOnly *int64 `json:"diskIOPSReadOnly,omitempty"` - // DiskMBpsReadOnly - The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10. - DiskMBpsReadOnly *int64 `json:"diskMBpsReadOnly,omitempty"` - // MaxShares - The maximum number of VMs that can attach to the disk at the same time. 
Value greater than one indicates a disk that can be mounted on multiple VMs at the same time. - MaxShares *int32 `json:"maxShares,omitempty"` - // Encryption - Encryption property can be used to encrypt data at rest with customer managed keys or platform managed keys. - Encryption *Encryption `json:"encryption,omitempty"` - // NetworkAccessPolicy - Possible values include: 'NetworkAccessPolicyAllowAll', 'NetworkAccessPolicyAllowPrivate', 'NetworkAccessPolicyDenyAll' - NetworkAccessPolicy NetworkAccessPolicy `json:"networkAccessPolicy,omitempty"` - // DiskAccessID - ARM id of the DiskAccess resource for using private endpoints on disks. - DiskAccessID *string `json:"diskAccessId,omitempty"` - // Tier - Performance tier of the disk (e.g, P4, S10) as described here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/. Does not apply to Ultra disks. - Tier *string `json:"tier,omitempty"` - // BurstingEnabled - Set to true to enable bursting beyond the provisioned performance target of the disk. Bursting is disabled by default. Does not apply to Ultra disks. - BurstingEnabled *bool `json:"burstingEnabled,omitempty"` - // PurchasePlan - Purchase plan information to be added on the OS disk - PurchasePlan *PurchasePlan `json:"purchasePlan,omitempty"` - // PropertyUpdatesInProgress - READ-ONLY; Properties of the disk for which update is pending. - PropertyUpdatesInProgress *PropertyUpdatesInProgress `json:"propertyUpdatesInProgress,omitempty"` - // SupportsHibernation - Indicates the OS on a disk supports hibernation. - SupportsHibernation *bool `json:"supportsHibernation,omitempty"` +// GalleryList the List Galleries operation response. +type GalleryList struct { + autorest.Response `json:"-"` + // Value - A list of galleries. + Value *[]Gallery `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of galleries. Call ListNext() with this to fetch the next page of galleries. + NextLink *string `json:"nextLink,omitempty"` } -// MarshalJSON is the custom marshaler for DiskUpdateProperties. -func (dup DiskUpdateProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dup.OsType != "" { - objectMap["osType"] = dup.OsType - } - if dup.DiskSizeGB != nil { - objectMap["diskSizeGB"] = dup.DiskSizeGB - } - if dup.EncryptionSettingsCollection != nil { - objectMap["encryptionSettingsCollection"] = dup.EncryptionSettingsCollection - } - if dup.DiskIOPSReadWrite != nil { - objectMap["diskIOPSReadWrite"] = dup.DiskIOPSReadWrite - } - if dup.DiskMBpsReadWrite != nil { - objectMap["diskMBpsReadWrite"] = dup.DiskMBpsReadWrite - } - if dup.DiskIOPSReadOnly != nil { - objectMap["diskIOPSReadOnly"] = dup.DiskIOPSReadOnly - } - if dup.DiskMBpsReadOnly != nil { - objectMap["diskMBpsReadOnly"] = dup.DiskMBpsReadOnly - } - if dup.MaxShares != nil { - objectMap["maxShares"] = dup.MaxShares - } - if dup.Encryption != nil { - objectMap["encryption"] = dup.Encryption - } - if dup.NetworkAccessPolicy != "" { - objectMap["networkAccessPolicy"] = dup.NetworkAccessPolicy - } - if dup.DiskAccessID != nil { - objectMap["diskAccessId"] = dup.DiskAccessID - } - if dup.Tier != nil { - objectMap["tier"] = dup.Tier - } - if dup.BurstingEnabled != nil { - objectMap["burstingEnabled"] = dup.BurstingEnabled +// GalleryListIterator provides access to a complete listing of Gallery values. +type GalleryListIterator struct { + i int + page GalleryListPage +} + +// NextWithContext advances to the next value. 
If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *GalleryListIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryListIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() } - if dup.PurchasePlan != nil { - objectMap["purchasePlan"] = dup.PurchasePlan + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil } - if dup.SupportsHibernation != nil { - objectMap["supportsHibernation"] = dup.SupportsHibernation + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err } - return json.Marshal(objectMap) + iter.i = 0 + return nil } -// Encryption encryption at rest settings for disk or snapshot -type Encryption struct { - // DiskEncryptionSetID - ResourceId of the disk encryption set to use for enabling encryption at rest. - DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty"` - // Type - Possible values include: 'EncryptionTypeEncryptionAtRestWithPlatformKey', 'EncryptionTypeEncryptionAtRestWithCustomerKey', 'EncryptionTypeEncryptionAtRestWithPlatformAndCustomerKeys' - Type EncryptionType `json:"type,omitempty"` +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *GalleryListIterator) Next() error { + return iter.NextWithContext(context.Background()) } -// EncryptionImages optional. Allows users to provide customer managed keys for encrypting the OS and data -// disks in the gallery artifact. -type EncryptionImages struct { - OsDiskImage *OSDiskImageEncryption `json:"osDiskImage,omitempty"` - // DataDiskImages - A list of encryption specifications for data disk images. - DataDiskImages *[]DataDiskImageEncryption `json:"dataDiskImages,omitempty"` +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter GalleryListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) } -// EncryptionSetIdentity the managed identity for the disk encryption set. It should be given permission on -// the key vault before it can be used to encrypt disks. -type EncryptionSetIdentity struct { - // Type - The type of Managed Identity used by the DiskEncryptionSet. Only SystemAssigned is supported for new creations. Disk Encryption Sets can be updated with Identity type None during migration of subscription to a new Azure Active Directory tenant; it will cause the encrypted resources to lose access to the keys. Possible values include: 'DiskEncryptionSetIdentityTypeSystemAssigned', 'DiskEncryptionSetIdentityTypeNone' - Type DiskEncryptionSetIdentityType `json:"type,omitempty"` - // PrincipalID - READ-ONLY; The object id of the Managed Identity Resource. This will be sent to the RP from ARM via the x-ms-identity-principal-id header in the PUT request if the resource has a systemAssigned(implicit) identity - PrincipalID *string `json:"principalId,omitempty"` - // TenantID - READ-ONLY; The tenant id of the Managed Identity Resource. 
This will be sent to the RP from ARM via the x-ms-client-tenant-id header in the PUT request if the resource has a systemAssigned(implicit) identity - TenantID *string `json:"tenantId,omitempty"` +// Response returns the raw server response from the last page request. +func (iter GalleryListIterator) Response() GalleryList { + return iter.page.Response() } -// MarshalJSON is the custom marshaler for EncryptionSetIdentity. -func (esi EncryptionSetIdentity) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if esi.Type != "" { - objectMap["type"] = esi.Type +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter GalleryListIterator) Value() Gallery { + if !iter.page.NotDone() { + return Gallery{} } - return json.Marshal(objectMap) + return iter.page.Values()[iter.i] } -// EncryptionSetProperties ... -type EncryptionSetProperties struct { - // EncryptionType - Possible values include: 'DiskEncryptionSetTypeEncryptionAtRestWithCustomerKey', 'DiskEncryptionSetTypeEncryptionAtRestWithPlatformAndCustomerKeys' - EncryptionType DiskEncryptionSetType `json:"encryptionType,omitempty"` - // ActiveKey - The key vault key which is currently used by this disk encryption set. - ActiveKey *KeyForDiskEncryptionSet `json:"activeKey,omitempty"` - // PreviousKeys - READ-ONLY; A readonly collection of key vault keys previously used by this disk encryption set while a key rotation is in progress. It will be empty if there is no ongoing key rotation. - PreviousKeys *[]KeyForDiskEncryptionSet `json:"previousKeys,omitempty"` - // ProvisioningState - READ-ONLY; The disk encryption set provisioning state. - ProvisioningState *string `json:"provisioningState,omitempty"` - // RotationToLatestKeyVersionEnabled - Set this flag to true to enable auto-updating of this disk encryption set to the latest key version. - RotationToLatestKeyVersionEnabled *bool `json:"rotationToLatestKeyVersionEnabled,omitempty"` - // LastKeyRotationTimestamp - READ-ONLY; The time when the active key of this disk encryption set was updated. - LastKeyRotationTimestamp *date.Time `json:"lastKeyRotationTimestamp,omitempty"` +// Creates a new instance of the GalleryListIterator type. +func NewGalleryListIterator(page GalleryListPage) GalleryListIterator { + return GalleryListIterator{page: page} } -// MarshalJSON is the custom marshaler for EncryptionSetProperties. -func (esp EncryptionSetProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if esp.EncryptionType != "" { - objectMap["encryptionType"] = esp.EncryptionType +// IsEmpty returns true if the ListResult contains no values. +func (gl GalleryList) IsEmpty() bool { + return gl.Value == nil || len(*gl.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (gl GalleryList) hasNextLink() bool { + return gl.NextLink != nil && len(*gl.NextLink) != 0 +} + +// galleryListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (gl GalleryList) galleryListPreparer(ctx context.Context) (*http.Request, error) { + if !gl.hasNextLink() { + return nil, nil } - if esp.ActiveKey != nil { - objectMap["activeKey"] = esp.ActiveKey + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(gl.NextLink))) +} + +// GalleryListPage contains a page of Gallery values. 
+type GalleryListPage struct { + fn func(context.Context, GalleryList) (GalleryList, error) + gl GalleryList +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *GalleryListPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryListPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() } - if esp.RotationToLatestKeyVersionEnabled != nil { - objectMap["rotationToLatestKeyVersionEnabled"] = esp.RotationToLatestKeyVersionEnabled + for { + next, err := page.fn(ctx, page.gl) + if err != nil { + return err + } + page.gl = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } } - return json.Marshal(objectMap) + return nil } -// EncryptionSettingsCollection encryption settings for disk or snapshot -type EncryptionSettingsCollection struct { - // Enabled - Set this flag to true and provide DiskEncryptionKey and optional KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object, the existing settings remain unchanged. - Enabled *bool `json:"enabled,omitempty"` - // EncryptionSettings - A collection of encryption settings, one for each disk volume. - EncryptionSettings *[]EncryptionSettingsElement `json:"encryptionSettings,omitempty"` - // EncryptionSettingsVersion - Describes what type of encryption is used for the disks. Once this field is set, it cannot be overwritten. '1.0' corresponds to Azure Disk Encryption with AAD app.'1.1' corresponds to Azure Disk Encryption. - EncryptionSettingsVersion *string `json:"encryptionSettingsVersion,omitempty"` +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *GalleryListPage) Next() error { + return page.NextWithContext(context.Background()) } -// EncryptionSettingsElement encryption settings for one disk volume. -type EncryptionSettingsElement struct { - // DiskEncryptionKey - Key Vault Secret Url and vault id of the disk encryption key - DiskEncryptionKey *KeyVaultAndSecretReference `json:"diskEncryptionKey,omitempty"` - // KeyEncryptionKey - Key Vault Key Url and vault id of the key encryption key. KeyEncryptionKey is optional and when provided is used to unwrap the disk encryption key. - KeyEncryptionKey *KeyVaultAndKeyReference `json:"keyEncryptionKey,omitempty"` +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page GalleryListPage) NotDone() bool { + return !page.gl.IsEmpty() } -// ExtendedLocation the complex type of the extended location. -type ExtendedLocation struct { - // Name - The name of the extended location. - Name *string `json:"name,omitempty"` - // Type - The type of the extended location. Possible values include: 'ExtendedLocationTypesEdgeZone' - Type ExtendedLocationTypes `json:"type,omitempty"` +// Response returns the raw server response from the last page request. +func (page GalleryListPage) Response() GalleryList { + return page.gl } -// GalleriesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. 
-type GalleriesCreateOrUpdateFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(GalleriesClient) (Gallery, error) +// Values returns the slice of values for the current page or nil if there are no values. +func (page GalleryListPage) Values() []Gallery { + if page.gl.IsEmpty() { + return nil + } + return *page.gl.Value } -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *GalleriesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err +// Creates a new instance of the GalleryListPage type. +func NewGalleryListPage(cur GalleryList, getNextPage func(context.Context, GalleryList) (GalleryList, error)) GalleryListPage { + return GalleryListPage{ + fn: getNextPage, + gl: cur, } - future.FutureAPI = &azFuture - future.Result = future.result - return nil } -// result is the default implementation for GalleriesCreateOrUpdateFuture.Result. -func (future *GalleriesCreateOrUpdateFuture) result(client GalleriesClient) (g Gallery, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - g.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.GalleriesCreateOrUpdateFuture") - return +// GalleryOSDiskImage this is the OS disk image. +type GalleryOSDiskImage struct { + // SizeInGB - READ-ONLY; This property indicates the size of the VHD to be created. + SizeInGB *int32 `json:"sizeInGB,omitempty"` + // HostCaching - The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'. Possible values include: 'HostCachingNone', 'HostCachingReadOnly', 'HostCachingReadWrite' + HostCaching HostCaching `json:"hostCaching,omitempty"` + Source *GalleryArtifactVersionSource `json:"source,omitempty"` +} + +// MarshalJSON is the custom marshaler for GalleryOSDiskImage. +func (godi GalleryOSDiskImage) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if godi.HostCaching != "" { + objectMap["hostCaching"] = godi.HostCaching } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if g.Response.Response, err = future.GetResult(sender); err == nil && g.Response.Response.StatusCode != http.StatusNoContent { - g, err = client.CreateOrUpdateResponder(g.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", g.Response.Response, "Failure responding to request") - } + if godi.Source != nil { + objectMap["source"] = godi.Source } - return + return json.Marshal(objectMap) } -// GalleriesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type GalleriesDeleteFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(GalleriesClient) (autorest.Response, error) +// GalleryProperties describes the properties of a Shared Image Gallery. +type GalleryProperties struct { + // Description - The description of this Shared Image Gallery resource. This property is updatable. 
+ Description *string `json:"description,omitempty"` + Identifier *GalleryIdentifier `json:"identifier,omitempty"` + // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. Possible values include: 'ProvisioningStateCreating', 'ProvisioningStateUpdating', 'ProvisioningStateFailed', 'ProvisioningStateSucceeded', 'ProvisioningStateDeleting', 'ProvisioningStateMigrating' + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` + SharingProfile *SharingProfile `json:"sharingProfile,omitempty"` + SoftDeletePolicy *SoftDeletePolicy `json:"softDeletePolicy,omitempty"` } -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *GalleriesDeleteFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err +// MarshalJSON is the custom marshaler for GalleryProperties. +func (gp GalleryProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if gp.Description != nil { + objectMap["description"] = gp.Description } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for GalleriesDeleteFuture.Result. -func (future *GalleriesDeleteFuture) result(client GalleriesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleriesDeleteFuture", "Result", future.Response(), "Polling failure") - return + if gp.Identifier != nil { + objectMap["identifier"] = gp.Identifier } - if !done { - ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.GalleriesDeleteFuture") - return + if gp.SharingProfile != nil { + objectMap["sharingProfile"] = gp.SharingProfile } - ar.Response = future.Response() - return + if gp.SoftDeletePolicy != nil { + objectMap["softDeletePolicy"] = gp.SoftDeletePolicy + } + return json.Marshal(objectMap) } -// GalleriesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type GalleriesUpdateFuture struct { +// GallerySharingProfileUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type GallerySharingProfileUpdateFuture struct { azure.FutureAPI // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. - Result func(GalleriesClient) (Gallery, error) + Result func(GallerySharingProfileClient) (SharingUpdate, error) } // UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *GalleriesUpdateFuture) UnmarshalJSON(body []byte) error { +func (future *GallerySharingProfileUpdateFuture) UnmarshalJSON(body []byte) error { var azFuture azure.Future if err := json.Unmarshal(body, &azFuture); err != nil { return err @@ -3993,32 +8994,31 @@ func (future *GalleriesUpdateFuture) UnmarshalJSON(body []byte) error { return nil } -// result is the default implementation for GalleriesUpdateFuture.Result. -func (future *GalleriesUpdateFuture) result(client GalleriesClient) (g Gallery, err error) { +// result is the default implementation for GallerySharingProfileUpdateFuture.Result. 
+func (future *GallerySharingProfileUpdateFuture) result(client GallerySharingProfileClient) (su SharingUpdate, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleriesUpdateFuture", "Result", future.Response(), "Polling failure") + err = autorest.NewErrorWithError(err, "compute.GallerySharingProfileUpdateFuture", "Result", future.Response(), "Polling failure") return } if !done { - g.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.GalleriesUpdateFuture") + su.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.GallerySharingProfileUpdateFuture") return } sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if g.Response.Response, err = future.GetResult(sender); err == nil && g.Response.Response.StatusCode != http.StatusNoContent { - g, err = client.UpdateResponder(g.Response.Response) + if su.Response.Response, err = future.GetResult(sender); err == nil && su.Response.Response.StatusCode != http.StatusNoContent { + su, err = client.UpdateResponder(su.Response.Response) if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleriesUpdateFuture", "Result", g.Response.Response, "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.GallerySharingProfileUpdateFuture", "Result", su.Response.Response, "Failure responding to request") } } return } -// Gallery specifies information about the Shared Image Gallery that you want to create or update. -type Gallery struct { - autorest.Response `json:"-"` +// GalleryUpdate specifies information about the Shared Image Gallery that you want to update. +type GalleryUpdate struct { *GalleryProperties `json:"properties,omitempty"` // ID - READ-ONLY; Resource Id ID *string `json:"id,omitempty"` @@ -4026,29 +9026,24 @@ type Gallery struct { Name *string `json:"name,omitempty"` // Type - READ-ONLY; Resource type Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` // Tags - Resource tags Tags map[string]*string `json:"tags"` } -// MarshalJSON is the custom marshaler for Gallery. -func (g Gallery) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for GalleryUpdate. +func (gu GalleryUpdate) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if g.GalleryProperties != nil { - objectMap["properties"] = g.GalleryProperties - } - if g.Location != nil { - objectMap["location"] = g.Location + if gu.GalleryProperties != nil { + objectMap["properties"] = gu.GalleryProperties } - if g.Tags != nil { - objectMap["tags"] = g.Tags + if gu.Tags != nil { + objectMap["tags"] = gu.Tags } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for Gallery struct. -func (g *Gallery) UnmarshalJSON(body []byte) error { +// UnmarshalJSON is the custom unmarshaler for GalleryUpdate struct. 
+func (gu *GalleryUpdate) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { @@ -4063,7 +9058,7 @@ func (g *Gallery) UnmarshalJSON(body []byte) error { if err != nil { return err } - g.GalleryProperties = &galleryProperties + gu.GalleryProperties = &galleryProperties } case "id": if v != nil { @@ -4072,7 +9067,7 @@ func (g *Gallery) UnmarshalJSON(body []byte) error { if err != nil { return err } - g.ID = &ID + gu.ID = &ID } case "name": if v != nil { @@ -4081,7 +9076,7 @@ func (g *Gallery) UnmarshalJSON(body []byte) error { if err != nil { return err } - g.Name = &name + gu.Name = &name } case "type": if v != nil { @@ -4090,16 +9085,7 @@ func (g *Gallery) UnmarshalJSON(body []byte) error { if err != nil { return err } - g.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - g.Location = &location + gu.Type = &typeVar } case "tags": if v != nil { @@ -4108,7 +9094,7 @@ func (g *Gallery) UnmarshalJSON(body []byte) error { if err != nil { return err } - g.Tags = tags + gu.Tags = tags } } } @@ -4116,11 +9102,30 @@ func (g *Gallery) UnmarshalJSON(body []byte) error { return nil } -// GalleryApplication specifies information about the gallery Application Definition that you want to -// create or update. -type GalleryApplication struct { - autorest.Response `json:"-"` - *GalleryApplicationProperties `json:"properties,omitempty"` +// GrantAccessData data used for requesting a SAS. +type GrantAccessData struct { + // Access - Possible values include: 'AccessLevelNone', 'AccessLevelRead', 'AccessLevelWrite' + Access AccessLevel `json:"access,omitempty"` + // DurationInSeconds - Time duration in seconds until the SAS access expires. + DurationInSeconds *int32 `json:"durationInSeconds,omitempty"` +} + +// HardwareProfile specifies the hardware settings for the virtual machine. +type HardwareProfile struct { + // VMSize - Specifies the size of the virtual machine.
The enum data type is currently deprecated and will be removed by December 23rd 2023. Recommended way to get the list of available sizes is using these APIs:
[List all available virtual machine sizes in an availability set](https://docs.microsoft.com/rest/api/compute/availabilitysets/listavailablesizes)
[List all available virtual machine sizes in a region]( https://docs.microsoft.com/rest/api/compute/resourceskus/list)
[List all available virtual machine sizes for resizing](https://docs.microsoft.com/rest/api/compute/virtualmachines/listavailablesizes). For more information about virtual machine sizes, see [Sizes for virtual machines](https://docs.microsoft.com/azure/virtual-machines/sizes).
The available VM sizes depend on region and availability set. Possible values include: 'VirtualMachineSizeTypesBasicA0', 'VirtualMachineSizeTypesBasicA1', 'VirtualMachineSizeTypesBasicA2', 'VirtualMachineSizeTypesBasicA3', 'VirtualMachineSizeTypesBasicA4', 'VirtualMachineSizeTypesStandardA0', 'VirtualMachineSizeTypesStandardA1', 'VirtualMachineSizeTypesStandardA2', 'VirtualMachineSizeTypesStandardA3', 'VirtualMachineSizeTypesStandardA4', 'VirtualMachineSizeTypesStandardA5', 'VirtualMachineSizeTypesStandardA6', 'VirtualMachineSizeTypesStandardA7', 'VirtualMachineSizeTypesStandardA8', 'VirtualMachineSizeTypesStandardA9', 'VirtualMachineSizeTypesStandardA10', 'VirtualMachineSizeTypesStandardA11', 'VirtualMachineSizeTypesStandardA1V2', 'VirtualMachineSizeTypesStandardA2V2', 'VirtualMachineSizeTypesStandardA4V2', 'VirtualMachineSizeTypesStandardA8V2', 'VirtualMachineSizeTypesStandardA2mV2', 'VirtualMachineSizeTypesStandardA4mV2', 'VirtualMachineSizeTypesStandardA8mV2', 'VirtualMachineSizeTypesStandardB1s', 'VirtualMachineSizeTypesStandardB1ms', 'VirtualMachineSizeTypesStandardB2s', 'VirtualMachineSizeTypesStandardB2ms', 'VirtualMachineSizeTypesStandardB4ms', 'VirtualMachineSizeTypesStandardB8ms', 'VirtualMachineSizeTypesStandardD1', 'VirtualMachineSizeTypesStandardD2', 'VirtualMachineSizeTypesStandardD3', 'VirtualMachineSizeTypesStandardD4', 'VirtualMachineSizeTypesStandardD11', 'VirtualMachineSizeTypesStandardD12', 'VirtualMachineSizeTypesStandardD13', 'VirtualMachineSizeTypesStandardD14', 'VirtualMachineSizeTypesStandardD1V2', 'VirtualMachineSizeTypesStandardD2V2', 'VirtualMachineSizeTypesStandardD3V2', 'VirtualMachineSizeTypesStandardD4V2', 'VirtualMachineSizeTypesStandardD5V2', 'VirtualMachineSizeTypesStandardD2V3', 'VirtualMachineSizeTypesStandardD4V3', 'VirtualMachineSizeTypesStandardD8V3', 'VirtualMachineSizeTypesStandardD16V3', 'VirtualMachineSizeTypesStandardD32V3', 'VirtualMachineSizeTypesStandardD64V3', 'VirtualMachineSizeTypesStandardD2sV3', 'VirtualMachineSizeTypesStandardD4sV3', 'VirtualMachineSizeTypesStandardD8sV3', 'VirtualMachineSizeTypesStandardD16sV3', 'VirtualMachineSizeTypesStandardD32sV3', 'VirtualMachineSizeTypesStandardD64sV3', 'VirtualMachineSizeTypesStandardD11V2', 'VirtualMachineSizeTypesStandardD12V2', 'VirtualMachineSizeTypesStandardD13V2', 'VirtualMachineSizeTypesStandardD14V2', 'VirtualMachineSizeTypesStandardD15V2', 'VirtualMachineSizeTypesStandardDS1', 'VirtualMachineSizeTypesStandardDS2', 'VirtualMachineSizeTypesStandardDS3', 'VirtualMachineSizeTypesStandardDS4', 'VirtualMachineSizeTypesStandardDS11', 'VirtualMachineSizeTypesStandardDS12', 'VirtualMachineSizeTypesStandardDS13', 'VirtualMachineSizeTypesStandardDS14', 'VirtualMachineSizeTypesStandardDS1V2', 'VirtualMachineSizeTypesStandardDS2V2', 'VirtualMachineSizeTypesStandardDS3V2', 'VirtualMachineSizeTypesStandardDS4V2', 'VirtualMachineSizeTypesStandardDS5V2', 'VirtualMachineSizeTypesStandardDS11V2', 'VirtualMachineSizeTypesStandardDS12V2', 'VirtualMachineSizeTypesStandardDS13V2', 'VirtualMachineSizeTypesStandardDS14V2', 'VirtualMachineSizeTypesStandardDS15V2', 'VirtualMachineSizeTypesStandardDS134V2', 'VirtualMachineSizeTypesStandardDS132V2', 'VirtualMachineSizeTypesStandardDS148V2', 'VirtualMachineSizeTypesStandardDS144V2', 'VirtualMachineSizeTypesStandardE2V3', 'VirtualMachineSizeTypesStandardE4V3', 'VirtualMachineSizeTypesStandardE8V3', 'VirtualMachineSizeTypesStandardE16V3', 'VirtualMachineSizeTypesStandardE32V3', 'VirtualMachineSizeTypesStandardE64V3', 'VirtualMachineSizeTypesStandardE2sV3', 
'VirtualMachineSizeTypesStandardE4sV3', 'VirtualMachineSizeTypesStandardE8sV3', 'VirtualMachineSizeTypesStandardE16sV3', 'VirtualMachineSizeTypesStandardE32sV3', 'VirtualMachineSizeTypesStandardE64sV3', 'VirtualMachineSizeTypesStandardE3216V3', 'VirtualMachineSizeTypesStandardE328sV3', 'VirtualMachineSizeTypesStandardE6432sV3', 'VirtualMachineSizeTypesStandardE6416sV3', 'VirtualMachineSizeTypesStandardF1', 'VirtualMachineSizeTypesStandardF2', 'VirtualMachineSizeTypesStandardF4', 'VirtualMachineSizeTypesStandardF8', 'VirtualMachineSizeTypesStandardF16', 'VirtualMachineSizeTypesStandardF1s', 'VirtualMachineSizeTypesStandardF2s', 'VirtualMachineSizeTypesStandardF4s', 'VirtualMachineSizeTypesStandardF8s', 'VirtualMachineSizeTypesStandardF16s', 'VirtualMachineSizeTypesStandardF2sV2', 'VirtualMachineSizeTypesStandardF4sV2', 'VirtualMachineSizeTypesStandardF8sV2', 'VirtualMachineSizeTypesStandardF16sV2', 'VirtualMachineSizeTypesStandardF32sV2', 'VirtualMachineSizeTypesStandardF64sV2', 'VirtualMachineSizeTypesStandardF72sV2', 'VirtualMachineSizeTypesStandardG1', 'VirtualMachineSizeTypesStandardG2', 'VirtualMachineSizeTypesStandardG3', 'VirtualMachineSizeTypesStandardG4', 'VirtualMachineSizeTypesStandardG5', 'VirtualMachineSizeTypesStandardGS1', 'VirtualMachineSizeTypesStandardGS2', 'VirtualMachineSizeTypesStandardGS3', 'VirtualMachineSizeTypesStandardGS4', 'VirtualMachineSizeTypesStandardGS5', 'VirtualMachineSizeTypesStandardGS48', 'VirtualMachineSizeTypesStandardGS44', 'VirtualMachineSizeTypesStandardGS516', 'VirtualMachineSizeTypesStandardGS58', 'VirtualMachineSizeTypesStandardH8', 'VirtualMachineSizeTypesStandardH16', 'VirtualMachineSizeTypesStandardH8m', 'VirtualMachineSizeTypesStandardH16m', 'VirtualMachineSizeTypesStandardH16r', 'VirtualMachineSizeTypesStandardH16mr', 'VirtualMachineSizeTypesStandardL4s', 'VirtualMachineSizeTypesStandardL8s', 'VirtualMachineSizeTypesStandardL16s', 'VirtualMachineSizeTypesStandardL32s', 'VirtualMachineSizeTypesStandardM64s', 'VirtualMachineSizeTypesStandardM64ms', 'VirtualMachineSizeTypesStandardM128s', 'VirtualMachineSizeTypesStandardM128ms', 'VirtualMachineSizeTypesStandardM6432ms', 'VirtualMachineSizeTypesStandardM6416ms', 'VirtualMachineSizeTypesStandardM12864ms', 'VirtualMachineSizeTypesStandardM12832ms', 'VirtualMachineSizeTypesStandardNC6', 'VirtualMachineSizeTypesStandardNC12', 'VirtualMachineSizeTypesStandardNC24', 'VirtualMachineSizeTypesStandardNC24r', 'VirtualMachineSizeTypesStandardNC6sV2', 'VirtualMachineSizeTypesStandardNC12sV2', 'VirtualMachineSizeTypesStandardNC24sV2', 'VirtualMachineSizeTypesStandardNC24rsV2', 'VirtualMachineSizeTypesStandardNC6sV3', 'VirtualMachineSizeTypesStandardNC12sV3', 'VirtualMachineSizeTypesStandardNC24sV3', 'VirtualMachineSizeTypesStandardNC24rsV3', 'VirtualMachineSizeTypesStandardND6s', 'VirtualMachineSizeTypesStandardND12s', 'VirtualMachineSizeTypesStandardND24s', 'VirtualMachineSizeTypesStandardND24rs', 'VirtualMachineSizeTypesStandardNV6', 'VirtualMachineSizeTypesStandardNV12', 'VirtualMachineSizeTypesStandardNV24' + VMSize VirtualMachineSizeTypes `json:"vmSize,omitempty"` + // VMSizeProperties - Specifies the properties for customizing the size of the virtual machine. Minimum api-version: 2021-07-01.

This feature is still in preview mode and is not supported for VirtualMachineScaleSet.

Please follow the instructions in [VM Customization](https://aka.ms/vmcustomization) for more details. + VMSizeProperties *VMSizeProperties `json:"vmSizeProperties,omitempty"` +} + +// Image the source user image virtual hard disk. The virtual hard disk will be copied before being +// attached to the virtual machine. If SourceImage is provided, the destination virtual hard drive must not +// exist. +type Image struct { + autorest.Response `json:"-"` + *ImageProperties `json:"properties,omitempty"` + // ExtendedLocation - The extended location of the Image. + ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"` // ID - READ-ONLY; Resource Id ID *string `json:"id,omitempty"` // Name - READ-ONLY; Resource name @@ -4133,23 +9138,26 @@ type GalleryApplication struct { Tags map[string]*string `json:"tags"` } -// MarshalJSON is the custom marshaler for GalleryApplication. -func (ga GalleryApplication) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for Image. +func (i Image) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if ga.GalleryApplicationProperties != nil { - objectMap["properties"] = ga.GalleryApplicationProperties + if i.ImageProperties != nil { + objectMap["properties"] = i.ImageProperties } - if ga.Location != nil { - objectMap["location"] = ga.Location + if i.ExtendedLocation != nil { + objectMap["extendedLocation"] = i.ExtendedLocation } - if ga.Tags != nil { - objectMap["tags"] = ga.Tags + if i.Location != nil { + objectMap["location"] = i.Location + } + if i.Tags != nil { + objectMap["tags"] = i.Tags } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for GalleryApplication struct. -func (ga *GalleryApplication) UnmarshalJSON(body []byte) error { +// UnmarshalJSON is the custom unmarshaler for Image struct. 
+func (i *Image) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { @@ -4159,12 +9167,21 @@ func (ga *GalleryApplication) UnmarshalJSON(body []byte) error { switch k { case "properties": if v != nil { - var galleryApplicationProperties GalleryApplicationProperties - err = json.Unmarshal(*v, &galleryApplicationProperties) + var imageProperties ImageProperties + err = json.Unmarshal(*v, &imageProperties) if err != nil { return err } - ga.GalleryApplicationProperties = &galleryApplicationProperties + i.ImageProperties = &imageProperties + } + case "extendedLocation": + if v != nil { + var extendedLocation ExtendedLocation + err = json.Unmarshal(*v, &extendedLocation) + if err != nil { + return err + } + i.ExtendedLocation = &extendedLocation } case "id": if v != nil { @@ -4173,7 +9190,7 @@ func (ga *GalleryApplication) UnmarshalJSON(body []byte) error { if err != nil { return err } - ga.ID = &ID + i.ID = &ID } case "name": if v != nil { @@ -4182,7 +9199,7 @@ func (ga *GalleryApplication) UnmarshalJSON(body []byte) error { if err != nil { return err } - ga.Name = &name + i.Name = &name } case "type": if v != nil { @@ -4191,7 +9208,7 @@ func (ga *GalleryApplication) UnmarshalJSON(body []byte) error { if err != nil { return err } - ga.Type = &typeVar + i.Type = &typeVar } case "location": if v != nil { @@ -4200,7 +9217,7 @@ func (ga *GalleryApplication) UnmarshalJSON(body []byte) error { if err != nil { return err } - ga.Location = &location + i.Location = &location } case "tags": if v != nil { @@ -4209,7 +9226,7 @@ func (ga *GalleryApplication) UnmarshalJSON(body []byte) error { if err != nil { return err } - ga.Tags = tags + i.Tags = tags } } } @@ -4217,26 +9234,72 @@ func (ga *GalleryApplication) UnmarshalJSON(body []byte) error { return nil } -// GalleryApplicationList the List Gallery Applications operation response. -type GalleryApplicationList struct { +// ImageDataDisk describes a data disk. +type ImageDataDisk struct { + // Lun - Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. + Lun *int32 `json:"lun,omitempty"` + // Snapshot - The snapshot. + Snapshot *SubResource `json:"snapshot,omitempty"` + // ManagedDisk - The managedDisk. + ManagedDisk *SubResource `json:"managedDisk,omitempty"` + // BlobURI - The Virtual Hard Disk. + BlobURI *string `json:"blobUri,omitempty"` + // Caching - Specifies the caching requirements.

Possible values are:

**None**

**ReadOnly**

**ReadWrite**

Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' + Caching CachingTypes `json:"caching,omitempty"` + // DiskSizeGB - Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.

This value cannot be larger than 1023 GB + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS', 'StorageAccountTypesPremiumZRS', 'StorageAccountTypesStandardSSDZRS' + StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` + // DiskEncryptionSet - Specifies the customer managed disk encryption set resource id for the managed image disk. + DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"` +} + +// ImageDisk describes an image disk. +type ImageDisk struct { + // Snapshot - The snapshot. + Snapshot *SubResource `json:"snapshot,omitempty"` + // ManagedDisk - The managedDisk. + ManagedDisk *SubResource `json:"managedDisk,omitempty"` + // BlobURI - The Virtual Hard Disk. + BlobURI *string `json:"blobUri,omitempty"` + // Caching - Specifies the caching requirements.

Possible values are:

**None**

**ReadOnly**

**ReadWrite**

Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' + Caching CachingTypes `json:"caching,omitempty"` + // DiskSizeGB - Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.

This value cannot be larger than 1023 GB + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS', 'StorageAccountTypesPremiumZRS', 'StorageAccountTypesStandardSSDZRS' + StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` + // DiskEncryptionSet - Specifies the customer managed disk encryption set resource id for the managed image disk. + DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"` +} + +// ImageDiskReference the source image used for creating the disk. +type ImageDiskReference struct { + // ID - A relative uri containing either a Platform Image Repository or user image reference. + ID *string `json:"id,omitempty"` + // Lun - If the disk is created from an image's data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null. + Lun *int32 `json:"lun,omitempty"` +} + +// ImageListResult the List Image operation response. +type ImageListResult struct { autorest.Response `json:"-"` - // Value - A list of Gallery Applications. - Value *[]GalleryApplication `json:"value,omitempty"` - // NextLink - The uri to fetch the next page of Application Definitions in the Application Gallery. Call ListNext() with this to fetch the next page of gallery Application Definitions. + // Value - The list of Images. + Value *[]Image `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of Images. Call ListNext() with this to fetch the next page of Images. NextLink *string `json:"nextLink,omitempty"` } -// GalleryApplicationListIterator provides access to a complete listing of GalleryApplication values. -type GalleryApplicationListIterator struct { +// ImageListResultIterator provides access to a complete listing of Image values. +type ImageListResultIterator struct { i int - page GalleryApplicationListPage + page ImageListResultPage } // NextWithContext advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. -func (iter *GalleryApplicationListIterator) NextWithContext(ctx context.Context) (err error) { +func (iter *ImageListResultIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationListIterator.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/ImageListResultIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { @@ -4261,67 +9324,67 @@ func (iter *GalleryApplicationListIterator) NextWithContext(ctx context.Context) // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (iter *GalleryApplicationListIterator) Next() error { +func (iter *ImageListResultIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. 
-func (iter GalleryApplicationListIterator) NotDone() bool { +func (iter ImageListResultIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. -func (iter GalleryApplicationListIterator) Response() GalleryApplicationList { +func (iter ImageListResultIterator) Response() ImageListResult { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. -func (iter GalleryApplicationListIterator) Value() GalleryApplication { +func (iter ImageListResultIterator) Value() Image { if !iter.page.NotDone() { - return GalleryApplication{} + return Image{} } return iter.page.Values()[iter.i] } -// Creates a new instance of the GalleryApplicationListIterator type. -func NewGalleryApplicationListIterator(page GalleryApplicationListPage) GalleryApplicationListIterator { - return GalleryApplicationListIterator{page: page} +// Creates a new instance of the ImageListResultIterator type. +func NewImageListResultIterator(page ImageListResultPage) ImageListResultIterator { + return ImageListResultIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. -func (gal GalleryApplicationList) IsEmpty() bool { - return gal.Value == nil || len(*gal.Value) == 0 +func (ilr ImageListResult) IsEmpty() bool { + return ilr.Value == nil || len(*ilr.Value) == 0 } // hasNextLink returns true if the NextLink is not empty. -func (gal GalleryApplicationList) hasNextLink() bool { - return gal.NextLink != nil && len(*gal.NextLink) != 0 +func (ilr ImageListResult) hasNextLink() bool { + return ilr.NextLink != nil && len(*ilr.NextLink) != 0 } -// galleryApplicationListPreparer prepares a request to retrieve the next set of results. +// imageListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. -func (gal GalleryApplicationList) galleryApplicationListPreparer(ctx context.Context) (*http.Request, error) { - if !gal.hasNextLink() { +func (ilr ImageListResult) imageListResultPreparer(ctx context.Context) (*http.Request, error) { + if !ilr.hasNextLink() { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), - autorest.WithBaseURL(to.String(gal.NextLink))) + autorest.WithBaseURL(to.String(ilr.NextLink))) } -// GalleryApplicationListPage contains a page of GalleryApplication values. -type GalleryApplicationListPage struct { - fn func(context.Context, GalleryApplicationList) (GalleryApplicationList, error) - gal GalleryApplicationList +// ImageListResultPage contains a page of Image values. +type ImageListResultPage struct { + fn func(context.Context, ImageListResult) (ImageListResult, error) + ilr ImageListResult } // NextWithContext advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. 
-func (page *GalleryApplicationListPage) NextWithContext(ctx context.Context) (err error) { +func (page *ImageListResultPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationListPage.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/ImageListResultPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { @@ -4331,11 +9394,11 @@ func (page *GalleryApplicationListPage) NextWithContext(ctx context.Context) (er }() } for { - next, err := page.fn(ctx, page.gal) + next, err := page.fn(ctx, page.ilr) if err != nil { return err } - page.gal = next + page.ilr = next if !next.hasNextLink() || !next.IsEmpty() { break } @@ -4346,63 +9409,152 @@ func (page *GalleryApplicationListPage) NextWithContext(ctx context.Context) (er // Next advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (page *GalleryApplicationListPage) Next() error { +func (page *ImageListResultPage) Next() error { return page.NextWithContext(context.Background()) } // NotDone returns true if the page enumeration should be started or is not yet complete. -func (page GalleryApplicationListPage) NotDone() bool { - return !page.gal.IsEmpty() +func (page ImageListResultPage) NotDone() bool { + return !page.ilr.IsEmpty() } // Response returns the raw server response from the last page request. -func (page GalleryApplicationListPage) Response() GalleryApplicationList { - return page.gal +func (page ImageListResultPage) Response() ImageListResult { + return page.ilr } // Values returns the slice of values for the current page or nil if there are no values. -func (page GalleryApplicationListPage) Values() []GalleryApplication { - if page.gal.IsEmpty() { +func (page ImageListResultPage) Values() []Image { + if page.ilr.IsEmpty() { return nil } - return *page.gal.Value + return *page.ilr.Value } -// Creates a new instance of the GalleryApplicationListPage type. -func NewGalleryApplicationListPage(cur GalleryApplicationList, getNextPage func(context.Context, GalleryApplicationList) (GalleryApplicationList, error)) GalleryApplicationListPage { - return GalleryApplicationListPage{ +// Creates a new instance of the ImageListResultPage type. +func NewImageListResultPage(cur ImageListResult, getNextPage func(context.Context, ImageListResult) (ImageListResult, error)) ImageListResultPage { + return ImageListResultPage{ fn: getNextPage, - gal: cur, + ilr: cur, } } -// GalleryApplicationProperties describes the properties of a gallery Application Definition. -type GalleryApplicationProperties struct { - // Description - The description of this gallery Application Definition resource. This property is updatable. - Description *string `json:"description,omitempty"` - // Eula - The Eula agreement for the gallery Application Definition. - Eula *string `json:"eula,omitempty"` - // PrivacyStatementURI - The privacy statement uri. - PrivacyStatementURI *string `json:"privacyStatementUri,omitempty"` - // ReleaseNoteURI - The release note uri. - ReleaseNoteURI *string `json:"releaseNoteUri,omitempty"` - // EndOfLifeDate - The end of life date of the gallery Application Definition. This property can be used for decommissioning purposes. This property is updatable. 
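The ImageListResult/ImageListResultPage/ImageListResultIterator types above follow the SDK's standard pagination pattern. A minimal consumption sketch is shown here, assuming the generated ImagesClient exposes the conventional ListComplete helper; the import path and client setup are illustrative only.

```go
package examples

import (
	"context"
	"fmt"

	// Import path/API version is an assumption for illustration.
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

// listImages walks every page of the List Images response using the generated
// iterator. The client is assumed to be authenticated already.
func listImages(ctx context.Context, client compute.ImagesClient) error {
	iter, err := client.ListComplete(ctx)
	if err != nil {
		return err
	}
	for iter.NotDone() {
		if name := iter.Value().Name; name != nil {
			fmt.Println(*name)
		}
		// NextWithContext advances within the current page and follows
		// NextLink to fetch the next page once the current one is exhausted.
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```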
- EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"` - // SupportedOSType - This property allows you to specify the supported type of the OS that application is built for.

Possible values are:

**Windows**

**Linux**. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux' - SupportedOSType OperatingSystemTypes `json:"supportedOSType,omitempty"` +// ImageOSDisk describes an Operating System disk. +type ImageOSDisk struct { + // OsType - This property allows you to specify the type of the OS that is included in the disk if creating a VM from a custom image.

Possible values are:

**Windows**

**Linux**. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // OsState - The OS State. Possible values include: 'OperatingSystemStateTypesGeneralized', 'OperatingSystemStateTypesSpecialized' + OsState OperatingSystemStateTypes `json:"osState,omitempty"` + // Snapshot - The snapshot. + Snapshot *SubResource `json:"snapshot,omitempty"` + // ManagedDisk - The managedDisk. + ManagedDisk *SubResource `json:"managedDisk,omitempty"` + // BlobURI - The Virtual Hard Disk. + BlobURI *string `json:"blobUri,omitempty"` + // Caching - Specifies the caching requirements.

Possible values are:

**None**

**ReadOnly**

**ReadWrite**

Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' + Caching CachingTypes `json:"caching,omitempty"` + // DiskSizeGB - Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.

This value cannot be larger than 1023 GB + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS', 'StorageAccountTypesPremiumZRS', 'StorageAccountTypesStandardSSDZRS' + StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` + // DiskEncryptionSet - Specifies the customer managed disk encryption set resource id for the managed image disk. + DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"` } -// GalleryApplicationsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type GalleryApplicationsCreateOrUpdateFuture struct { +// ImageProperties describes the properties of an Image. +type ImageProperties struct { + // SourceVirtualMachine - The source virtual machine from which Image is created. + SourceVirtualMachine *SubResource `json:"sourceVirtualMachine,omitempty"` + // StorageProfile - Specifies the storage settings for the virtual machine disks. + StorageProfile *ImageStorageProfile `json:"storageProfile,omitempty"` + // ProvisioningState - READ-ONLY; The provisioning state. + ProvisioningState *string `json:"provisioningState,omitempty"` + // HyperVGeneration - Specifies the HyperVGenerationType of the VirtualMachine created from the image. From API Version 2019-03-01 if the image source is a blob, then we need the user to specify the value, if the source is managed resource like disk or snapshot, we may require the user to specify the property if we cannot deduce it from the source managed resource. Possible values include: 'HyperVGenerationTypesV1', 'HyperVGenerationTypesV2' + HyperVGeneration HyperVGenerationTypes `json:"hyperVGeneration,omitempty"` +} + +// MarshalJSON is the custom marshaler for ImageProperties. +func (IP ImageProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if IP.SourceVirtualMachine != nil { + objectMap["sourceVirtualMachine"] = IP.SourceVirtualMachine + } + if IP.StorageProfile != nil { + objectMap["storageProfile"] = IP.StorageProfile + } + if IP.HyperVGeneration != "" { + objectMap["hyperVGeneration"] = IP.HyperVGeneration + } + return json.Marshal(objectMap) +} + +// ImagePurchasePlan describes the gallery image definition purchase plan. This is used by marketplace +// images. +type ImagePurchasePlan struct { + // Name - The plan ID. + Name *string `json:"name,omitempty"` + // Publisher - The publisher ID. + Publisher *string `json:"publisher,omitempty"` + // Product - The product ID. + Product *string `json:"product,omitempty"` +} + +// ImageReference specifies information about the image to use. You can specify information about platform +// images, marketplace images, or virtual machine images. This element is required when you want to use a +// platform image, marketplace image, or virtual machine image, but is not used in other creation +// operations. NOTE: Image reference publisher and offer can only be set when you create the scale set. +type ImageReference struct { + // Publisher - The image publisher. + Publisher *string `json:"publisher,omitempty"` + // Offer - Specifies the offer of the platform image or marketplace image used to create the virtual machine. 
+ Offer *string `json:"offer,omitempty"` + // Sku - The image SKU. + Sku *string `json:"sku,omitempty"` + // Version - Specifies the version of the platform image or marketplace image used to create the virtual machine. The allowed formats are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. Specify 'latest' to use the latest version of an image available at deploy time. Even if you use 'latest', the VM image will not automatically update after deploy time even if a new version becomes available. + Version *string `json:"version,omitempty"` + // ExactVersion - READ-ONLY; Specifies in decimal numbers, the version of platform image or marketplace image used to create the virtual machine. This readonly field differs from 'version', only if the value specified in 'version' field is 'latest'. + ExactVersion *string `json:"exactVersion,omitempty"` + // SharedGalleryImageID - Specified the shared gallery image unique id for vm deployment. This can be fetched from shared gallery image GET call. + SharedGalleryImageID *string `json:"sharedGalleryImageId,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for ImageReference. +func (ir ImageReference) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ir.Publisher != nil { + objectMap["publisher"] = ir.Publisher + } + if ir.Offer != nil { + objectMap["offer"] = ir.Offer + } + if ir.Sku != nil { + objectMap["sku"] = ir.Sku + } + if ir.Version != nil { + objectMap["version"] = ir.Version + } + if ir.SharedGalleryImageID != nil { + objectMap["sharedGalleryImageId"] = ir.SharedGalleryImageID + } + if ir.ID != nil { + objectMap["id"] = ir.ID + } + return json.Marshal(objectMap) +} + +// ImagesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type ImagesCreateOrUpdateFuture struct { azure.FutureAPI // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. - Result func(GalleryApplicationsClient) (GalleryApplication, error) + Result func(ImagesClient) (Image, error) } // UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *GalleryApplicationsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { +func (future *ImagesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { var azFuture azure.Future if err := json.Unmarshal(body, &azFuture); err != nil { return err @@ -4412,40 +9564,39 @@ func (future *GalleryApplicationsCreateOrUpdateFuture) UnmarshalJSON(body []byte return nil } -// result is the default implementation for GalleryApplicationsCreateOrUpdateFuture.Result. -func (future *GalleryApplicationsCreateOrUpdateFuture) result(client GalleryApplicationsClient) (ga GalleryApplication, err error) { +// result is the default implementation for ImagesCreateOrUpdateFuture.Result. 
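As a usage illustration of the ImageReference shape described above, a platform-image reference is typically populated as in the following sketch; the publisher/offer/SKU values are only an example, and the import paths are assumptions.

```go
package examples

import (
	// Import path/API version is an assumption for illustration.
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// ubuntuImageReference builds a platform-image reference. Per the field
// documentation, "latest" resolves the newest version at deploy time (reported
// back in the read-only ExactVersion field) and does not auto-upgrade the VM
// when a newer image version is later published.
func ubuntuImageReference() compute.ImageReference {
	return compute.ImageReference{
		Publisher: to.StringPtr("Canonical"),
		Offer:     to.StringPtr("UbuntuServer"),
		Sku:       to.StringPtr("18.04-LTS"),
		Version:   to.StringPtr("latest"),
	}
}
```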
+func (future *ImagesCreateOrUpdateFuture) result(client ImagesClient) (i Image, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + err = autorest.NewErrorWithError(err, "compute.ImagesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") return } if !done { - ga.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationsCreateOrUpdateFuture") + i.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.ImagesCreateOrUpdateFuture") return } sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if ga.Response.Response, err = future.GetResult(sender); err == nil && ga.Response.Response.StatusCode != http.StatusNoContent { - ga, err = client.CreateOrUpdateResponder(ga.Response.Response) + if i.Response.Response, err = future.GetResult(sender); err == nil && i.Response.Response.StatusCode != http.StatusNoContent { + i, err = client.CreateOrUpdateResponder(i.Response.Response) if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsCreateOrUpdateFuture", "Result", ga.Response.Response, "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.ImagesCreateOrUpdateFuture", "Result", i.Response.Response, "Failure responding to request") } } return } -// GalleryApplicationsDeleteFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type GalleryApplicationsDeleteFuture struct { +// ImagesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type ImagesDeleteFuture struct { azure.FutureAPI // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. - Result func(GalleryApplicationsClient) (autorest.Response, error) + Result func(ImagesClient) (autorest.Response, error) } // UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *GalleryApplicationsDeleteFuture) UnmarshalJSON(body []byte) error { +func (future *ImagesDeleteFuture) UnmarshalJSON(body []byte) error { var azFuture azure.Future if err := json.Unmarshal(body, &azFuture); err != nil { return err @@ -4455,34 +9606,43 @@ func (future *GalleryApplicationsDeleteFuture) UnmarshalJSON(body []byte) error return nil } -// result is the default implementation for GalleryApplicationsDeleteFuture.Result. -func (future *GalleryApplicationsDeleteFuture) result(client GalleryApplicationsClient) (ar autorest.Response, err error) { +// result is the default implementation for ImagesDeleteFuture.Result. 
+func (future *ImagesDeleteFuture) result(client ImagesClient) (ar autorest.Response, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsDeleteFuture", "Result", future.Response(), "Polling failure") + err = autorest.NewErrorWithError(err, "compute.ImagesDeleteFuture", "Result", future.Response(), "Polling failure") return } if !done { ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationsDeleteFuture") + err = azure.NewAsyncOpIncompleteError("compute.ImagesDeleteFuture") return } ar.Response = future.Response() return } -// GalleryApplicationsUpdateFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type GalleryApplicationsUpdateFuture struct { +// ImageStorageProfile describes a storage profile. +type ImageStorageProfile struct { + // OsDisk - Specifies information about the operating system disk used by the virtual machine.

For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview). + OsDisk *ImageOSDisk `json:"osDisk,omitempty"` + // DataDisks - Specifies the parameters that are used to add a data disk to a virtual machine.

For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview). + DataDisks *[]ImageDataDisk `json:"dataDisks,omitempty"` + // ZoneResilient - Specifies whether an image is zone resilient or not. Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage (ZRS). + ZoneResilient *bool `json:"zoneResilient,omitempty"` +} + +// ImagesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type ImagesUpdateFuture struct { azure.FutureAPI // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. - Result func(GalleryApplicationsClient) (GalleryApplication, error) + Result func(ImagesClient) (Image, error) } // UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *GalleryApplicationsUpdateFuture) UnmarshalJSON(body []byte) error { +func (future *ImagesUpdateFuture) UnmarshalJSON(body []byte) error { var azFuture azure.Future if err := json.Unmarshal(body, &azFuture); err != nil { return err @@ -4492,57 +9652,50 @@ func (future *GalleryApplicationsUpdateFuture) UnmarshalJSON(body []byte) error return nil } -// result is the default implementation for GalleryApplicationsUpdateFuture.Result. -func (future *GalleryApplicationsUpdateFuture) result(client GalleryApplicationsClient) (ga GalleryApplication, err error) { +// result is the default implementation for ImagesUpdateFuture.Result. +func (future *ImagesUpdateFuture) result(client ImagesClient) (i Image, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsUpdateFuture", "Result", future.Response(), "Polling failure") + err = autorest.NewErrorWithError(err, "compute.ImagesUpdateFuture", "Result", future.Response(), "Polling failure") return } if !done { - ga.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationsUpdateFuture") + i.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.ImagesUpdateFuture") return } sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if ga.Response.Response, err = future.GetResult(sender); err == nil && ga.Response.Response.StatusCode != http.StatusNoContent { - ga, err = client.UpdateResponder(ga.Response.Response) + if i.Response.Response, err = future.GetResult(sender); err == nil && i.Response.Response.StatusCode != http.StatusNoContent { + i, err = client.UpdateResponder(i.Response.Response) if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsUpdateFuture", "Result", ga.Response.Response, "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.ImagesUpdateFuture", "Result", i.Response.Response, "Failure responding to request") } } return } -// GalleryApplicationUpdate specifies information about the gallery Application Definition that you want to -// update. 
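The Images*Future types above follow the SDK's long-running-operation pattern: the initial call returns a future, and the typed result only becomes available after polling completes. A hedged sketch follows, assuming ImagesClient exposes the conventional CreateOrUpdate method; the location and resource names are placeholders.

```go
package examples

import (
	"context"

	// Import path/API version is an assumption for illustration.
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// captureImage creates a managed image from an existing generalized VM and
// blocks until the long-running operation finishes.
func captureImage(ctx context.Context, client compute.ImagesClient, vmID string) (compute.Image, error) {
	params := compute.Image{
		Location: to.StringPtr("westeurope"),
		ImageProperties: &compute.ImageProperties{
			SourceVirtualMachine: &compute.SubResource{ID: to.StringPtr(vmID)},
		},
	}

	future, err := client.CreateOrUpdate(ctx, "example-resources", "example-image", params)
	if err != nil {
		return compute.Image{}, err
	}
	// WaitForCompletionRef polls until the operation reaches a terminal state;
	// Result then decodes the final Image payload.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return compute.Image{}, err
	}
	return future.Result(client)
}
```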
-type GalleryApplicationUpdate struct { - *GalleryApplicationProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Resource Id - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Resource name - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Resource type - Type *string `json:"type,omitempty"` +// ImageUpdate the source user image virtual hard disk. Only tags may be updated. +type ImageUpdate struct { + *ImageProperties `json:"properties,omitempty"` // Tags - Resource tags Tags map[string]*string `json:"tags"` } -// MarshalJSON is the custom marshaler for GalleryApplicationUpdate. -func (gau GalleryApplicationUpdate) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for ImageUpdate. +func (iu ImageUpdate) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if gau.GalleryApplicationProperties != nil { - objectMap["properties"] = gau.GalleryApplicationProperties + if iu.ImageProperties != nil { + objectMap["properties"] = iu.ImageProperties } - if gau.Tags != nil { - objectMap["tags"] = gau.Tags + if iu.Tags != nil { + objectMap["tags"] = iu.Tags } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for GalleryApplicationUpdate struct. -func (gau *GalleryApplicationUpdate) UnmarshalJSON(body []byte) error { +// UnmarshalJSON is the custom unmarshaler for ImageUpdate struct. +func (iu *ImageUpdate) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { @@ -4552,39 +9705,12 @@ func (gau *GalleryApplicationUpdate) UnmarshalJSON(body []byte) error { switch k { case "properties": if v != nil { - var galleryApplicationProperties GalleryApplicationProperties - err = json.Unmarshal(*v, &galleryApplicationProperties) - if err != nil { - return err - } - gau.GalleryApplicationProperties = &galleryApplicationProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - gau.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - gau.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) + var imageProperties ImageProperties + err = json.Unmarshal(*v, &imageProperties) if err != nil { return err } - gau.Type = &typeVar + iu.ImageProperties = &imageProperties } case "tags": if v != nil { @@ -4593,7 +9719,7 @@ func (gau *GalleryApplicationUpdate) UnmarshalJSON(body []byte) error { if err != nil { return err } - gau.Tags = tags + iu.Tags = tags } } } @@ -4601,128 +9727,182 @@ func (gau *GalleryApplicationUpdate) UnmarshalJSON(body []byte) error { return nil } -// GalleryApplicationVersion specifies information about the gallery Application Version that you want to -// create or update. -type GalleryApplicationVersion struct { - autorest.Response `json:"-"` - *GalleryApplicationVersionProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Resource Id - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Resource name +// InnerError inner error details. +type InnerError struct { + // Exceptiontype - The exception type. + Exceptiontype *string `json:"exceptiontype,omitempty"` + // Errordetail - The internal error message or exception dump. + Errordetail *string `json:"errordetail,omitempty"` +} + +// InstanceSku ... +type InstanceSku struct { + // Name - READ-ONLY; The sku name. 
Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Resource type - Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` + // Tier - READ-ONLY; The tier of the cloud service role instance. + Tier *string `json:"tier,omitempty"` } -// MarshalJSON is the custom marshaler for GalleryApplicationVersion. -func (gav GalleryApplicationVersion) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for InstanceSku. +func (is InstanceSku) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if gav.GalleryApplicationVersionProperties != nil { - objectMap["properties"] = gav.GalleryApplicationVersionProperties - } - if gav.Location != nil { - objectMap["location"] = gav.Location - } - if gav.Tags != nil { - objectMap["tags"] = gav.Tags - } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for GalleryApplicationVersion struct. -func (gav *GalleryApplicationVersion) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var galleryApplicationVersionProperties GalleryApplicationVersionProperties - err = json.Unmarshal(*v, &galleryApplicationVersionProperties) - if err != nil { - return err - } - gav.GalleryApplicationVersionProperties = &galleryApplicationVersionProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - gav.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - gav.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - gav.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - gav.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - gav.Tags = tags - } - } - } +// InstanceViewStatus instance view status. +type InstanceViewStatus struct { + // Code - The status code. + Code *string `json:"code,omitempty"` + // Level - The level code. Possible values include: 'StatusLevelTypesInfo', 'StatusLevelTypesWarning', 'StatusLevelTypesError' + Level StatusLevelTypes `json:"level,omitempty"` + // DisplayStatus - The short localizable label for the status. + DisplayStatus *string `json:"displayStatus,omitempty"` + // Message - The detailed status message, including for alerts and error messages. + Message *string `json:"message,omitempty"` + // Time - The time of the status. + Time *date.Time `json:"time,omitempty"` +} + +// InstanceViewStatusesSummary instance view statuses. +type InstanceViewStatusesSummary struct { + // StatusesSummary - READ-ONLY + StatusesSummary *[]StatusCodeCount `json:"statusesSummary,omitempty"` +} + +// MarshalJSON is the custom marshaler for InstanceViewStatusesSummary. 
+func (ivss InstanceViewStatusesSummary) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// KeyForDiskEncryptionSet key Vault Key Url to be used for server side encryption of Managed Disks and +// Snapshots +type KeyForDiskEncryptionSet struct { + // SourceVault - Resource id of the KeyVault containing the key or secret. This property is optional and cannot be used if the KeyVault subscription is not the same as the Disk Encryption Set subscription. + SourceVault *SourceVault `json:"sourceVault,omitempty"` + // KeyURL - Fully versioned Key Url pointing to a key in KeyVault. Version segment of the Url is required regardless of rotationToLatestKeyVersionEnabled value. + KeyURL *string `json:"keyUrl,omitempty"` +} + +// KeyVaultAndKeyReference key Vault Key Url and vault id of KeK, KeK is optional and when provided is used +// to unwrap the encryptionKey +type KeyVaultAndKeyReference struct { + // SourceVault - Resource id of the KeyVault containing the key or secret + SourceVault *SourceVault `json:"sourceVault,omitempty"` + // KeyURL - Url pointing to a key or secret in KeyVault + KeyURL *string `json:"keyUrl,omitempty"` +} + +// KeyVaultAndSecretReference key Vault Secret Url and vault id of the encryption key +type KeyVaultAndSecretReference struct { + // SourceVault - Resource id of the KeyVault containing the key or secret + SourceVault *SourceVault `json:"sourceVault,omitempty"` + // SecretURL - Url pointing to a key or secret in KeyVault + SecretURL *string `json:"secretUrl,omitempty"` +} + +// KeyVaultKeyReference describes a reference to Key Vault Key +type KeyVaultKeyReference struct { + // KeyURL - The URL referencing a key encryption key in Key Vault. + KeyURL *string `json:"keyUrl,omitempty"` + // SourceVault - The relative URL of the Key Vault containing the key. + SourceVault *SubResource `json:"sourceVault,omitempty"` +} + +// KeyVaultSecretReference describes a reference to Key Vault Secret +type KeyVaultSecretReference struct { + // SecretURL - The URL referencing a secret in a Key Vault. + SecretURL *string `json:"secretUrl,omitempty"` + // SourceVault - The relative URL of the Key Vault containing the secret. + SourceVault *SubResource `json:"sourceVault,omitempty"` +} + +// LastPatchInstallationSummary describes the properties of the last installed patch summary. +type LastPatchInstallationSummary struct { + // Status - READ-ONLY; The overall success or failure status of the operation. It remains "InProgress" until the operation completes. At that point it will become "Unknown", "Failed", "Succeeded", or "CompletedWithWarnings.". Possible values include: 'PatchOperationStatusUnknown', 'PatchOperationStatusInProgress', 'PatchOperationStatusFailed', 'PatchOperationStatusSucceeded', 'PatchOperationStatusCompletedWithWarnings' + Status PatchOperationStatus `json:"status,omitempty"` + // InstallationActivityID - READ-ONLY; The activity ID of the operation that produced this result. It is used to correlate across CRP and extension logs. + InstallationActivityID *string `json:"installationActivityId,omitempty"` + // MaintenanceWindowExceeded - READ-ONLY; Describes whether the operation ran out of time before it completed all its intended actions + MaintenanceWindowExceeded *bool `json:"maintenanceWindowExceeded,omitempty"` + // NotSelectedPatchCount - READ-ONLY; The number of all available patches but not going to be installed because it didn't match a classification or inclusion list entry. 
+ NotSelectedPatchCount *int32 `json:"notSelectedPatchCount,omitempty"` + // ExcludedPatchCount - READ-ONLY; The number of all available patches but excluded explicitly by a customer-specified exclusion list match. + ExcludedPatchCount *int32 `json:"excludedPatchCount,omitempty"` + // PendingPatchCount - READ-ONLY; The number of all available patches expected to be installed over the course of the patch installation operation. + PendingPatchCount *int32 `json:"pendingPatchCount,omitempty"` + // InstalledPatchCount - READ-ONLY; The count of patches that successfully installed. + InstalledPatchCount *int32 `json:"installedPatchCount,omitempty"` + // FailedPatchCount - READ-ONLY; The count of patches that failed installation. + FailedPatchCount *int32 `json:"failedPatchCount,omitempty"` + // StartTime - READ-ONLY; The UTC timestamp when the operation began. + StartTime *date.Time `json:"startTime,omitempty"` + // LastModifiedTime - READ-ONLY; The UTC timestamp when the operation began. + LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"` + // Error - READ-ONLY; The errors that were encountered during execution of the operation. The details array contains the list of them. + Error *APIError `json:"error,omitempty"` +} - return nil +// MarshalJSON is the custom marshaler for LastPatchInstallationSummary. +func (lpis LastPatchInstallationSummary) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) } -// GalleryApplicationVersionList the List Gallery Application version operation response. -type GalleryApplicationVersionList struct { +// LinuxConfiguration specifies the Linux operating system settings on the virtual machine.

For a +// list of supported Linux distributions, see [Linux on Azure-Endorsed +// Distributions](https://docs.microsoft.com/azure/virtual-machines/linux/endorsed-distros). +type LinuxConfiguration struct { + // DisablePasswordAuthentication - Specifies whether password authentication should be disabled. + DisablePasswordAuthentication *bool `json:"disablePasswordAuthentication,omitempty"` + // SSH - Specifies the ssh key configuration for a Linux OS. + SSH *SSHConfiguration `json:"ssh,omitempty"` + // ProvisionVMAgent - Indicates whether virtual machine agent should be provisioned on the virtual machine.

When this property is not specified in the request body, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later. + ProvisionVMAgent *bool `json:"provisionVMAgent,omitempty"` + // PatchSettings - [Preview Feature] Specifies settings related to VM Guest Patching on Linux. + PatchSettings *LinuxPatchSettings `json:"patchSettings,omitempty"` +} + +// LinuxParameters input for InstallPatches on a Linux VM, as directly received by the API +type LinuxParameters struct { + // ClassificationsToInclude - The update classifications to select when installing patches for Linux. + ClassificationsToInclude *[]VMGuestPatchClassificationLinux `json:"classificationsToInclude,omitempty"` + // PackageNameMasksToInclude - packages to include in the patch operation. Format: packageName_packageVersion + PackageNameMasksToInclude *[]string `json:"packageNameMasksToInclude,omitempty"` + // PackageNameMasksToExclude - packages to exclude in the patch operation. Format: packageName_packageVersion + PackageNameMasksToExclude *[]string `json:"packageNameMasksToExclude,omitempty"` + // MaintenanceRunID - This is used as a maintenance run identifier for Auto VM Guest Patching in Linux. + MaintenanceRunID *string `json:"maintenanceRunId,omitempty"` +} + +// LinuxPatchSettings specifies settings related to VM Guest Patching on Linux. +type LinuxPatchSettings struct { + // PatchMode - Specifies the mode of VM Guest Patching to IaaS virtual machine or virtual machines associated to virtual machine scale set with OrchestrationMode as Flexible.

Possible values are:

**ImageDefault** - The virtual machine's default patching configuration is used.

**AutomaticByPlatform** - The virtual machine will be automatically updated by the platform. The property provisionVMAgent must be true. Possible values include: 'LinuxVMGuestPatchModeImageDefault', 'LinuxVMGuestPatchModeAutomaticByPlatform' + PatchMode LinuxVMGuestPatchMode `json:"patchMode,omitempty"` + // AssessmentMode - Specifies the mode of VM Guest Patch Assessment for the IaaS virtual machine.

Possible values are:

**ImageDefault** - You control the timing of patch assessments on a virtual machine.

**AutomaticByPlatform** - The platform will trigger periodic patch assessments. The property provisionVMAgent must be true. Possible values include: 'LinuxPatchAssessmentModeImageDefault', 'LinuxPatchAssessmentModeAutomaticByPlatform' + AssessmentMode LinuxPatchAssessmentMode `json:"assessmentMode,omitempty"` +} + +// ListUsagesResult the List Usages operation response. +type ListUsagesResult struct { autorest.Response `json:"-"` - // Value - A list of gallery Application Versions. - Value *[]GalleryApplicationVersion `json:"value,omitempty"` - // NextLink - The uri to fetch the next page of gallery Application Versions. Call ListNext() with this to fetch the next page of gallery Application Versions. + // Value - The list of compute resource usages. + Value *[]Usage `json:"value,omitempty"` + // NextLink - The URI to fetch the next page of compute resource usage information. Call ListNext() with this to fetch the next page of compute resource usage information. NextLink *string `json:"nextLink,omitempty"` } -// GalleryApplicationVersionListIterator provides access to a complete listing of GalleryApplicationVersion -// values. -type GalleryApplicationVersionListIterator struct { +// ListUsagesResultIterator provides access to a complete listing of Usage values. +type ListUsagesResultIterator struct { i int - page GalleryApplicationVersionListPage + page ListUsagesResultPage } // NextWithContext advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. -func (iter *GalleryApplicationVersionListIterator) NextWithContext(ctx context.Context) (err error) { +func (iter *ListUsagesResultIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionListIterator.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/ListUsagesResultIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { @@ -4747,67 +9927,67 @@ func (iter *GalleryApplicationVersionListIterator) NextWithContext(ctx context.C // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (iter *GalleryApplicationVersionListIterator) Next() error { +func (iter *ListUsagesResultIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. -func (iter GalleryApplicationVersionListIterator) NotDone() bool { +func (iter ListUsagesResultIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. -func (iter GalleryApplicationVersionListIterator) Response() GalleryApplicationVersionList { +func (iter ListUsagesResultIterator) Response() ListUsagesResult { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. -func (iter GalleryApplicationVersionListIterator) Value() GalleryApplicationVersion { +func (iter ListUsagesResultIterator) Value() Usage { if !iter.page.NotDone() { - return GalleryApplicationVersion{} + return Usage{} } return iter.page.Values()[iter.i] } -// Creates a new instance of the GalleryApplicationVersionListIterator type. 
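As a small illustration of the Linux patching settings defined above, the sketch below wires LinuxPatchSettings into a LinuxConfiguration. Field values are illustrative; ProvisionVMAgent is set explicitly because both AutomaticByPlatform modes document that requirement.

```go
package examples

import (
	// Import path/API version is an assumption for illustration.
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// linuxAutomaticPatching returns a LinuxConfiguration that opts the VM into
// platform-managed patching and platform-triggered patch assessments.
func linuxAutomaticPatching() *compute.LinuxConfiguration {
	return &compute.LinuxConfiguration{
		DisablePasswordAuthentication: to.BoolPtr(true),
		ProvisionVMAgent:              to.BoolPtr(true),
		PatchSettings: &compute.LinuxPatchSettings{
			PatchMode:      compute.LinuxVMGuestPatchModeAutomaticByPlatform,
			AssessmentMode: compute.LinuxPatchAssessmentModeAutomaticByPlatform,
		},
	}
}
```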
-func NewGalleryApplicationVersionListIterator(page GalleryApplicationVersionListPage) GalleryApplicationVersionListIterator { - return GalleryApplicationVersionListIterator{page: page} +// Creates a new instance of the ListUsagesResultIterator type. +func NewListUsagesResultIterator(page ListUsagesResultPage) ListUsagesResultIterator { + return ListUsagesResultIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. -func (gavl GalleryApplicationVersionList) IsEmpty() bool { - return gavl.Value == nil || len(*gavl.Value) == 0 +func (lur ListUsagesResult) IsEmpty() bool { + return lur.Value == nil || len(*lur.Value) == 0 } // hasNextLink returns true if the NextLink is not empty. -func (gavl GalleryApplicationVersionList) hasNextLink() bool { - return gavl.NextLink != nil && len(*gavl.NextLink) != 0 +func (lur ListUsagesResult) hasNextLink() bool { + return lur.NextLink != nil && len(*lur.NextLink) != 0 } -// galleryApplicationVersionListPreparer prepares a request to retrieve the next set of results. +// listUsagesResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. -func (gavl GalleryApplicationVersionList) galleryApplicationVersionListPreparer(ctx context.Context) (*http.Request, error) { - if !gavl.hasNextLink() { +func (lur ListUsagesResult) listUsagesResultPreparer(ctx context.Context) (*http.Request, error) { + if !lur.hasNextLink() { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), - autorest.WithBaseURL(to.String(gavl.NextLink))) + autorest.WithBaseURL(to.String(lur.NextLink))) } -// GalleryApplicationVersionListPage contains a page of GalleryApplicationVersion values. -type GalleryApplicationVersionListPage struct { - fn func(context.Context, GalleryApplicationVersionList) (GalleryApplicationVersionList, error) - gavl GalleryApplicationVersionList +// ListUsagesResultPage contains a page of Usage values. +type ListUsagesResultPage struct { + fn func(context.Context, ListUsagesResult) (ListUsagesResult, error) + lur ListUsagesResult } // NextWithContext advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. -func (page *GalleryApplicationVersionListPage) NextWithContext(ctx context.Context) (err error) { +func (page *ListUsagesResultPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionListPage.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/ListUsagesResultPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { @@ -4817,11 +9997,11 @@ func (page *GalleryApplicationVersionListPage) NextWithContext(ctx context.Conte }() } for { - next, err := page.fn(ctx, page.gavl) + next, err := page.fn(ctx, page.lur) if err != nil { return err } - page.gavl = next + page.lur = next if !next.hasNextLink() || !next.IsEmpty() { break } @@ -4832,158 +10012,93 @@ func (page *GalleryApplicationVersionListPage) NextWithContext(ctx context.Conte // Next advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. 
-func (page *GalleryApplicationVersionListPage) Next() error { +func (page *ListUsagesResultPage) Next() error { return page.NextWithContext(context.Background()) } // NotDone returns true if the page enumeration should be started or is not yet complete. -func (page GalleryApplicationVersionListPage) NotDone() bool { - return !page.gavl.IsEmpty() +func (page ListUsagesResultPage) NotDone() bool { + return !page.lur.IsEmpty() } // Response returns the raw server response from the last page request. -func (page GalleryApplicationVersionListPage) Response() GalleryApplicationVersionList { - return page.gavl +func (page ListUsagesResultPage) Response() ListUsagesResult { + return page.lur } // Values returns the slice of values for the current page or nil if there are no values. -func (page GalleryApplicationVersionListPage) Values() []GalleryApplicationVersion { - if page.gavl.IsEmpty() { +func (page ListUsagesResultPage) Values() []Usage { + if page.lur.IsEmpty() { return nil } - return *page.gavl.Value -} - -// Creates a new instance of the GalleryApplicationVersionListPage type. -func NewGalleryApplicationVersionListPage(cur GalleryApplicationVersionList, getNextPage func(context.Context, GalleryApplicationVersionList) (GalleryApplicationVersionList, error)) GalleryApplicationVersionListPage { - return GalleryApplicationVersionListPage{ - fn: getNextPage, - gavl: cur, - } -} - -// GalleryApplicationVersionProperties describes the properties of a gallery Image Version. -type GalleryApplicationVersionProperties struct { - PublishingProfile *GalleryApplicationVersionPublishingProfile `json:"publishingProfile,omitempty"` - // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. Possible values include: 'ProvisioningState1Creating', 'ProvisioningState1Updating', 'ProvisioningState1Failed', 'ProvisioningState1Succeeded', 'ProvisioningState1Deleting', 'ProvisioningState1Migrating' - ProvisioningState ProvisioningState1 `json:"provisioningState,omitempty"` - // ReplicationStatus - READ-ONLY - ReplicationStatus *ReplicationStatus `json:"replicationStatus,omitempty"` -} - -// MarshalJSON is the custom marshaler for GalleryApplicationVersionProperties. -func (gavp GalleryApplicationVersionProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if gavp.PublishingProfile != nil { - objectMap["publishingProfile"] = gavp.PublishingProfile - } - return json.Marshal(objectMap) -} - -// GalleryApplicationVersionPublishingProfile the publishing profile of a gallery image version. -type GalleryApplicationVersionPublishingProfile struct { - Source *UserArtifactSource `json:"source,omitempty"` - ManageActions *UserArtifactManage `json:"manageActions,omitempty"` - // EnableHealthCheck - Optional. Whether or not this application reports health. - EnableHealthCheck *bool `json:"enableHealthCheck,omitempty"` - // TargetRegions - The target regions where the Image Version is going to be replicated to. This property is updatable. - TargetRegions *[]TargetRegion `json:"targetRegions,omitempty"` - // ReplicaCount - The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable. - ReplicaCount *int32 `json:"replicaCount,omitempty"` - // ExcludeFromLatest - If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version. 
- ExcludeFromLatest *bool `json:"excludeFromLatest,omitempty"` - // PublishedDate - READ-ONLY; The timestamp for when the gallery Image Version is published. - PublishedDate *date.Time `json:"publishedDate,omitempty"` - // EndOfLifeDate - The end of life date of the gallery Image Version. This property can be used for decommissioning purposes. This property is updatable. - EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"` - // StorageAccountType - Specifies the storage account type to be used to store the image. This property is not updatable. Possible values include: 'StorageAccountTypeStandardLRS', 'StorageAccountTypeStandardZRS', 'StorageAccountTypePremiumLRS' - StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"` -} - -// MarshalJSON is the custom marshaler for GalleryApplicationVersionPublishingProfile. -func (gavpp GalleryApplicationVersionPublishingProfile) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if gavpp.Source != nil { - objectMap["source"] = gavpp.Source - } - if gavpp.ManageActions != nil { - objectMap["manageActions"] = gavpp.ManageActions - } - if gavpp.EnableHealthCheck != nil { - objectMap["enableHealthCheck"] = gavpp.EnableHealthCheck - } - if gavpp.TargetRegions != nil { - objectMap["targetRegions"] = gavpp.TargetRegions - } - if gavpp.ReplicaCount != nil { - objectMap["replicaCount"] = gavpp.ReplicaCount - } - if gavpp.ExcludeFromLatest != nil { - objectMap["excludeFromLatest"] = gavpp.ExcludeFromLatest - } - if gavpp.EndOfLifeDate != nil { - objectMap["endOfLifeDate"] = gavpp.EndOfLifeDate - } - if gavpp.StorageAccountType != "" { - objectMap["storageAccountType"] = gavpp.StorageAccountType - } - return json.Marshal(objectMap) -} - -// GalleryApplicationVersionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results -// of a long-running operation. -type GalleryApplicationVersionsCreateOrUpdateFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(GalleryApplicationVersionsClient) (GalleryApplicationVersion, error) + return *page.lur.Value } -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *GalleryApplicationVersionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err +// Creates a new instance of the ListUsagesResultPage type. +func NewListUsagesResultPage(cur ListUsagesResult, getNextPage func(context.Context, ListUsagesResult) (ListUsagesResult, error)) ListUsagesResultPage { + return ListUsagesResultPage{ + fn: getNextPage, + lur: cur, } - future.FutureAPI = &azFuture - future.Result = future.result - return nil } -// result is the default implementation for GalleryApplicationVersionsCreateOrUpdateFuture.Result. 
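// Illustrative sketch, not part of the vendored SDK: how a caller is expected to
// drive the ListUsagesResultPage generated above. NotDone/Values/NextWithContext is
// the standard track-1 paging loop; the helper name is hypothetical and the page
// value is assumed to come from a Usage list call elsewhere in the package.
func collectUsages(ctx context.Context, page ListUsagesResultPage) ([]Usage, error) {
	var all []Usage
	for page.NotDone() {
		// Values returns nil for an empty page, so append is always safe.
		all = append(all, page.Values()...)
		if err := page.NextWithContext(ctx); err != nil {
			return nil, err
		}
	}
	return all, nil
}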
-func (future *GalleryApplicationVersionsCreateOrUpdateFuture) result(client GalleryApplicationVersionsClient) (gav GalleryApplicationVersion, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - gav.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationVersionsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if gav.Response.Response, err = future.GetResult(sender); err == nil && gav.Response.Response.StatusCode != http.StatusNoContent { - gav, err = client.CreateOrUpdateResponder(gav.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsCreateOrUpdateFuture", "Result", gav.Response.Response, "Failure responding to request") - } - } - return +// ListVirtualMachineExtensionImage ... +type ListVirtualMachineExtensionImage struct { + autorest.Response `json:"-"` + Value *[]VirtualMachineExtensionImage `json:"value,omitempty"` } -// GalleryApplicationVersionsDeleteFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type GalleryApplicationVersionsDeleteFuture struct { +// ListVirtualMachineImageResource ... +type ListVirtualMachineImageResource struct { + autorest.Response `json:"-"` + Value *[]VirtualMachineImageResource `json:"value,omitempty"` +} + +// LoadBalancerConfiguration describes the load balancer configuration. +type LoadBalancerConfiguration struct { + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - The name of the Load balancer + Name *string `json:"name,omitempty"` + // Properties - Properties of the load balancer configuration. + Properties *LoadBalancerConfigurationProperties `json:"properties,omitempty"` +} + +// LoadBalancerConfigurationProperties ... +type LoadBalancerConfigurationProperties struct { + // FrontendIPConfigurations - Specifies the frontend IP to be used for the load balancer. Only IPv4 frontend IP address is supported. Each load balancer configuration must have exactly one frontend IP configuration. + FrontendIPConfigurations *[]LoadBalancerFrontendIPConfiguration `json:"frontendIPConfigurations,omitempty"` +} + +// LoadBalancerFrontendIPConfiguration ... +type LoadBalancerFrontendIPConfiguration struct { + // Name - The name of the resource that is unique within the set of frontend IP configurations used by the load balancer. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Properties - Properties of load balancer frontend ip configuration. + Properties *LoadBalancerFrontendIPConfigurationProperties `json:"properties,omitempty"` +} + +// LoadBalancerFrontendIPConfigurationProperties describes a cloud service IP Configuration +type LoadBalancerFrontendIPConfigurationProperties struct { + // PublicIPAddress - The reference to the public ip address resource. + PublicIPAddress *SubResource `json:"publicIPAddress,omitempty"` + // Subnet - The reference to the virtual network subnet resource. + Subnet *SubResource `json:"subnet,omitempty"` + // PrivateIPAddress - The virtual network private IP address of the IP configuration. 
+ PrivateIPAddress *string `json:"privateIPAddress,omitempty"` +} + +// LogAnalyticsExportRequestRateByIntervalFuture an abstraction for monitoring and retrieving the results +// of a long-running operation. +type LogAnalyticsExportRequestRateByIntervalFuture struct { azure.FutureAPI // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. - Result func(GalleryApplicationVersionsClient) (autorest.Response, error) + Result func(LogAnalyticsClient) (LogAnalyticsOperationResult, error) } // UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *GalleryApplicationVersionsDeleteFuture) UnmarshalJSON(body []byte) error { +func (future *LogAnalyticsExportRequestRateByIntervalFuture) UnmarshalJSON(body []byte) error { var azFuture azure.Future if err := json.Unmarshal(body, &azFuture); err != nil { return err @@ -4993,34 +10108,40 @@ func (future *GalleryApplicationVersionsDeleteFuture) UnmarshalJSON(body []byte) return nil } -// result is the default implementation for GalleryApplicationVersionsDeleteFuture.Result. -func (future *GalleryApplicationVersionsDeleteFuture) result(client GalleryApplicationVersionsClient) (ar autorest.Response, err error) { +// result is the default implementation for LogAnalyticsExportRequestRateByIntervalFuture.Result. +func (future *LogAnalyticsExportRequestRateByIntervalFuture) result(client LogAnalyticsClient) (laor LogAnalyticsOperationResult, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsDeleteFuture", "Result", future.Response(), "Polling failure") + err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportRequestRateByIntervalFuture", "Result", future.Response(), "Polling failure") return } if !done { - ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationVersionsDeleteFuture") + laor.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.LogAnalyticsExportRequestRateByIntervalFuture") return } - ar.Response = future.Response() + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if laor.Response.Response, err = future.GetResult(sender); err == nil && laor.Response.Response.StatusCode != http.StatusNoContent { + laor, err = client.ExportRequestRateByIntervalResponder(laor.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportRequestRateByIntervalFuture", "Result", laor.Response.Response, "Failure responding to request") + } + } return } -// GalleryApplicationVersionsUpdateFuture an abstraction for monitoring and retrieving the results of a +// LogAnalyticsExportThrottledRequestsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. -type GalleryApplicationVersionsUpdateFuture struct { +type LogAnalyticsExportThrottledRequestsFuture struct { azure.FutureAPI // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. - Result func(GalleryApplicationVersionsClient) (GalleryApplicationVersion, error) + Result func(LogAnalyticsClient) (LogAnalyticsOperationResult, error) } // UnmarshalJSON is the custom unmarshaller for CreateFuture. 
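// Illustrative sketch, not part of the vendored SDK: the usual caller-side pattern
// for the long-running-operation futures above. The helper name is hypothetical,
// the client is assumed to be a configured LogAnalyticsClient, and
// WaitForCompletionRef is assumed to come from the embedded azure.FutureAPI.
func waitForLogAnalyticsExport(ctx context.Context, client LogAnalyticsClient, future LogAnalyticsExportRequestRateByIntervalFuture) (LogAnalyticsOperationResult, error) {
	// Poll until the operation reaches a terminal state (or ctx is cancelled).
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return LogAnalyticsOperationResult{}, err
	}
	// Result re-checks completion and decodes the final response body.
	return future.Result(client)
}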
-func (future *GalleryApplicationVersionsUpdateFuture) UnmarshalJSON(body []byte) error { +func (future *LogAnalyticsExportThrottledRequestsFuture) UnmarshalJSON(body []byte) error { var azFuture azure.Future if err := json.Unmarshal(body, &azFuture); err != nil { return err @@ -5030,255 +10151,129 @@ func (future *GalleryApplicationVersionsUpdateFuture) UnmarshalJSON(body []byte) return nil } -// result is the default implementation for GalleryApplicationVersionsUpdateFuture.Result. -func (future *GalleryApplicationVersionsUpdateFuture) result(client GalleryApplicationVersionsClient) (gav GalleryApplicationVersion, err error) { +// result is the default implementation for LogAnalyticsExportThrottledRequestsFuture.Result. +func (future *LogAnalyticsExportThrottledRequestsFuture) result(client LogAnalyticsClient) (laor LogAnalyticsOperationResult, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsUpdateFuture", "Result", future.Response(), "Polling failure") + err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportThrottledRequestsFuture", "Result", future.Response(), "Polling failure") return } if !done { - gav.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationVersionsUpdateFuture") + laor.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.LogAnalyticsExportThrottledRequestsFuture") return } sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if gav.Response.Response, err = future.GetResult(sender); err == nil && gav.Response.Response.StatusCode != http.StatusNoContent { - gav, err = client.UpdateResponder(gav.Response.Response) + if laor.Response.Response, err = future.GetResult(sender); err == nil && laor.Response.Response.StatusCode != http.StatusNoContent { + laor, err = client.ExportThrottledRequestsResponder(laor.Response.Response) if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsUpdateFuture", "Result", gav.Response.Response, "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportThrottledRequestsFuture", "Result", laor.Response.Response, "Failure responding to request") } } return } -// GalleryApplicationVersionUpdate specifies information about the gallery Application Version that you -// want to update. -type GalleryApplicationVersionUpdate struct { - *GalleryApplicationVersionProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Resource Id - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Resource name - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Resource type - Type *string `json:"type,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for GalleryApplicationVersionUpdate. -func (gavu GalleryApplicationVersionUpdate) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if gavu.GalleryApplicationVersionProperties != nil { - objectMap["properties"] = gavu.GalleryApplicationVersionProperties - } - if gavu.Tags != nil { - objectMap["tags"] = gavu.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for GalleryApplicationVersionUpdate struct. 
-func (gavu *GalleryApplicationVersionUpdate) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var galleryApplicationVersionProperties GalleryApplicationVersionProperties - err = json.Unmarshal(*v, &galleryApplicationVersionProperties) - if err != nil { - return err - } - gavu.GalleryApplicationVersionProperties = &galleryApplicationVersionProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - gavu.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - gavu.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - gavu.Type = &typeVar - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - gavu.Tags = tags - } - } - } - - return nil -} - -// GalleryArtifactPublishingProfileBase describes the basic gallery artifact publishing profile. -type GalleryArtifactPublishingProfileBase struct { - // TargetRegions - The target regions where the Image Version is going to be replicated to. This property is updatable. - TargetRegions *[]TargetRegion `json:"targetRegions,omitempty"` - // ReplicaCount - The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable. - ReplicaCount *int32 `json:"replicaCount,omitempty"` - // ExcludeFromLatest - If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version. - ExcludeFromLatest *bool `json:"excludeFromLatest,omitempty"` - // PublishedDate - READ-ONLY; The timestamp for when the gallery Image Version is published. - PublishedDate *date.Time `json:"publishedDate,omitempty"` - // EndOfLifeDate - The end of life date of the gallery Image Version. This property can be used for decommissioning purposes. This property is updatable. - EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"` - // StorageAccountType - Specifies the storage account type to be used to store the image. This property is not updatable. Possible values include: 'StorageAccountTypeStandardLRS', 'StorageAccountTypeStandardZRS', 'StorageAccountTypePremiumLRS' - StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"` -} - -// MarshalJSON is the custom marshaler for GalleryArtifactPublishingProfileBase. -func (gappb GalleryArtifactPublishingProfileBase) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if gappb.TargetRegions != nil { - objectMap["targetRegions"] = gappb.TargetRegions - } - if gappb.ReplicaCount != nil { - objectMap["replicaCount"] = gappb.ReplicaCount - } - if gappb.ExcludeFromLatest != nil { - objectMap["excludeFromLatest"] = gappb.ExcludeFromLatest - } - if gappb.EndOfLifeDate != nil { - objectMap["endOfLifeDate"] = gappb.EndOfLifeDate - } - if gappb.StorageAccountType != "" { - objectMap["storageAccountType"] = gappb.StorageAccountType - } - return json.Marshal(objectMap) -} - -// GalleryArtifactSource the source image from which the Image Version is going to be created. 
-type GalleryArtifactSource struct { - ManagedImage *ManagedArtifact `json:"managedImage,omitempty"` -} - -// GalleryArtifactVersionSource the gallery artifact version source. -type GalleryArtifactVersionSource struct { - // ID - The id of the gallery artifact version source. Can specify a disk uri, snapshot uri, or user image. - ID *string `json:"id,omitempty"` -} - -// GalleryDataDiskImage this is the data disk image. -type GalleryDataDiskImage struct { - // Lun - This property specifies the logical unit number of the data disk. This value is used to identify data disks within the Virtual Machine and therefore must be unique for each data disk attached to the Virtual Machine. - Lun *int32 `json:"lun,omitempty"` - // SizeInGB - READ-ONLY; This property indicates the size of the VHD to be created. - SizeInGB *int32 `json:"sizeInGB,omitempty"` - // HostCaching - The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'. Possible values include: 'HostCachingNone', 'HostCachingReadOnly', 'HostCachingReadWrite' - HostCaching HostCaching `json:"hostCaching,omitempty"` - Source *GalleryArtifactVersionSource `json:"source,omitempty"` -} - -// MarshalJSON is the custom marshaler for GalleryDataDiskImage. -func (gddi GalleryDataDiskImage) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if gddi.Lun != nil { - objectMap["lun"] = gddi.Lun - } - if gddi.HostCaching != "" { - objectMap["hostCaching"] = gddi.HostCaching - } - if gddi.Source != nil { - objectMap["source"] = gddi.Source - } - return json.Marshal(objectMap) +// LogAnalyticsInputBase api input base class for LogAnalytics Api. +type LogAnalyticsInputBase struct { + // BlobContainerSasURI - SAS Uri of the logging blob container to which LogAnalytics Api writes output logs to. + BlobContainerSasURI *string `json:"blobContainerSasUri,omitempty"` + // FromTime - From time of the query + FromTime *date.Time `json:"fromTime,omitempty"` + // ToTime - To time of the query + ToTime *date.Time `json:"toTime,omitempty"` + // GroupByThrottlePolicy - Group query result by Throttle Policy applied. + GroupByThrottlePolicy *bool `json:"groupByThrottlePolicy,omitempty"` + // GroupByOperationName - Group query result by Operation Name. + GroupByOperationName *bool `json:"groupByOperationName,omitempty"` + // GroupByResourceName - Group query result by Resource Name. + GroupByResourceName *bool `json:"groupByResourceName,omitempty"` + // GroupByClientApplicationID - Group query result by Client Application ID. + GroupByClientApplicationID *bool `json:"groupByClientApplicationId,omitempty"` + // GroupByUserAgent - Group query result by User Agent. + GroupByUserAgent *bool `json:"groupByUserAgent,omitempty"` } -// GalleryDiskImage this is the disk image base class. -type GalleryDiskImage struct { - // SizeInGB - READ-ONLY; This property indicates the size of the VHD to be created. - SizeInGB *int32 `json:"sizeInGB,omitempty"` - // HostCaching - The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'. 
Possible values include: 'HostCachingNone', 'HostCachingReadOnly', 'HostCachingReadWrite' - HostCaching HostCaching `json:"hostCaching,omitempty"` - Source *GalleryArtifactVersionSource `json:"source,omitempty"` +// LogAnalyticsOperationResult logAnalytics operation status response +type LogAnalyticsOperationResult struct { + autorest.Response `json:"-"` + // Properties - READ-ONLY; LogAnalyticsOutput + Properties *LogAnalyticsOutput `json:"properties,omitempty"` } -// MarshalJSON is the custom marshaler for GalleryDiskImage. -func (gdi GalleryDiskImage) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for LogAnalyticsOperationResult. +func (laor LogAnalyticsOperationResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if gdi.HostCaching != "" { - objectMap["hostCaching"] = gdi.HostCaching - } - if gdi.Source != nil { - objectMap["source"] = gdi.Source - } return json.Marshal(objectMap) } -// GalleryIdentifier describes the gallery unique name. -type GalleryIdentifier struct { - // UniqueName - READ-ONLY; The unique name of the Shared Image Gallery. This name is generated automatically by Azure. - UniqueName *string `json:"uniqueName,omitempty"` +// LogAnalyticsOutput logAnalytics output properties +type LogAnalyticsOutput struct { + // Output - READ-ONLY; Output file Uri path to blob container. + Output *string `json:"output,omitempty"` } -// MarshalJSON is the custom marshaler for GalleryIdentifier. -func (gi GalleryIdentifier) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for LogAnalyticsOutput. +func (lao LogAnalyticsOutput) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) return json.Marshal(objectMap) } -// GalleryImage specifies information about the gallery Image Definition that you want to create or update. -type GalleryImage struct { - autorest.Response `json:"-"` - *GalleryImageProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Resource Id +// MaintenanceRedeployStatus maintenance Operation Status. +type MaintenanceRedeployStatus struct { + // IsCustomerInitiatedMaintenanceAllowed - True, if customer is allowed to perform Maintenance. + IsCustomerInitiatedMaintenanceAllowed *bool `json:"isCustomerInitiatedMaintenanceAllowed,omitempty"` + // PreMaintenanceWindowStartTime - Start Time for the Pre Maintenance Window. + PreMaintenanceWindowStartTime *date.Time `json:"preMaintenanceWindowStartTime,omitempty"` + // PreMaintenanceWindowEndTime - End Time for the Pre Maintenance Window. + PreMaintenanceWindowEndTime *date.Time `json:"preMaintenanceWindowEndTime,omitempty"` + // MaintenanceWindowStartTime - Start Time for the Maintenance Window. + MaintenanceWindowStartTime *date.Time `json:"maintenanceWindowStartTime,omitempty"` + // MaintenanceWindowEndTime - End Time for the Maintenance Window. + MaintenanceWindowEndTime *date.Time `json:"maintenanceWindowEndTime,omitempty"` + // LastOperationResultCode - The Last Maintenance Operation Result Code. Possible values include: 'MaintenanceOperationResultCodeTypesNone', 'MaintenanceOperationResultCodeTypesRetryLater', 'MaintenanceOperationResultCodeTypesMaintenanceAborted', 'MaintenanceOperationResultCodeTypesMaintenanceCompleted' + LastOperationResultCode MaintenanceOperationResultCodeTypes `json:"lastOperationResultCode,omitempty"` + // LastOperationMessage - Message returned for the last Maintenance Operation. 
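// Illustrative sketch, not part of the vendored SDK: filling the shared
// LogAnalyticsInputBase fields defined earlier in this hunk. The helper name,
// SAS URI placeholder, and grouping choices are only examples; the query window
// is supplied by the caller as autorest date.Time values.
func exampleLogAnalyticsInput(start, end date.Time, blobContainerSasURI string) LogAnalyticsInputBase {
	return LogAnalyticsInputBase{
		BlobContainerSasURI:  &blobContainerSasURI,
		FromTime:             &start,
		ToTime:               &end,
		GroupByOperationName: to.BoolPtr(true),
		GroupByResourceName:  to.BoolPtr(true),
	}
}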
+ LastOperationMessage *string `json:"lastOperationMessage,omitempty"` +} + +// ManagedArtifact the managed artifact. +type ManagedArtifact struct { + // ID - The managed artifact id. ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Resource name - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Resource type - Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` } -// MarshalJSON is the custom marshaler for GalleryImage. -func (gi GalleryImage) MarshalJSON() ([]byte, error) { +// ManagedDiskParameters the parameters of a managed disk. +type ManagedDiskParameters struct { + // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS', 'StorageAccountTypesPremiumZRS', 'StorageAccountTypesStandardSSDZRS' + StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` + // DiskEncryptionSet - Specifies the customer managed disk encryption set resource id for the managed disk. + DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` +} + +// NetworkInterfaceReference describes a network interface reference. +type NetworkInterfaceReference struct { + *NetworkInterfaceReferenceProperties `json:"properties,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for NetworkInterfaceReference. +func (nir NetworkInterfaceReference) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if gi.GalleryImageProperties != nil { - objectMap["properties"] = gi.GalleryImageProperties - } - if gi.Location != nil { - objectMap["location"] = gi.Location + if nir.NetworkInterfaceReferenceProperties != nil { + objectMap["properties"] = nir.NetworkInterfaceReferenceProperties } - if gi.Tags != nil { - objectMap["tags"] = gi.Tags + if nir.ID != nil { + objectMap["id"] = nir.ID } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for GalleryImage struct. -func (gi *GalleryImage) UnmarshalJSON(body []byte) error { +// UnmarshalJSON is the custom unmarshaler for NetworkInterfaceReference struct. 
+func (nir *NetworkInterfaceReference) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { @@ -5288,12 +10283,12 @@ func (gi *GalleryImage) UnmarshalJSON(body []byte) error { switch k { case "properties": if v != nil { - var galleryImageProperties GalleryImageProperties - err = json.Unmarshal(*v, &galleryImageProperties) + var networkInterfaceReferenceProperties NetworkInterfaceReferenceProperties + err = json.Unmarshal(*v, &networkInterfaceReferenceProperties) if err != nil { return err } - gi.GalleryImageProperties = &galleryImageProperties + nir.NetworkInterfaceReferenceProperties = &networkInterfaceReferenceProperties } case "id": if v != nil { @@ -5302,80 +10297,228 @@ func (gi *GalleryImage) UnmarshalJSON(body []byte) error { if err != nil { return err } - gi.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - gi.Name = &name + nir.ID = &ID } - case "type": + } + } + + return nil +} + +// NetworkInterfaceReferenceProperties describes a network interface reference properties. +type NetworkInterfaceReferenceProperties struct { + // Primary - Specifies the primary network interface in case the virtual machine has more than 1 network interface. + Primary *bool `json:"primary,omitempty"` + // DeleteOption - Specify what happens to the network interface when the VM is deleted. Possible values include: 'DeleteOptionsDelete', 'DeleteOptionsDetach' + DeleteOption DeleteOptions `json:"deleteOption,omitempty"` +} + +// NetworkProfile specifies the network interfaces or the networking configuration of the virtual machine. +type NetworkProfile struct { + // NetworkInterfaces - Specifies the list of resource Ids for the network interfaces associated with the virtual machine. + NetworkInterfaces *[]NetworkInterfaceReference `json:"networkInterfaces,omitempty"` + // NetworkAPIVersion - specifies the Microsoft.Network API version used when creating networking resources in the Network Interface Configurations. Possible values include: 'NetworkAPIVersionTwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne' + NetworkAPIVersion NetworkAPIVersion `json:"networkApiVersion,omitempty"` + // NetworkInterfaceConfigurations - Specifies the networking configurations that will be used to create the virtual machine networking resources. + NetworkInterfaceConfigurations *[]VirtualMachineNetworkInterfaceConfiguration `json:"networkInterfaceConfigurations,omitempty"` +} + +// OperationListResult the List Compute Operation operation response. +type OperationListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; The list of compute operations + Value *[]OperationValue `json:"value,omitempty"` +} + +// MarshalJSON is the custom marshaler for OperationListResult. +func (olr OperationListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// OperationValue describes the properties of a Compute Operation value. +type OperationValue struct { + // Origin - READ-ONLY; The origin of the compute operation. + Origin *string `json:"origin,omitempty"` + // Name - READ-ONLY; The name of the compute operation. + Name *string `json:"name,omitempty"` + *OperationValueDisplay `json:"display,omitempty"` +} + +// MarshalJSON is the custom marshaler for OperationValue. 
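// Illustrative sketch, not part of the vendored SDK: building the NetworkProfile
// defined above from an existing NIC resource ID and marking it primary. The
// helper name and the nicID value are placeholders; only the field and constant
// names come from the definitions in this file.
func exampleNetworkProfile(nicID string) NetworkProfile {
	return NetworkProfile{
		NetworkInterfaces: &[]NetworkInterfaceReference{
			{
				ID: to.StringPtr(nicID),
				NetworkInterfaceReferenceProperties: &NetworkInterfaceReferenceProperties{
					Primary:      to.BoolPtr(true),
					DeleteOption: DeleteOptionsDetach,
				},
			},
		},
	}
}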
+func (ov OperationValue) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ov.OperationValueDisplay != nil { + objectMap["display"] = ov.OperationValueDisplay + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for OperationValue struct. +func (ov *OperationValue) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "origin": if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) + var origin string + err = json.Unmarshal(*v, &origin) if err != nil { return err } - gi.Type = &typeVar + ov.Origin = &origin } - case "location": + case "name": if v != nil { - var location string - err = json.Unmarshal(*v, &location) + var name string + err = json.Unmarshal(*v, &name) if err != nil { return err } - gi.Location = &location + ov.Name = &name } - case "tags": + case "display": if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) + var operationValueDisplay OperationValueDisplay + err = json.Unmarshal(*v, &operationValueDisplay) if err != nil { return err } - gi.Tags = tags + ov.OperationValueDisplay = &operationValueDisplay } } } - return nil + return nil +} + +// OperationValueDisplay describes the properties of a Compute Operation Value Display. +type OperationValueDisplay struct { + // Operation - READ-ONLY; The display name of the compute operation. + Operation *string `json:"operation,omitempty"` + // Resource - READ-ONLY; The display name of the resource the operation applies to. + Resource *string `json:"resource,omitempty"` + // Description - READ-ONLY; The description of the operation. + Description *string `json:"description,omitempty"` + // Provider - READ-ONLY; The resource provider for the operation. + Provider *string `json:"provider,omitempty"` +} + +// MarshalJSON is the custom marshaler for OperationValueDisplay. +func (ovd OperationValueDisplay) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// OrchestrationServiceStateInput the input for OrchestrationServiceState +type OrchestrationServiceStateInput struct { + // ServiceName - The name of the service. Possible values include: 'OrchestrationServiceNamesAutomaticRepairs' + ServiceName OrchestrationServiceNames `json:"serviceName,omitempty"` + // Action - The action to be performed. Possible values include: 'OrchestrationServiceStateActionResume', 'OrchestrationServiceStateActionSuspend' + Action OrchestrationServiceStateAction `json:"action,omitempty"` +} + +// OrchestrationServiceSummary summary for an orchestration service of a virtual machine scale set. +type OrchestrationServiceSummary struct { + // ServiceName - READ-ONLY; The name of the service. Possible values include: 'OrchestrationServiceNamesAutomaticRepairs', 'OrchestrationServiceNamesDummyOrchestrationServiceName' + ServiceName OrchestrationServiceNames `json:"serviceName,omitempty"` + // ServiceState - READ-ONLY; The current state of the service. Possible values include: 'OrchestrationServiceStateNotRunning', 'OrchestrationServiceStateRunning', 'OrchestrationServiceStateSuspended' + ServiceState OrchestrationServiceState `json:"serviceState,omitempty"` +} + +// MarshalJSON is the custom marshaler for OrchestrationServiceSummary. 
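// Illustrative sketch, not part of the vendored SDK: the OrchestrationServiceStateInput
// payload defined above, filled to suspend automatic repairs on a scale set; resuming
// would use OrchestrationServiceStateActionResume instead. The helper name is hypothetical.
func suspendAutomaticRepairsInput() OrchestrationServiceStateInput {
	return OrchestrationServiceStateInput{
		ServiceName: OrchestrationServiceNamesAutomaticRepairs,
		Action:      OrchestrationServiceStateActionSuspend,
	}
}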
+func (oss OrchestrationServiceSummary) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// OSDisk specifies information about the operating system disk used by the virtual machine.

For +// more information about disks, see [About disks and VHDs for Azure virtual +// machines](https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview). +type OSDisk struct { + // OsType - This property allows you to specify the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD.

Possible values are:

**Windows**

**Linux**. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // EncryptionSettings - Specifies the encryption settings for the OS Disk.

Minimum api-version: 2015-06-15 + EncryptionSettings *DiskEncryptionSettings `json:"encryptionSettings,omitempty"` + // Name - The disk name. + Name *string `json:"name,omitempty"` + // Vhd - The virtual hard disk. + Vhd *VirtualHardDisk `json:"vhd,omitempty"` + // Image - The source user image virtual hard disk. The virtual hard disk will be copied before being attached to the virtual machine. If SourceImage is provided, the destination virtual hard drive must not exist. + Image *VirtualHardDisk `json:"image,omitempty"` + // Caching - Specifies the caching requirements.

Possible values are:

**None**

**ReadOnly**

**ReadWrite**

Default: **None** for Standard storage. **ReadOnly** for Premium storage. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' + Caching CachingTypes `json:"caching,omitempty"` + // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` + // DiffDiskSettings - Specifies the ephemeral Disk Settings for the operating system disk used by the virtual machine. + DiffDiskSettings *DiffDiskSettings `json:"diffDiskSettings,omitempty"` + // CreateOption - Specifies how the virtual machine should be created.

Possible values are:

**Attach** \u2013 This value is used when you are using a specialized disk to create the virtual machine.

**FromImage** \u2013 This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach' + CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` + // DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image.

This value cannot be larger than 1023 GB + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // ManagedDisk - The managed disk parameters. + ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"` + // DeleteOption - Specifies whether OS Disk should be deleted or detached upon VM deletion.

Possible values:

**Delete** If this value is used, the OS disk is deleted when VM is deleted.

**Detach** If this value is used, the os disk is retained after VM is deleted.

The default value is set to **detach**. For an ephemeral OS Disk, the default value is set to **Delete**. User cannot change the delete option for ephemeral OS Disk. Possible values include: 'DiskDeleteOptionTypesDelete', 'DiskDeleteOptionTypesDetach' + DeleteOption DiskDeleteOptionTypes `json:"deleteOption,omitempty"` +} + +// OSDiskImage contains the os disk image information. +type OSDiskImage struct { + // OperatingSystem - The operating system of the osDiskImage. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux' + OperatingSystem OperatingSystemTypes `json:"operatingSystem,omitempty"` } -// GalleryImageIdentifier this is the gallery Image Definition identifier. -type GalleryImageIdentifier struct { - // Publisher - The name of the gallery Image Definition publisher. - Publisher *string `json:"publisher,omitempty"` - // Offer - The name of the gallery Image Definition offer. - Offer *string `json:"offer,omitempty"` - // Sku - The name of the gallery Image Definition SKU. - Sku *string `json:"sku,omitempty"` +// OSDiskImageEncryption contains encryption settings for an OS disk image. +type OSDiskImageEncryption struct { + // DiskEncryptionSetID - A relative URI containing the resource ID of the disk encryption set. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty"` } -// GalleryImageList the List Gallery Images operation response. -type GalleryImageList struct { +// OSFamily describes a cloud service OS family. +type OSFamily struct { autorest.Response `json:"-"` - // Value - A list of Shared Image Gallery images. - Value *[]GalleryImage `json:"value,omitempty"` - // NextLink - The uri to fetch the next page of Image Definitions in the Shared Image Gallery. Call ListNext() with this to fetch the next page of gallery Image Definitions. - NextLink *string `json:"nextLink,omitempty"` + // ID - READ-ONLY; Resource Id. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type. + Type *string `json:"type,omitempty"` + // Location - READ-ONLY; Resource location. + Location *string `json:"location,omitempty"` + Properties *OSFamilyProperties `json:"properties,omitempty"` } -// GalleryImageListIterator provides access to a complete listing of GalleryImage values. -type GalleryImageListIterator struct { +// MarshalJSON is the custom marshaler for OSFamily. +func (of OSFamily) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if of.Properties != nil { + objectMap["properties"] = of.Properties + } + return json.Marshal(objectMap) +} + +// OSFamilyListResult ... +type OSFamilyListResult struct { + autorest.Response `json:"-"` + Value *[]OSFamily `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// OSFamilyListResultIterator provides access to a complete listing of OSFamily values. +type OSFamilyListResultIterator struct { i int - page GalleryImageListPage + page OSFamilyListResultPage } // NextWithContext advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. 
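// Illustrative sketch, not part of the vendored SDK: an OSDisk for a VM created
// from a platform image onto a Premium managed disk that is deleted together with
// the VM. Every value here is only an example; the field and constant names are
// taken from the OSDisk and ManagedDiskParameters definitions above.
func exampleOSDisk() OSDisk {
	return OSDisk{
		Name:         to.StringPtr("example-osdisk"),
		CreateOption: DiskCreateOptionTypesFromImage,
		Caching:      CachingTypesReadWrite,
		DeleteOption: DiskDeleteOptionTypesDelete,
		ManagedDisk: &ManagedDiskParameters{
			StorageAccountType: StorageAccountTypesPremiumLRS,
		},
	}
}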
-func (iter *GalleryImageListIterator) NextWithContext(ctx context.Context) (err error) { +func (iter *OSFamilyListResultIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImageListIterator.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/OSFamilyListResultIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { @@ -5400,67 +10543,67 @@ func (iter *GalleryImageListIterator) NextWithContext(ctx context.Context) (err // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (iter *GalleryImageListIterator) Next() error { +func (iter *OSFamilyListResultIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. -func (iter GalleryImageListIterator) NotDone() bool { +func (iter OSFamilyListResultIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. -func (iter GalleryImageListIterator) Response() GalleryImageList { +func (iter OSFamilyListResultIterator) Response() OSFamilyListResult { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. -func (iter GalleryImageListIterator) Value() GalleryImage { +func (iter OSFamilyListResultIterator) Value() OSFamily { if !iter.page.NotDone() { - return GalleryImage{} + return OSFamily{} } return iter.page.Values()[iter.i] } -// Creates a new instance of the GalleryImageListIterator type. -func NewGalleryImageListIterator(page GalleryImageListPage) GalleryImageListIterator { - return GalleryImageListIterator{page: page} +// Creates a new instance of the OSFamilyListResultIterator type. +func NewOSFamilyListResultIterator(page OSFamilyListResultPage) OSFamilyListResultIterator { + return OSFamilyListResultIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. -func (gil GalleryImageList) IsEmpty() bool { - return gil.Value == nil || len(*gil.Value) == 0 +func (oflr OSFamilyListResult) IsEmpty() bool { + return oflr.Value == nil || len(*oflr.Value) == 0 } // hasNextLink returns true if the NextLink is not empty. -func (gil GalleryImageList) hasNextLink() bool { - return gil.NextLink != nil && len(*gil.NextLink) != 0 +func (oflr OSFamilyListResult) hasNextLink() bool { + return oflr.NextLink != nil && len(*oflr.NextLink) != 0 } -// galleryImageListPreparer prepares a request to retrieve the next set of results. +// oSFamilyListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. -func (gil GalleryImageList) galleryImageListPreparer(ctx context.Context) (*http.Request, error) { - if !gil.hasNextLink() { +func (oflr OSFamilyListResult) oSFamilyListResultPreparer(ctx context.Context) (*http.Request, error) { + if !oflr.hasNextLink() { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), - autorest.WithBaseURL(to.String(gil.NextLink))) + autorest.WithBaseURL(to.String(oflr.NextLink))) } -// GalleryImageListPage contains a page of GalleryImage values. 
-type GalleryImageListPage struct { - fn func(context.Context, GalleryImageList) (GalleryImageList, error) - gil GalleryImageList +// OSFamilyListResultPage contains a page of OSFamily values. +type OSFamilyListResultPage struct { + fn func(context.Context, OSFamilyListResult) (OSFamilyListResult, error) + oflr OSFamilyListResult } // NextWithContext advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. -func (page *GalleryImageListPage) NextWithContext(ctx context.Context) (err error) { +func (page *OSFamilyListResultPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImageListPage.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/OSFamilyListResultPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { @@ -5470,11 +10613,11 @@ func (page *GalleryImageListPage) NextWithContext(ctx context.Context) (err erro }() } for { - next, err := page.fn(ctx, page.gil) + next, err := page.fn(ctx, page.oflr) if err != nil { return err } - page.gil = next + page.oflr = next if !next.hasNextLink() || !next.IsEmpty() { break } @@ -5482,436 +10625,119 @@ func (page *GalleryImageListPage) NextWithContext(ctx context.Context) (err erro return nil } -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *GalleryImageListPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page GalleryImageListPage) NotDone() bool { - return !page.gil.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page GalleryImageListPage) Response() GalleryImageList { - return page.gil -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page GalleryImageListPage) Values() []GalleryImage { - if page.gil.IsEmpty() { - return nil - } - return *page.gil.Value -} - -// Creates a new instance of the GalleryImageListPage type. -func NewGalleryImageListPage(cur GalleryImageList, getNextPage func(context.Context, GalleryImageList) (GalleryImageList, error)) GalleryImageListPage { - return GalleryImageListPage{ - fn: getNextPage, - gil: cur, - } -} - -// GalleryImageProperties describes the properties of a gallery Image Definition. -type GalleryImageProperties struct { - // Description - The description of this gallery Image Definition resource. This property is updatable. - Description *string `json:"description,omitempty"` - // Eula - The Eula agreement for the gallery Image Definition. - Eula *string `json:"eula,omitempty"` - // PrivacyStatementURI - The privacy statement uri. - PrivacyStatementURI *string `json:"privacyStatementUri,omitempty"` - // ReleaseNoteURI - The release note uri. - ReleaseNoteURI *string `json:"releaseNoteUri,omitempty"` - // OsType - This property allows you to specify the type of the OS that is included in the disk when creating a VM from a managed image.

Possible values are:

**Windows**

**Linux**. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux' - OsType OperatingSystemTypes `json:"osType,omitempty"` - // OsState - This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'. Possible values include: 'OperatingSystemStateTypesGeneralized', 'OperatingSystemStateTypesSpecialized' - OsState OperatingSystemStateTypes `json:"osState,omitempty"` - // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'HyperVGenerationV1', 'HyperVGenerationV2' - HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"` - // EndOfLifeDate - The end of life date of the gallery Image Definition. This property can be used for decommissioning purposes. This property is updatable. - EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"` - Identifier *GalleryImageIdentifier `json:"identifier,omitempty"` - Recommended *RecommendedMachineConfiguration `json:"recommended,omitempty"` - Disallowed *Disallowed `json:"disallowed,omitempty"` - PurchasePlan *ImagePurchasePlan `json:"purchasePlan,omitempty"` - // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. Possible values include: 'ProvisioningState2Creating', 'ProvisioningState2Updating', 'ProvisioningState2Failed', 'ProvisioningState2Succeeded', 'ProvisioningState2Deleting', 'ProvisioningState2Migrating' - ProvisioningState ProvisioningState2 `json:"provisioningState,omitempty"` -} - -// MarshalJSON is the custom marshaler for GalleryImageProperties. -func (gip GalleryImageProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if gip.Description != nil { - objectMap["description"] = gip.Description - } - if gip.Eula != nil { - objectMap["eula"] = gip.Eula - } - if gip.PrivacyStatementURI != nil { - objectMap["privacyStatementUri"] = gip.PrivacyStatementURI - } - if gip.ReleaseNoteURI != nil { - objectMap["releaseNoteUri"] = gip.ReleaseNoteURI - } - if gip.OsType != "" { - objectMap["osType"] = gip.OsType - } - if gip.OsState != "" { - objectMap["osState"] = gip.OsState - } - if gip.HyperVGeneration != "" { - objectMap["hyperVGeneration"] = gip.HyperVGeneration - } - if gip.EndOfLifeDate != nil { - objectMap["endOfLifeDate"] = gip.EndOfLifeDate - } - if gip.Identifier != nil { - objectMap["identifier"] = gip.Identifier - } - if gip.Recommended != nil { - objectMap["recommended"] = gip.Recommended - } - if gip.Disallowed != nil { - objectMap["disallowed"] = gip.Disallowed - } - if gip.PurchasePlan != nil { - objectMap["purchasePlan"] = gip.PurchasePlan - } - return json.Marshal(objectMap) -} - -// GalleryImagesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type GalleryImagesCreateOrUpdateFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(GalleryImagesClient) (GalleryImage, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *GalleryImagesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for GalleryImagesCreateOrUpdateFuture.Result. 
-func (future *GalleryImagesCreateOrUpdateFuture) result(client GalleryImagesClient) (gi GalleryImage, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImagesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - gi.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if gi.Response.Response, err = future.GetResult(sender); err == nil && gi.Response.Response.StatusCode != http.StatusNoContent { - gi, err = client.CreateOrUpdateResponder(gi.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImagesCreateOrUpdateFuture", "Result", gi.Response.Response, "Failure responding to request") - } - } - return -} - -// GalleryImagesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type GalleryImagesDeleteFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(GalleryImagesClient) (autorest.Response, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *GalleryImagesDeleteFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for GalleryImagesDeleteFuture.Result. -func (future *GalleryImagesDeleteFuture) result(client GalleryImagesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImagesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesDeleteFuture") - return - } - ar.Response = future.Response() - return -} - -// GalleryImagesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type GalleryImagesUpdateFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(GalleryImagesClient) (GalleryImage, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *GalleryImagesUpdateFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for GalleryImagesUpdateFuture.Result. 
-func (future *GalleryImagesUpdateFuture) result(client GalleryImagesClient) (gi GalleryImage, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImagesUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - gi.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesUpdateFuture") - return +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *OSFamilyListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page OSFamilyListResultPage) NotDone() bool { + return !page.oflr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page OSFamilyListResultPage) Response() OSFamilyListResult { + return page.oflr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page OSFamilyListResultPage) Values() []OSFamily { + if page.oflr.IsEmpty() { + return nil } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if gi.Response.Response, err = future.GetResult(sender); err == nil && gi.Response.Response.StatusCode != http.StatusNoContent { - gi, err = client.UpdateResponder(gi.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImagesUpdateFuture", "Result", gi.Response.Response, "Failure responding to request") - } + return *page.oflr.Value +} + +// Creates a new instance of the OSFamilyListResultPage type. +func NewOSFamilyListResultPage(cur OSFamilyListResult, getNextPage func(context.Context, OSFamilyListResult) (OSFamilyListResult, error)) OSFamilyListResultPage { + return OSFamilyListResultPage{ + fn: getNextPage, + oflr: cur, } - return } -// GalleryImageUpdate specifies information about the gallery Image Definition that you want to update. -type GalleryImageUpdate struct { - *GalleryImageProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Resource Id - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Resource name +// OSFamilyProperties OS family properties. +type OSFamilyProperties struct { + // Name - READ-ONLY; The OS family name. Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Resource type - Type *string `json:"type,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` + // Label - READ-ONLY; The OS family label. + Label *string `json:"label,omitempty"` + // Versions - READ-ONLY; List of OS versions belonging to this family. + Versions *[]OSVersionPropertiesBase `json:"versions,omitempty"` } -// MarshalJSON is the custom marshaler for GalleryImageUpdate. -func (giu GalleryImageUpdate) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for OSFamilyProperties. +func (ofp OSFamilyProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if giu.GalleryImageProperties != nil { - objectMap["properties"] = giu.GalleryImageProperties - } - if giu.Tags != nil { - objectMap["tags"] = giu.Tags - } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for GalleryImageUpdate struct. 
-func (giu *GalleryImageUpdate) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var galleryImageProperties GalleryImageProperties - err = json.Unmarshal(*v, &galleryImageProperties) - if err != nil { - return err - } - giu.GalleryImageProperties = &galleryImageProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - giu.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - giu.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - giu.Type = &typeVar - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - giu.Tags = tags - } - } - } - - return nil +// OSProfile specifies the operating system settings for the virtual machine. Some of the settings cannot +// be changed once VM is provisioned. +type OSProfile struct { + // ComputerName - Specifies the host OS name of the virtual machine.

This name cannot be updated after the VM is created.

**Max-length (Windows):** 15 characters

**Max-length (Linux):** 64 characters.

For naming conventions and restrictions see [Azure infrastructure services implementation guidelines](https://docs.microsoft.com/azure/azure-resource-manager/management/resource-name-rules). + ComputerName *string `json:"computerName,omitempty"` + // AdminUsername - Specifies the name of the administrator account.

This property cannot be updated after the VM is created.

**Windows-only restriction:** Cannot end in "."

**Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5".

**Minimum-length (Linux):** 1 character

**Max-length (Linux):** 64 characters

**Max-length (Windows):** 20 characters. + AdminUsername *string `json:"adminUsername,omitempty"` + // AdminPassword - Specifies the password of the administrator account.

**Minimum-length (Windows):** 8 characters

**Minimum-length (Linux):** 6 characters

**Max-length (Windows):** 123 characters

**Max-length (Linux):** 72 characters

**Complexity requirements:** 3 out of 4 conditions below need to be fulfilled
Has lower characters
Has upper characters
Has a digit
Has a special character (Regex match [\W_])

**Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22", "iloveyou!"

For resetting the password, see [How to reset the Remote Desktop service or its login password in a Windows VM](https://docs.microsoft.com/troubleshoot/azure/virtual-machines/reset-rdp)

For resetting the root password, see [Manage users, SSH, and check or repair disks on Azure Linux VMs using the VMAccess Extension](https://docs.microsoft.com/troubleshoot/azure/virtual-machines/troubleshoot-ssh-connection) + AdminPassword *string `json:"adminPassword,omitempty"` + // CustomData - Specifies a base-64 encoded string of custom data. The base-64 encoded string is decoded to a binary array that is saved as a file on the Virtual Machine. The maximum length of the binary array is 65535 bytes.

**Note: Do not pass any secrets or passwords in customData property**

This property cannot be updated after the VM is created.

customData is passed to the VM to be saved as a file, for more information see [Custom Data on Azure VMs](https://azure.microsoft.com/blog/custom-data-and-cloud-init-on-windows-azure/)

For using cloud-init for your Linux VM, see [Using cloud-init to customize a Linux VM during creation](https://docs.microsoft.com/azure/virtual-machines/linux/using-cloud-init) + CustomData *string `json:"customData,omitempty"` + // WindowsConfiguration - Specifies Windows operating system settings on the virtual machine. + WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"` + // LinuxConfiguration - Specifies the Linux operating system settings on the virtual machine.

For a list of supported Linux distributions, see [Linux on Azure-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/linux/endorsed-distros). + LinuxConfiguration *LinuxConfiguration `json:"linuxConfiguration,omitempty"` + // Secrets - Specifies set of certificates that should be installed onto the virtual machine. To install certificates on a virtual machine it is recommended to use the [Azure Key Vault virtual machine extension for Linux](https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-linux) or the [Azure Key Vault virtual machine extension for Windows](https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-windows). + Secrets *[]VaultSecretGroup `json:"secrets,omitempty"` + // AllowExtensionOperations - Specifies whether extension operations should be allowed on the virtual machine.

This may only be set to False when no extensions are present on the virtual machine. + AllowExtensionOperations *bool `json:"allowExtensionOperations,omitempty"` + // RequireGuestProvisionSignal - Specifies whether the guest provision signal is required to infer provision success of the virtual machine. **Note: This property is for private testing only, and all customers must not set the property to false.** + RequireGuestProvisionSignal *bool `json:"requireGuestProvisionSignal,omitempty"` } -// GalleryImageVersion specifies information about the gallery Image Version that you want to create or -// update. -type GalleryImageVersion struct { - autorest.Response `json:"-"` - *GalleryImageVersionProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Resource Id +// OSVersion describes a cloud service OS version. +type OSVersion struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; Resource Id. ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Resource name + // Name - READ-ONLY; Resource name. Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Resource type + // Type - READ-ONLY; Resource type. Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` + // Location - READ-ONLY; Resource location. + Location *string `json:"location,omitempty"` + Properties *OSVersionProperties `json:"properties,omitempty"` } -// MarshalJSON is the custom marshaler for GalleryImageVersion. -func (giv GalleryImageVersion) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for OSVersion. +func (ov OSVersion) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if giv.GalleryImageVersionProperties != nil { - objectMap["properties"] = giv.GalleryImageVersionProperties - } - if giv.Location != nil { - objectMap["location"] = giv.Location - } - if giv.Tags != nil { - objectMap["tags"] = giv.Tags + if ov.Properties != nil { + objectMap["properties"] = ov.Properties } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for GalleryImageVersion struct. -func (giv *GalleryImageVersion) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var galleryImageVersionProperties GalleryImageVersionProperties - err = json.Unmarshal(*v, &galleryImageVersionProperties) - if err != nil { - return err - } - giv.GalleryImageVersionProperties = &galleryImageVersionProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - giv.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - giv.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - giv.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - giv.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - giv.Tags = tags - } - } - } - - return nil -} - -// GalleryImageVersionList the List Gallery Image version operation response. 
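// Illustrative aside, not part of the diff: a minimal sketch of populating the
// OSProfile defined above within its documented limits. All values are
// hypothetical; it assumes the compute package context plus encoding/base64 and
// the github.com/Azure/go-autorest/autorest/to helpers.
func exampleOSProfile() OSProfile {
	// customData must be base64-encoded and decode to at most 65535 bytes.
	customData := base64.StdEncoding.EncodeToString([]byte("#cloud-config\n"))
	return OSProfile{
		ComputerName:             to.StringPtr("examplevm01"),  // max 15 chars on Windows, 64 on Linux
		AdminUsername:            to.StringPtr("exampleadmin"), // must avoid the disallowed names listed above
		AdminPassword:            to.StringPtr("placeholder"),  // replace with a value meeting the length/complexity rules above
		CustomData:               to.StringPtr(customData),
		AllowExtensionOperations: to.BoolPtr(true),
	}
}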
-type GalleryImageVersionList struct { +// OSVersionListResult ... +type OSVersionListResult struct { autorest.Response `json:"-"` - // Value - A list of gallery Image Versions. - Value *[]GalleryImageVersion `json:"value,omitempty"` - // NextLink - The uri to fetch the next page of gallery Image Versions. Call ListNext() with this to fetch the next page of gallery Image Versions. - NextLink *string `json:"nextLink,omitempty"` + Value *[]OSVersion `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` } -// GalleryImageVersionListIterator provides access to a complete listing of GalleryImageVersion values. -type GalleryImageVersionListIterator struct { +// OSVersionListResultIterator provides access to a complete listing of OSVersion values. +type OSVersionListResultIterator struct { i int - page GalleryImageVersionListPage + page OSVersionListResultPage } // NextWithContext advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. -func (iter *GalleryImageVersionListIterator) NextWithContext(ctx context.Context) (err error) { +func (iter *OSVersionListResultIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImageVersionListIterator.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/OSVersionListResultIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { @@ -5936,67 +10762,67 @@ func (iter *GalleryImageVersionListIterator) NextWithContext(ctx context.Context // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (iter *GalleryImageVersionListIterator) Next() error { +func (iter *OSVersionListResultIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. -func (iter GalleryImageVersionListIterator) NotDone() bool { +func (iter OSVersionListResultIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. -func (iter GalleryImageVersionListIterator) Response() GalleryImageVersionList { +func (iter OSVersionListResultIterator) Response() OSVersionListResult { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. -func (iter GalleryImageVersionListIterator) Value() GalleryImageVersion { +func (iter OSVersionListResultIterator) Value() OSVersion { if !iter.page.NotDone() { - return GalleryImageVersion{} + return OSVersion{} } return iter.page.Values()[iter.i] } -// Creates a new instance of the GalleryImageVersionListIterator type. -func NewGalleryImageVersionListIterator(page GalleryImageVersionListPage) GalleryImageVersionListIterator { - return GalleryImageVersionListIterator{page: page} +// Creates a new instance of the OSVersionListResultIterator type. +func NewOSVersionListResultIterator(page OSVersionListResultPage) OSVersionListResultIterator { + return OSVersionListResultIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. 
-func (givl GalleryImageVersionList) IsEmpty() bool { - return givl.Value == nil || len(*givl.Value) == 0 +func (ovlr OSVersionListResult) IsEmpty() bool { + return ovlr.Value == nil || len(*ovlr.Value) == 0 } // hasNextLink returns true if the NextLink is not empty. -func (givl GalleryImageVersionList) hasNextLink() bool { - return givl.NextLink != nil && len(*givl.NextLink) != 0 +func (ovlr OSVersionListResult) hasNextLink() bool { + return ovlr.NextLink != nil && len(*ovlr.NextLink) != 0 } -// galleryImageVersionListPreparer prepares a request to retrieve the next set of results. +// oSVersionListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. -func (givl GalleryImageVersionList) galleryImageVersionListPreparer(ctx context.Context) (*http.Request, error) { - if !givl.hasNextLink() { +func (ovlr OSVersionListResult) oSVersionListResultPreparer(ctx context.Context) (*http.Request, error) { + if !ovlr.hasNextLink() { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), - autorest.WithBaseURL(to.String(givl.NextLink))) + autorest.WithBaseURL(to.String(ovlr.NextLink))) } -// GalleryImageVersionListPage contains a page of GalleryImageVersion values. -type GalleryImageVersionListPage struct { - fn func(context.Context, GalleryImageVersionList) (GalleryImageVersionList, error) - givl GalleryImageVersionList +// OSVersionListResultPage contains a page of OSVersion values. +type OSVersionListResultPage struct { + fn func(context.Context, OSVersionListResult) (OSVersionListResult, error) + ovlr OSVersionListResult } // NextWithContext advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. -func (page *GalleryImageVersionListPage) NextWithContext(ctx context.Context) (err error) { +func (page *OSVersionListResultPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImageVersionListPage.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/OSVersionListResultPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { @@ -6006,11 +10832,11 @@ func (page *GalleryImageVersionListPage) NextWithContext(ctx context.Context) (e }() } for { - next, err := page.fn(ctx, page.givl) + next, err := page.fn(ctx, page.ovlr) if err != nil { return err } - page.givl = next + page.ovlr = next if !next.hasNextLink() || !next.IsEmpty() { break } @@ -6021,253 +10847,306 @@ func (page *GalleryImageVersionListPage) NextWithContext(ctx context.Context) (e // Next advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (page *GalleryImageVersionListPage) Next() error { +func (page *OSVersionListResultPage) Next() error { return page.NextWithContext(context.Background()) } // NotDone returns true if the page enumeration should be started or is not yet complete. -func (page GalleryImageVersionListPage) NotDone() bool { - return !page.givl.IsEmpty() +func (page OSVersionListResultPage) NotDone() bool { + return !page.ovlr.IsEmpty() } // Response returns the raw server response from the last page request. 
-func (page GalleryImageVersionListPage) Response() GalleryImageVersionList { - return page.givl +func (page OSVersionListResultPage) Response() OSVersionListResult { + return page.ovlr } // Values returns the slice of values for the current page or nil if there are no values. -func (page GalleryImageVersionListPage) Values() []GalleryImageVersion { - if page.givl.IsEmpty() { +func (page OSVersionListResultPage) Values() []OSVersion { + if page.ovlr.IsEmpty() { return nil } - return *page.givl.Value + return *page.ovlr.Value } -// Creates a new instance of the GalleryImageVersionListPage type. -func NewGalleryImageVersionListPage(cur GalleryImageVersionList, getNextPage func(context.Context, GalleryImageVersionList) (GalleryImageVersionList, error)) GalleryImageVersionListPage { - return GalleryImageVersionListPage{ +// Creates a new instance of the OSVersionListResultPage type. +func NewOSVersionListResultPage(cur OSVersionListResult, getNextPage func(context.Context, OSVersionListResult) (OSVersionListResult, error)) OSVersionListResultPage { + return OSVersionListResultPage{ fn: getNextPage, - givl: cur, + ovlr: cur, } } -// GalleryImageVersionProperties describes the properties of a gallery Image Version. -type GalleryImageVersionProperties struct { - PublishingProfile *GalleryImageVersionPublishingProfile `json:"publishingProfile,omitempty"` - // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. Possible values include: 'ProvisioningState3Creating', 'ProvisioningState3Updating', 'ProvisioningState3Failed', 'ProvisioningState3Succeeded', 'ProvisioningState3Deleting', 'ProvisioningState3Migrating' - ProvisioningState ProvisioningState3 `json:"provisioningState,omitempty"` - StorageProfile *GalleryImageVersionStorageProfile `json:"storageProfile,omitempty"` - // ReplicationStatus - READ-ONLY - ReplicationStatus *ReplicationStatus `json:"replicationStatus,omitempty"` +// OSVersionProperties OS version properties. +type OSVersionProperties struct { + // Family - READ-ONLY; The family of this OS version. + Family *string `json:"family,omitempty"` + // FamilyLabel - READ-ONLY; The family label of this OS version. + FamilyLabel *string `json:"familyLabel,omitempty"` + // Version - READ-ONLY; The OS version. + Version *string `json:"version,omitempty"` + // Label - READ-ONLY; The OS version label. + Label *string `json:"label,omitempty"` + // IsDefault - READ-ONLY; Specifies whether this is the default OS version for its family. + IsDefault *bool `json:"isDefault,omitempty"` + // IsActive - READ-ONLY; Specifies whether this OS version is active. + IsActive *bool `json:"isActive,omitempty"` } -// MarshalJSON is the custom marshaler for GalleryImageVersionProperties. -func (givp GalleryImageVersionProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for OSVersionProperties. +func (ovp OSVersionProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if givp.PublishingProfile != nil { - objectMap["publishingProfile"] = givp.PublishingProfile - } - if givp.StorageProfile != nil { - objectMap["storageProfile"] = givp.StorageProfile - } return json.Marshal(objectMap) } -// GalleryImageVersionPublishingProfile the publishing profile of a gallery Image Version. -type GalleryImageVersionPublishingProfile struct { - // TargetRegions - The target regions where the Image Version is going to be replicated to. This property is updatable. 
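// Illustrative aside, not part of the diff: a minimal sketch of consuming the
// OSVersionListResultPage defined above. How the initial page is obtained from a
// List-style client call is outside this sketch; it assumes the compute package
// context and that context and fmt are imported.
func printOSVersionNames(ctx context.Context, page OSVersionListResultPage) error {
	for page.NotDone() {
		for _, v := range page.Values() {
			if v.Name != nil {
				fmt.Println(*v.Name)
			}
		}
		// Prefer NextWithContext; Next() is the deprecated context-free variant.
		if err := page.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}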
- TargetRegions *[]TargetRegion `json:"targetRegions,omitempty"` - // ReplicaCount - The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable. - ReplicaCount *int32 `json:"replicaCount,omitempty"` - // ExcludeFromLatest - If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version. - ExcludeFromLatest *bool `json:"excludeFromLatest,omitempty"` - // PublishedDate - READ-ONLY; The timestamp for when the gallery Image Version is published. - PublishedDate *date.Time `json:"publishedDate,omitempty"` - // EndOfLifeDate - The end of life date of the gallery Image Version. This property can be used for decommissioning purposes. This property is updatable. - EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"` - // StorageAccountType - Specifies the storage account type to be used to store the image. This property is not updatable. Possible values include: 'StorageAccountTypeStandardLRS', 'StorageAccountTypeStandardZRS', 'StorageAccountTypePremiumLRS' - StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"` +// OSVersionPropertiesBase configuration view of an OS version. +type OSVersionPropertiesBase struct { + // Version - READ-ONLY; The OS version. + Version *string `json:"version,omitempty"` + // Label - READ-ONLY; The OS version label. + Label *string `json:"label,omitempty"` + // IsDefault - READ-ONLY; Specifies whether this is the default OS version for its family. + IsDefault *bool `json:"isDefault,omitempty"` + // IsActive - READ-ONLY; Specifies whether this OS version is active. + IsActive *bool `json:"isActive,omitempty"` } -// MarshalJSON is the custom marshaler for GalleryImageVersionPublishingProfile. -func (givpp GalleryImageVersionPublishingProfile) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for OSVersionPropertiesBase. +func (ovpb OSVersionPropertiesBase) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if givpp.TargetRegions != nil { - objectMap["targetRegions"] = givpp.TargetRegions - } - if givpp.ReplicaCount != nil { - objectMap["replicaCount"] = givpp.ReplicaCount - } - if givpp.ExcludeFromLatest != nil { - objectMap["excludeFromLatest"] = givpp.ExcludeFromLatest - } - if givpp.EndOfLifeDate != nil { - objectMap["endOfLifeDate"] = givpp.EndOfLifeDate - } - if givpp.StorageAccountType != "" { - objectMap["storageAccountType"] = givpp.StorageAccountType - } return json.Marshal(objectMap) } -// GalleryImageVersionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type GalleryImageVersionsCreateOrUpdateFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(GalleryImageVersionsClient) (GalleryImageVersion, error) +// PatchInstallationDetail information about a specific patch that was encountered during an installation +// action. +type PatchInstallationDetail struct { + // PatchID - READ-ONLY; A unique identifier for the patch. + PatchID *string `json:"patchId,omitempty"` + // Name - READ-ONLY; The friendly name of the patch. + Name *string `json:"name,omitempty"` + // Version - READ-ONLY; The version string of the package. It may conform to Semantic Versioning. Only applies to Linux. 
+ Version *string `json:"version,omitempty"` + // KbID - READ-ONLY; The KBID of the patch. Only applies to Windows patches. + KbID *string `json:"kbId,omitempty"` + // Classifications - READ-ONLY; The classification(s) of the patch as provided by the patch publisher. + Classifications *[]string `json:"classifications,omitempty"` + // InstallationState - READ-ONLY; The state of the patch after the installation operation completed. Possible values include: 'PatchInstallationStateUnknown', 'PatchInstallationStateInstalled', 'PatchInstallationStateFailed', 'PatchInstallationStateExcluded', 'PatchInstallationStateNotSelected', 'PatchInstallationStatePending' + InstallationState PatchInstallationState `json:"installationState,omitempty"` } -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *GalleryImageVersionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err +// MarshalJSON is the custom marshaler for PatchInstallationDetail. +func (pid PatchInstallationDetail) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// PatchSettings specifies settings related to VM Guest Patching on Windows. +type PatchSettings struct { + // PatchMode - Specifies the mode of VM Guest Patching to IaaS virtual machine or virtual machines associated to virtual machine scale set with OrchestrationMode as Flexible.

Possible values are:

**Manual** - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the property WindowsConfiguration.enableAutomaticUpdates must be false

**AutomaticByOS** - The virtual machine will automatically be updated by the OS. The property WindowsConfiguration.enableAutomaticUpdates must be true.

**AutomaticByPlatform** - The virtual machine will automatically be updated by the platform. The properties provisionVMAgent and WindowsConfiguration.enableAutomaticUpdates must be true. Possible values include: 'WindowsVMGuestPatchModeManual', 'WindowsVMGuestPatchModeAutomaticByOS', 'WindowsVMGuestPatchModeAutomaticByPlatform' + PatchMode WindowsVMGuestPatchMode `json:"patchMode,omitempty"` + // EnableHotpatching - Enables customers to patch their Azure VMs without requiring a reboot. For enableHotpatching, the 'provisionVMAgent' must be set to true and 'patchMode' must be set to 'AutomaticByPlatform'. + EnableHotpatching *bool `json:"enableHotpatching,omitempty"` + // AssessmentMode - Specifies the mode of VM Guest patch assessment for the IaaS virtual machine.

Possible values are:

**ImageDefault** - You control the timing of patch assessments on a virtual machine.

**AutomaticByPlatform** - The platform will trigger periodic patch assessments. The property provisionVMAgent must be true. Possible values include: 'WindowsPatchAssessmentModeImageDefault', 'WindowsPatchAssessmentModeAutomaticByPlatform' + AssessmentMode WindowsPatchAssessmentMode `json:"assessmentMode,omitempty"` +} + +// PirCommunityGalleryResource base information about the community gallery resource in pir. +type PirCommunityGalleryResource struct { + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Location - READ-ONLY; Resource location + Location *string `json:"location,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + *CommunityGalleryIdentifier `json:"identifier,omitempty"` +} + +// MarshalJSON is the custom marshaler for PirCommunityGalleryResource. +func (pcgr PirCommunityGalleryResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if pcgr.CommunityGalleryIdentifier != nil { + objectMap["identifier"] = pcgr.CommunityGalleryIdentifier } - future.FutureAPI = &azFuture - future.Result = future.result - return nil + return json.Marshal(objectMap) } -// result is the default implementation for GalleryImageVersionsCreateOrUpdateFuture.Result. -func (future *GalleryImageVersionsCreateOrUpdateFuture) result(client GalleryImageVersionsClient) (giv GalleryImageVersion, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) +// UnmarshalJSON is the custom unmarshaler for PirCommunityGalleryResource struct. +func (pcgr *PirCommunityGalleryResource) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - giv.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsCreateOrUpdateFuture") - return + return err } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if giv.Response.Response, err = future.GetResult(sender); err == nil && giv.Response.Response.StatusCode != http.StatusNoContent { - giv, err = client.CreateOrUpdateResponder(giv.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsCreateOrUpdateFuture", "Result", giv.Response.Response, "Failure responding to request") + for k, v := range m { + switch k { + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + pcgr.Name = &name + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + pcgr.Location = &location + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + pcgr.Type = &typeVar + } + case "identifier": + if v != nil { + var communityGalleryIdentifier CommunityGalleryIdentifier + err = json.Unmarshal(*v, &communityGalleryIdentifier) + if err != nil { + return err + } + pcgr.CommunityGalleryIdentifier = &communityGalleryIdentifier + } } } - return + + return nil } -// GalleryImageVersionsDeleteFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. 
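// Illustrative aside, not part of the diff: a minimal sketch of a PatchSettings
// value that satisfies the documented hotpatching constraints. The enclosing
// WindowsConfiguration with provisionVMAgent set to true is assumed, not shown;
// the to helpers come from github.com/Azure/go-autorest/autorest/to.
func examplePatchSettings() PatchSettings {
	return PatchSettings{
		PatchMode:         WindowsVMGuestPatchModeAutomaticByPlatform,
		EnableHotpatching: to.BoolPtr(true), // valid only with provisionVMAgent=true and patchMode=AutomaticByPlatform
		AssessmentMode:    WindowsPatchAssessmentModeAutomaticByPlatform,
	}
}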
-type GalleryImageVersionsDeleteFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(GalleryImageVersionsClient) (autorest.Response, error) +// PirResource the Resource model definition. +type PirResource struct { + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Location - READ-ONLY; Resource location + Location *string `json:"location,omitempty"` } -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *GalleryImageVersionsDeleteFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err +// MarshalJSON is the custom marshaler for PirResource. +func (pr PirResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// PirSharedGalleryResource base information about the shared gallery resource in pir. +type PirSharedGalleryResource struct { + *SharedGalleryIdentifier `json:"identifier,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Location - READ-ONLY; Resource location + Location *string `json:"location,omitempty"` +} + +// MarshalJSON is the custom marshaler for PirSharedGalleryResource. +func (psgr PirSharedGalleryResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if psgr.SharedGalleryIdentifier != nil { + objectMap["identifier"] = psgr.SharedGalleryIdentifier } - future.FutureAPI = &azFuture - future.Result = future.result - return nil + return json.Marshal(objectMap) } -// result is the default implementation for GalleryImageVersionsDeleteFuture.Result. -func (future *GalleryImageVersionsDeleteFuture) result(client GalleryImageVersionsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) +// UnmarshalJSON is the custom unmarshaler for PirSharedGalleryResource struct. +func (psgr *PirSharedGalleryResource) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsDeleteFuture", "Result", future.Response(), "Polling failure") - return + return err } - if !done { - ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsDeleteFuture") - return + for k, v := range m { + switch k { + case "identifier": + if v != nil { + var sharedGalleryIdentifier SharedGalleryIdentifier + err = json.Unmarshal(*v, &sharedGalleryIdentifier) + if err != nil { + return err + } + psgr.SharedGalleryIdentifier = &sharedGalleryIdentifier + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + psgr.Name = &name + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + psgr.Location = &location + } + } } - ar.Response = future.Response() - return -} -// GalleryImageVersionStorageProfile this is the storage profile of a Gallery Image Version. -type GalleryImageVersionStorageProfile struct { - Source *GalleryArtifactVersionSource `json:"source,omitempty"` - OsDiskImage *GalleryOSDiskImage `json:"osDiskImage,omitempty"` - // DataDiskImages - A list of data disk images. 
- DataDiskImages *[]GalleryDataDiskImage `json:"dataDiskImages,omitempty"` + return nil } -// GalleryImageVersionsUpdateFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type GalleryImageVersionsUpdateFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(GalleryImageVersionsClient) (GalleryImageVersion, error) +// Plan specifies information about the marketplace image used to create the virtual machine. This element +// is only used for marketplace images. Before you can use a marketplace image from an API, you must enable +// the image for programmatic use. In the Azure portal, find the marketplace image that you want to use +// and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and +// then click **Save**. +type Plan struct { + // Name - The plan ID. + Name *string `json:"name,omitempty"` + // Publisher - The publisher ID. + Publisher *string `json:"publisher,omitempty"` + // Product - Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference element. + Product *string `json:"product,omitempty"` + // PromotionCode - The promotion code. + PromotionCode *string `json:"promotionCode,omitempty"` } -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *GalleryImageVersionsUpdateFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil +// PrivateEndpoint the Private Endpoint resource. +type PrivateEndpoint struct { + // ID - READ-ONLY; The ARM identifier for Private Endpoint + ID *string `json:"id,omitempty"` } -// result is the default implementation for GalleryImageVersionsUpdateFuture.Result. -func (future *GalleryImageVersionsUpdateFuture) result(client GalleryImageVersionsClient) (giv GalleryImageVersion, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - giv.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if giv.Response.Response, err = future.GetResult(sender); err == nil && giv.Response.Response.StatusCode != http.StatusNoContent { - giv, err = client.UpdateResponder(giv.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsUpdateFuture", "Result", giv.Response.Response, "Failure responding to request") - } - } - return +// MarshalJSON is the custom marshaler for PrivateEndpoint. +func (peVar PrivateEndpoint) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) } -// GalleryImageVersionUpdate specifies information about the gallery Image Version that you want to update. 
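// Illustrative aside, not part of the diff: a minimal, hypothetical Plan for a
// marketplace image. Product carries the same value as the imageReference offer,
// and the image must already be enabled for programmatic deployment as described
// in the Plan doc comment above.
func examplePlan() Plan {
	return Plan{
		Name:      to.StringPtr("example-plan"),      // hypothetical plan ID
		Publisher: to.StringPtr("example-publisher"), // hypothetical publisher ID
		Product:   to.StringPtr("example-offer"),     // mirrors the imageReference offer
	}
}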
-type GalleryImageVersionUpdate struct { - *GalleryImageVersionProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Resource Id +// PrivateEndpointConnection the Private Endpoint Connection resource. +type PrivateEndpointConnection struct { + autorest.Response `json:"-"` + // PrivateEndpointConnectionProperties - Resource properties. + *PrivateEndpointConnectionProperties `json:"properties,omitempty"` + // ID - READ-ONLY; private endpoint connection Id ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Resource name + // Name - READ-ONLY; private endpoint connection name Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Resource type + // Type - READ-ONLY; private endpoint connection type Type *string `json:"type,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` } -// MarshalJSON is the custom marshaler for GalleryImageVersionUpdate. -func (givu GalleryImageVersionUpdate) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for PrivateEndpointConnection. +func (pec PrivateEndpointConnection) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if givu.GalleryImageVersionProperties != nil { - objectMap["properties"] = givu.GalleryImageVersionProperties - } - if givu.Tags != nil { - objectMap["tags"] = givu.Tags + if pec.PrivateEndpointConnectionProperties != nil { + objectMap["properties"] = pec.PrivateEndpointConnectionProperties } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for GalleryImageVersionUpdate struct. -func (givu *GalleryImageVersionUpdate) UnmarshalJSON(body []byte) error { +// UnmarshalJSON is the custom unmarshaler for PrivateEndpointConnection struct. +func (pec *PrivateEndpointConnection) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { @@ -6277,12 +11156,12 @@ func (givu *GalleryImageVersionUpdate) UnmarshalJSON(body []byte) error { switch k { case "properties": if v != nil { - var galleryImageVersionProperties GalleryImageVersionProperties - err = json.Unmarshal(*v, &galleryImageVersionProperties) + var privateEndpointConnectionProperties PrivateEndpointConnectionProperties + err = json.Unmarshal(*v, &privateEndpointConnectionProperties) if err != nil { return err } - givu.GalleryImageVersionProperties = &galleryImageVersionProperties + pec.PrivateEndpointConnectionProperties = &privateEndpointConnectionProperties } case "id": if v != nil { @@ -6291,7 +11170,7 @@ func (givu *GalleryImageVersionUpdate) UnmarshalJSON(body []byte) error { if err != nil { return err } - givu.ID = &ID + pec.ID = &ID } case "name": if v != nil { @@ -6300,7 +11179,7 @@ func (givu *GalleryImageVersionUpdate) UnmarshalJSON(body []byte) error { if err != nil { return err } - givu.Name = &name + pec.Name = &name } case "type": if v != nil { @@ -6309,16 +11188,7 @@ func (givu *GalleryImageVersionUpdate) UnmarshalJSON(body []byte) error { if err != nil { return err } - givu.Type = &typeVar - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - givu.Tags = tags + pec.Type = &typeVar } } } @@ -6326,26 +11196,27 @@ func (givu *GalleryImageVersionUpdate) UnmarshalJSON(body []byte) error { return nil } -// GalleryList the List Galleries operation response. 
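// Illustrative aside, not part of the diff: the embedded
// *PrivateEndpointConnectionProperties above is flattened under the wire-format
// "properties" key, and the custom UnmarshalJSON restores it. A minimal sketch
// with a hypothetical payload, assuming the compute package context and that
// encoding/json and fmt are imported:
func demoPrivateEndpointConnectionUnmarshal() error {
	payload := []byte(`{"id":"example-connection-id","properties":{"provisioningState":"Succeeded"}}`)
	var pec PrivateEndpointConnection
	if err := json.Unmarshal(payload, &pec); err != nil { // dispatches to the custom unmarshaler
		return err
	}
	if pec.PrivateEndpointConnectionProperties != nil {
		fmt.Println(pec.PrivateEndpointConnectionProperties.ProvisioningState) // prints "Succeeded"
	}
	return nil
}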
-type GalleryList struct { +// PrivateEndpointConnectionListResult a list of private link resources +type PrivateEndpointConnectionListResult struct { autorest.Response `json:"-"` - // Value - A list of galleries. - Value *[]Gallery `json:"value,omitempty"` - // NextLink - The uri to fetch the next page of galleries. Call ListNext() with this to fetch the next page of galleries. + // Value - Array of private endpoint connections + Value *[]PrivateEndpointConnection `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of snapshots. Call ListNext() with this to fetch the next page of snapshots. NextLink *string `json:"nextLink,omitempty"` } -// GalleryListIterator provides access to a complete listing of Gallery values. -type GalleryListIterator struct { +// PrivateEndpointConnectionListResultIterator provides access to a complete listing of +// PrivateEndpointConnection values. +type PrivateEndpointConnectionListResultIterator struct { i int - page GalleryListPage + page PrivateEndpointConnectionListResultPage } // NextWithContext advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. -func (iter *GalleryListIterator) NextWithContext(ctx context.Context) (err error) { +func (iter *PrivateEndpointConnectionListResultIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/GalleryListIterator.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionListResultIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { @@ -6370,67 +11241,67 @@ func (iter *GalleryListIterator) NextWithContext(ctx context.Context) (err error // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (iter *GalleryListIterator) Next() error { +func (iter *PrivateEndpointConnectionListResultIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. -func (iter GalleryListIterator) NotDone() bool { +func (iter PrivateEndpointConnectionListResultIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. -func (iter GalleryListIterator) Response() GalleryList { +func (iter PrivateEndpointConnectionListResultIterator) Response() PrivateEndpointConnectionListResult { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. -func (iter GalleryListIterator) Value() Gallery { +func (iter PrivateEndpointConnectionListResultIterator) Value() PrivateEndpointConnection { if !iter.page.NotDone() { - return Gallery{} + return PrivateEndpointConnection{} } return iter.page.Values()[iter.i] } -// Creates a new instance of the GalleryListIterator type. -func NewGalleryListIterator(page GalleryListPage) GalleryListIterator { - return GalleryListIterator{page: page} +// Creates a new instance of the PrivateEndpointConnectionListResultIterator type. 
+func NewPrivateEndpointConnectionListResultIterator(page PrivateEndpointConnectionListResultPage) PrivateEndpointConnectionListResultIterator { + return PrivateEndpointConnectionListResultIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. -func (gl GalleryList) IsEmpty() bool { - return gl.Value == nil || len(*gl.Value) == 0 +func (peclr PrivateEndpointConnectionListResult) IsEmpty() bool { + return peclr.Value == nil || len(*peclr.Value) == 0 } // hasNextLink returns true if the NextLink is not empty. -func (gl GalleryList) hasNextLink() bool { - return gl.NextLink != nil && len(*gl.NextLink) != 0 +func (peclr PrivateEndpointConnectionListResult) hasNextLink() bool { + return peclr.NextLink != nil && len(*peclr.NextLink) != 0 } -// galleryListPreparer prepares a request to retrieve the next set of results. +// privateEndpointConnectionListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. -func (gl GalleryList) galleryListPreparer(ctx context.Context) (*http.Request, error) { - if !gl.hasNextLink() { +func (peclr PrivateEndpointConnectionListResult) privateEndpointConnectionListResultPreparer(ctx context.Context) (*http.Request, error) { + if !peclr.hasNextLink() { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), - autorest.WithBaseURL(to.String(gl.NextLink))) + autorest.WithBaseURL(to.String(peclr.NextLink))) } -// GalleryListPage contains a page of Gallery values. -type GalleryListPage struct { - fn func(context.Context, GalleryList) (GalleryList, error) - gl GalleryList +// PrivateEndpointConnectionListResultPage contains a page of PrivateEndpointConnection values. +type PrivateEndpointConnectionListResultPage struct { + fn func(context.Context, PrivateEndpointConnectionListResult) (PrivateEndpointConnectionListResult, error) + peclr PrivateEndpointConnectionListResult } // NextWithContext advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. -func (page *GalleryListPage) NextWithContext(ctx context.Context) (err error) { +func (page *PrivateEndpointConnectionListResultPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/GalleryListPage.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionListResultPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { @@ -6440,11 +11311,11 @@ func (page *GalleryListPage) NextWithContext(ctx context.Context) (err error) { }() } for { - next, err := page.fn(ctx, page.gl) + next, err := page.fn(ctx, page.peclr) if err != nil { return err } - page.gl = next + page.peclr = next if !next.hasNextLink() || !next.IsEmpty() { break } @@ -6455,105 +11326,81 @@ func (page *GalleryListPage) NextWithContext(ctx context.Context) (err error) { // Next advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (page *GalleryListPage) Next() error { +func (page *PrivateEndpointConnectionListResultPage) Next() error { return page.NextWithContext(context.Background()) } // NotDone returns true if the page enumeration should be started or is not yet complete. 
-func (page GalleryListPage) NotDone() bool { - return !page.gl.IsEmpty() +func (page PrivateEndpointConnectionListResultPage) NotDone() bool { + return !page.peclr.IsEmpty() } // Response returns the raw server response from the last page request. -func (page GalleryListPage) Response() GalleryList { - return page.gl +func (page PrivateEndpointConnectionListResultPage) Response() PrivateEndpointConnectionListResult { + return page.peclr } // Values returns the slice of values for the current page or nil if there are no values. -func (page GalleryListPage) Values() []Gallery { - if page.gl.IsEmpty() { +func (page PrivateEndpointConnectionListResultPage) Values() []PrivateEndpointConnection { + if page.peclr.IsEmpty() { return nil } - return *page.gl.Value -} - -// Creates a new instance of the GalleryListPage type. -func NewGalleryListPage(cur GalleryList, getNextPage func(context.Context, GalleryList) (GalleryList, error)) GalleryListPage { - return GalleryListPage{ - fn: getNextPage, - gl: cur, - } -} - -// GalleryOSDiskImage this is the OS disk image. -type GalleryOSDiskImage struct { - // SizeInGB - READ-ONLY; This property indicates the size of the VHD to be created. - SizeInGB *int32 `json:"sizeInGB,omitempty"` - // HostCaching - The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'. Possible values include: 'HostCachingNone', 'HostCachingReadOnly', 'HostCachingReadWrite' - HostCaching HostCaching `json:"hostCaching,omitempty"` - Source *GalleryArtifactVersionSource `json:"source,omitempty"` + return *page.peclr.Value } -// MarshalJSON is the custom marshaler for GalleryOSDiskImage. -func (godi GalleryOSDiskImage) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if godi.HostCaching != "" { - objectMap["hostCaching"] = godi.HostCaching - } - if godi.Source != nil { - objectMap["source"] = godi.Source +// Creates a new instance of the PrivateEndpointConnectionListResultPage type. +func NewPrivateEndpointConnectionListResultPage(cur PrivateEndpointConnectionListResult, getNextPage func(context.Context, PrivateEndpointConnectionListResult) (PrivateEndpointConnectionListResult, error)) PrivateEndpointConnectionListResultPage { + return PrivateEndpointConnectionListResultPage{ + fn: getNextPage, + peclr: cur, } - return json.Marshal(objectMap) } -// GalleryProperties describes the properties of a Shared Image Gallery. -type GalleryProperties struct { - // Description - The description of this Shared Image Gallery resource. This property is updatable. - Description *string `json:"description,omitempty"` - Identifier *GalleryIdentifier `json:"identifier,omitempty"` - // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. Possible values include: 'ProvisioningStateCreating', 'ProvisioningStateUpdating', 'ProvisioningStateFailed', 'ProvisioningStateSucceeded', 'ProvisioningStateDeleting', 'ProvisioningStateMigrating' - ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` +// PrivateEndpointConnectionProperties properties of the PrivateEndpointConnectProperties. +type PrivateEndpointConnectionProperties struct { + // PrivateEndpoint - READ-ONLY; The resource of private end point. + PrivateEndpoint *PrivateEndpoint `json:"privateEndpoint,omitempty"` + // PrivateLinkServiceConnectionState - A collection of information about the state of the connection between DiskAccess and Virtual Network. 
+ PrivateLinkServiceConnectionState *PrivateLinkServiceConnectionState `json:"privateLinkServiceConnectionState,omitempty"` + // ProvisioningState - The provisioning state of the private endpoint connection resource. Possible values include: 'PrivateEndpointConnectionProvisioningStateSucceeded', 'PrivateEndpointConnectionProvisioningStateCreating', 'PrivateEndpointConnectionProvisioningStateDeleting', 'PrivateEndpointConnectionProvisioningStateFailed' + ProvisioningState PrivateEndpointConnectionProvisioningState `json:"provisioningState,omitempty"` } -// MarshalJSON is the custom marshaler for GalleryProperties. -func (gp GalleryProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for PrivateEndpointConnectionProperties. +func (pecp PrivateEndpointConnectionProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if gp.Description != nil { - objectMap["description"] = gp.Description + if pecp.PrivateLinkServiceConnectionState != nil { + objectMap["privateLinkServiceConnectionState"] = pecp.PrivateLinkServiceConnectionState } - if gp.Identifier != nil { - objectMap["identifier"] = gp.Identifier + if pecp.ProvisioningState != "" { + objectMap["provisioningState"] = pecp.ProvisioningState } return json.Marshal(objectMap) } -// GalleryUpdate specifies information about the Shared Image Gallery that you want to update. -type GalleryUpdate struct { - *GalleryProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Resource Id +// PrivateLinkResource a private link resource +type PrivateLinkResource struct { + // PrivateLinkResourceProperties - Resource properties. + *PrivateLinkResourceProperties `json:"properties,omitempty"` + // ID - READ-ONLY; private link resource Id ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Resource name + // Name - READ-ONLY; private link resource name Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Resource type + // Type - READ-ONLY; private link resource type Type *string `json:"type,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` } -// MarshalJSON is the custom marshaler for GalleryUpdate. -func (gu GalleryUpdate) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for PrivateLinkResource. +func (plr PrivateLinkResource) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if gu.GalleryProperties != nil { - objectMap["properties"] = gu.GalleryProperties - } - if gu.Tags != nil { - objectMap["tags"] = gu.Tags + if plr.PrivateLinkResourceProperties != nil { + objectMap["properties"] = plr.PrivateLinkResourceProperties } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for GalleryUpdate struct. -func (gu *GalleryUpdate) UnmarshalJSON(body []byte) error { +// UnmarshalJSON is the custom unmarshaler for PrivateLinkResource struct. 
+func (plr *PrivateLinkResource) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { @@ -6563,12 +11410,12 @@ func (gu *GalleryUpdate) UnmarshalJSON(body []byte) error { switch k { case "properties": if v != nil { - var galleryProperties GalleryProperties - err = json.Unmarshal(*v, &galleryProperties) + var privateLinkResourceProperties PrivateLinkResourceProperties + err = json.Unmarshal(*v, &privateLinkResourceProperties) if err != nil { return err } - gu.GalleryProperties = &galleryProperties + plr.PrivateLinkResourceProperties = &privateLinkResourceProperties } case "id": if v != nil { @@ -6577,7 +11424,7 @@ func (gu *GalleryUpdate) UnmarshalJSON(body []byte) error { if err != nil { return err } - gu.ID = &ID + plr.ID = &ID } case "name": if v != nil { @@ -6586,7 +11433,7 @@ func (gu *GalleryUpdate) UnmarshalJSON(body []byte) error { if err != nil { return err } - gu.Name = &name + plr.Name = &name } case "type": if v != nil { @@ -6595,16 +11442,7 @@ func (gu *GalleryUpdate) UnmarshalJSON(body []byte) error { if err != nil { return err } - gu.Type = &typeVar - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - gu.Tags = tags + plr.Type = &typeVar } } } @@ -6612,28 +11450,54 @@ func (gu *GalleryUpdate) UnmarshalJSON(body []byte) error { return nil } -// GrantAccessData data used for requesting a SAS. -type GrantAccessData struct { - // Access - Possible values include: 'AccessLevelNone', 'AccessLevelRead', 'AccessLevelWrite' - Access AccessLevel `json:"access,omitempty"` - // DurationInSeconds - Time duration in seconds until the SAS access expires. - DurationInSeconds *int32 `json:"durationInSeconds,omitempty"` +// PrivateLinkResourceListResult a list of private link resources +type PrivateLinkResourceListResult struct { + autorest.Response `json:"-"` + // Value - Array of private link resources + Value *[]PrivateLinkResource `json:"value,omitempty"` } -// HardwareProfile specifies the hardware settings for the virtual machine. -type HardwareProfile struct { - // VMSize - Specifies the size of the virtual machine.

The enum data type is currently deprecated and will be removed by December 23rd 2023.

Recommended way to get the list of available sizes is using these APIs:

[List all available virtual machine sizes in an availability set](https://docs.microsoft.com/rest/api/compute/availabilitysets/listavailablesizes)

[List all available virtual machine sizes in a region]( https://docs.microsoft.com/en-us/rest/api/compute/resourceskus/list)

[List all available virtual machine sizes for resizing](https://docs.microsoft.com/rest/api/compute/virtualmachines/listavailablesizes). For more information about virtual machine sizes, see [Sizes for virtual machines](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes).

The available VM sizes depend on region and availability set. Possible values include: 'VirtualMachineSizeTypesBasicA0', 'VirtualMachineSizeTypesBasicA1', 'VirtualMachineSizeTypesBasicA2', 'VirtualMachineSizeTypesBasicA3', 'VirtualMachineSizeTypesBasicA4', 'VirtualMachineSizeTypesStandardA0', 'VirtualMachineSizeTypesStandardA1', 'VirtualMachineSizeTypesStandardA2', 'VirtualMachineSizeTypesStandardA3', 'VirtualMachineSizeTypesStandardA4', 'VirtualMachineSizeTypesStandardA5', 'VirtualMachineSizeTypesStandardA6', 'VirtualMachineSizeTypesStandardA7', 'VirtualMachineSizeTypesStandardA8', 'VirtualMachineSizeTypesStandardA9', 'VirtualMachineSizeTypesStandardA10', 'VirtualMachineSizeTypesStandardA11', 'VirtualMachineSizeTypesStandardA1V2', 'VirtualMachineSizeTypesStandardA2V2', 'VirtualMachineSizeTypesStandardA4V2', 'VirtualMachineSizeTypesStandardA8V2', 'VirtualMachineSizeTypesStandardA2mV2', 'VirtualMachineSizeTypesStandardA4mV2', 'VirtualMachineSizeTypesStandardA8mV2', 'VirtualMachineSizeTypesStandardB1s', 'VirtualMachineSizeTypesStandardB1ms', 'VirtualMachineSizeTypesStandardB2s', 'VirtualMachineSizeTypesStandardB2ms', 'VirtualMachineSizeTypesStandardB4ms', 'VirtualMachineSizeTypesStandardB8ms', 'VirtualMachineSizeTypesStandardD1', 'VirtualMachineSizeTypesStandardD2', 'VirtualMachineSizeTypesStandardD3', 'VirtualMachineSizeTypesStandardD4', 'VirtualMachineSizeTypesStandardD11', 'VirtualMachineSizeTypesStandardD12', 'VirtualMachineSizeTypesStandardD13', 'VirtualMachineSizeTypesStandardD14', 'VirtualMachineSizeTypesStandardD1V2', 'VirtualMachineSizeTypesStandardD2V2', 'VirtualMachineSizeTypesStandardD3V2', 'VirtualMachineSizeTypesStandardD4V2', 'VirtualMachineSizeTypesStandardD5V2', 'VirtualMachineSizeTypesStandardD2V3', 'VirtualMachineSizeTypesStandardD4V3', 'VirtualMachineSizeTypesStandardD8V3', 'VirtualMachineSizeTypesStandardD16V3', 'VirtualMachineSizeTypesStandardD32V3', 'VirtualMachineSizeTypesStandardD64V3', 'VirtualMachineSizeTypesStandardD2sV3', 'VirtualMachineSizeTypesStandardD4sV3', 'VirtualMachineSizeTypesStandardD8sV3', 'VirtualMachineSizeTypesStandardD16sV3', 'VirtualMachineSizeTypesStandardD32sV3', 'VirtualMachineSizeTypesStandardD64sV3', 'VirtualMachineSizeTypesStandardD11V2', 'VirtualMachineSizeTypesStandardD12V2', 'VirtualMachineSizeTypesStandardD13V2', 'VirtualMachineSizeTypesStandardD14V2', 'VirtualMachineSizeTypesStandardD15V2', 'VirtualMachineSizeTypesStandardDS1', 'VirtualMachineSizeTypesStandardDS2', 'VirtualMachineSizeTypesStandardDS3', 'VirtualMachineSizeTypesStandardDS4', 'VirtualMachineSizeTypesStandardDS11', 'VirtualMachineSizeTypesStandardDS12', 'VirtualMachineSizeTypesStandardDS13', 'VirtualMachineSizeTypesStandardDS14', 'VirtualMachineSizeTypesStandardDS1V2', 'VirtualMachineSizeTypesStandardDS2V2', 'VirtualMachineSizeTypesStandardDS3V2', 'VirtualMachineSizeTypesStandardDS4V2', 'VirtualMachineSizeTypesStandardDS5V2', 'VirtualMachineSizeTypesStandardDS11V2', 'VirtualMachineSizeTypesStandardDS12V2', 'VirtualMachineSizeTypesStandardDS13V2', 'VirtualMachineSizeTypesStandardDS14V2', 'VirtualMachineSizeTypesStandardDS15V2', 'VirtualMachineSizeTypesStandardDS134V2', 'VirtualMachineSizeTypesStandardDS132V2', 'VirtualMachineSizeTypesStandardDS148V2', 'VirtualMachineSizeTypesStandardDS144V2', 'VirtualMachineSizeTypesStandardE2V3', 'VirtualMachineSizeTypesStandardE4V3', 'VirtualMachineSizeTypesStandardE8V3', 'VirtualMachineSizeTypesStandardE16V3', 'VirtualMachineSizeTypesStandardE32V3', 'VirtualMachineSizeTypesStandardE64V3', 'VirtualMachineSizeTypesStandardE2sV3', 
'VirtualMachineSizeTypesStandardE4sV3', 'VirtualMachineSizeTypesStandardE8sV3', 'VirtualMachineSizeTypesStandardE16sV3', 'VirtualMachineSizeTypesStandardE32sV3', 'VirtualMachineSizeTypesStandardE64sV3', 'VirtualMachineSizeTypesStandardE3216V3', 'VirtualMachineSizeTypesStandardE328sV3', 'VirtualMachineSizeTypesStandardE6432sV3', 'VirtualMachineSizeTypesStandardE6416sV3', 'VirtualMachineSizeTypesStandardF1', 'VirtualMachineSizeTypesStandardF2', 'VirtualMachineSizeTypesStandardF4', 'VirtualMachineSizeTypesStandardF8', 'VirtualMachineSizeTypesStandardF16', 'VirtualMachineSizeTypesStandardF1s', 'VirtualMachineSizeTypesStandardF2s', 'VirtualMachineSizeTypesStandardF4s', 'VirtualMachineSizeTypesStandardF8s', 'VirtualMachineSizeTypesStandardF16s', 'VirtualMachineSizeTypesStandardF2sV2', 'VirtualMachineSizeTypesStandardF4sV2', 'VirtualMachineSizeTypesStandardF8sV2', 'VirtualMachineSizeTypesStandardF16sV2', 'VirtualMachineSizeTypesStandardF32sV2', 'VirtualMachineSizeTypesStandardF64sV2', 'VirtualMachineSizeTypesStandardF72sV2', 'VirtualMachineSizeTypesStandardG1', 'VirtualMachineSizeTypesStandardG2', 'VirtualMachineSizeTypesStandardG3', 'VirtualMachineSizeTypesStandardG4', 'VirtualMachineSizeTypesStandardG5', 'VirtualMachineSizeTypesStandardGS1', 'VirtualMachineSizeTypesStandardGS2', 'VirtualMachineSizeTypesStandardGS3', 'VirtualMachineSizeTypesStandardGS4', 'VirtualMachineSizeTypesStandardGS5', 'VirtualMachineSizeTypesStandardGS48', 'VirtualMachineSizeTypesStandardGS44', 'VirtualMachineSizeTypesStandardGS516', 'VirtualMachineSizeTypesStandardGS58', 'VirtualMachineSizeTypesStandardH8', 'VirtualMachineSizeTypesStandardH16', 'VirtualMachineSizeTypesStandardH8m', 'VirtualMachineSizeTypesStandardH16m', 'VirtualMachineSizeTypesStandardH16r', 'VirtualMachineSizeTypesStandardH16mr', 'VirtualMachineSizeTypesStandardL4s', 'VirtualMachineSizeTypesStandardL8s', 'VirtualMachineSizeTypesStandardL16s', 'VirtualMachineSizeTypesStandardL32s', 'VirtualMachineSizeTypesStandardM64s', 'VirtualMachineSizeTypesStandardM64ms', 'VirtualMachineSizeTypesStandardM128s', 'VirtualMachineSizeTypesStandardM128ms', 'VirtualMachineSizeTypesStandardM6432ms', 'VirtualMachineSizeTypesStandardM6416ms', 'VirtualMachineSizeTypesStandardM12864ms', 'VirtualMachineSizeTypesStandardM12832ms', 'VirtualMachineSizeTypesStandardNC6', 'VirtualMachineSizeTypesStandardNC12', 'VirtualMachineSizeTypesStandardNC24', 'VirtualMachineSizeTypesStandardNC24r', 'VirtualMachineSizeTypesStandardNC6sV2', 'VirtualMachineSizeTypesStandardNC12sV2', 'VirtualMachineSizeTypesStandardNC24sV2', 'VirtualMachineSizeTypesStandardNC24rsV2', 'VirtualMachineSizeTypesStandardNC6sV3', 'VirtualMachineSizeTypesStandardNC12sV3', 'VirtualMachineSizeTypesStandardNC24sV3', 'VirtualMachineSizeTypesStandardNC24rsV3', 'VirtualMachineSizeTypesStandardND6s', 'VirtualMachineSizeTypesStandardND12s', 'VirtualMachineSizeTypesStandardND24s', 'VirtualMachineSizeTypesStandardND24rs', 'VirtualMachineSizeTypesStandardNV6', 'VirtualMachineSizeTypesStandardNV12', 'VirtualMachineSizeTypesStandardNV24' - VMSize VirtualMachineSizeTypes `json:"vmSize,omitempty"` +// PrivateLinkResourceProperties properties of a private link resource. +type PrivateLinkResourceProperties struct { + // GroupID - READ-ONLY; The private link resource group id. + GroupID *string `json:"groupId,omitempty"` + // RequiredMembers - READ-ONLY; The private link resource required member names. + RequiredMembers *[]string `json:"requiredMembers,omitempty"` + // RequiredZoneNames - The private link resource DNS zone name. 
+ RequiredZoneNames *[]string `json:"requiredZoneNames,omitempty"` } -// Image the source user image virtual hard disk. The virtual hard disk will be copied before being -// attached to the virtual machine. If SourceImage is provided, the destination virtual hard drive must not -// exist. -type Image struct { +// MarshalJSON is the custom marshaler for PrivateLinkResourceProperties. +func (plrp PrivateLinkResourceProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if plrp.RequiredZoneNames != nil { + objectMap["requiredZoneNames"] = plrp.RequiredZoneNames + } + return json.Marshal(objectMap) +} + +// PrivateLinkServiceConnectionState a collection of information about the state of the connection between +// service consumer and provider. +type PrivateLinkServiceConnectionState struct { + // Status - Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service. Possible values include: 'PrivateEndpointServiceConnectionStatusPending', 'PrivateEndpointServiceConnectionStatusApproved', 'PrivateEndpointServiceConnectionStatusRejected' + Status PrivateEndpointServiceConnectionStatus `json:"status,omitempty"` + // Description - The reason for approval/rejection of the connection. + Description *string `json:"description,omitempty"` + // ActionsRequired - A message indicating if changes on the service provider require any updates on the consumer. + ActionsRequired *string `json:"actionsRequired,omitempty"` +} + +// PropertyUpdatesInProgress properties of the disk for which update is pending. +type PropertyUpdatesInProgress struct { + // TargetTier - The target performance tier of the disk if a tier change operation is in progress. + TargetTier *string `json:"targetTier,omitempty"` +} + +// ProximityPlacementGroup specifies information about the proximity placement group. +type ProximityPlacementGroup struct { autorest.Response `json:"-"` - *ImageProperties `json:"properties,omitempty"` - // ExtendedLocation - The extended location of the Image. - ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"` + // ProximityPlacementGroupProperties - Describes the properties of a Proximity Placement Group. + *ProximityPlacementGroupProperties `json:"properties,omitempty"` // ID - READ-ONLY; Resource Id ID *string `json:"id,omitempty"` // Name - READ-ONLY; Resource name @@ -6646,26 +11510,23 @@ type Image struct { Tags map[string]*string `json:"tags"` } -// MarshalJSON is the custom marshaler for Image. -func (i Image) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for ProximityPlacementGroup. +func (ppg ProximityPlacementGroup) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if i.ImageProperties != nil { - objectMap["properties"] = i.ImageProperties - } - if i.ExtendedLocation != nil { - objectMap["extendedLocation"] = i.ExtendedLocation + if ppg.ProximityPlacementGroupProperties != nil { + objectMap["properties"] = ppg.ProximityPlacementGroupProperties } - if i.Location != nil { - objectMap["location"] = i.Location + if ppg.Location != nil { + objectMap["location"] = ppg.Location } - if i.Tags != nil { - objectMap["tags"] = i.Tags + if ppg.Tags != nil { + objectMap["tags"] = ppg.Tags } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for Image struct. -func (i *Image) UnmarshalJSON(body []byte) error { +// UnmarshalJSON is the custom unmarshaler for ProximityPlacementGroup struct. 
+func (ppg *ProximityPlacementGroup) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { @@ -6675,21 +11536,12 @@ func (i *Image) UnmarshalJSON(body []byte) error { switch k { case "properties": if v != nil { - var imageProperties ImageProperties - err = json.Unmarshal(*v, &imageProperties) - if err != nil { - return err - } - i.ImageProperties = &imageProperties - } - case "extendedLocation": - if v != nil { - var extendedLocation ExtendedLocation - err = json.Unmarshal(*v, &extendedLocation) + var proximityPlacementGroupProperties ProximityPlacementGroupProperties + err = json.Unmarshal(*v, &proximityPlacementGroupProperties) if err != nil { return err } - i.ExtendedLocation = &extendedLocation + ppg.ProximityPlacementGroupProperties = &proximityPlacementGroupProperties } case "id": if v != nil { @@ -6698,7 +11550,7 @@ func (i *Image) UnmarshalJSON(body []byte) error { if err != nil { return err } - i.ID = &ID + ppg.ID = &ID } case "name": if v != nil { @@ -6707,7 +11559,7 @@ func (i *Image) UnmarshalJSON(body []byte) error { if err != nil { return err } - i.Name = &name + ppg.Name = &name } case "type": if v != nil { @@ -6716,7 +11568,7 @@ func (i *Image) UnmarshalJSON(body []byte) error { if err != nil { return err } - i.Type = &typeVar + ppg.Type = &typeVar } case "location": if v != nil { @@ -6725,7 +11577,7 @@ func (i *Image) UnmarshalJSON(body []byte) error { if err != nil { return err } - i.Location = &location + ppg.Location = &location } case "tags": if v != nil { @@ -6734,7 +11586,7 @@ func (i *Image) UnmarshalJSON(body []byte) error { if err != nil { return err } - i.Tags = tags + ppg.Tags = tags } } } @@ -6742,72 +11594,27 @@ func (i *Image) UnmarshalJSON(body []byte) error { return nil } -// ImageDataDisk describes a data disk. -type ImageDataDisk struct { - // Lun - Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. - Lun *int32 `json:"lun,omitempty"` - // Snapshot - The snapshot. - Snapshot *SubResource `json:"snapshot,omitempty"` - // ManagedDisk - The managedDisk. - ManagedDisk *SubResource `json:"managedDisk,omitempty"` - // BlobURI - The Virtual Hard Disk. - BlobURI *string `json:"blobUri,omitempty"` - // Caching - Specifies the caching requirements.

Possible values are:

**None**

**ReadOnly**

**ReadWrite**

Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' - Caching CachingTypes `json:"caching,omitempty"` - // DiskSizeGB - Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.

This value cannot be larger than 1023 GB - DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` - // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS', 'StorageAccountTypesPremiumZRS', 'StorageAccountTypesStandardSSDZRS' - StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` - // DiskEncryptionSet - Specifies the customer managed disk encryption set resource id for the managed image disk. - DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"` -} - -// ImageDisk describes a image disk. -type ImageDisk struct { - // Snapshot - The snapshot. - Snapshot *SubResource `json:"snapshot,omitempty"` - // ManagedDisk - The managedDisk. - ManagedDisk *SubResource `json:"managedDisk,omitempty"` - // BlobURI - The Virtual Hard Disk. - BlobURI *string `json:"blobUri,omitempty"` - // Caching - Specifies the caching requirements.

Possible values are:

**None**

**ReadOnly**

**ReadWrite**

Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' - Caching CachingTypes `json:"caching,omitempty"` - // DiskSizeGB - Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.

This value cannot be larger than 1023 GB - DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` - // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS', 'StorageAccountTypesPremiumZRS', 'StorageAccountTypesStandardSSDZRS' - StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` - // DiskEncryptionSet - Specifies the customer managed disk encryption set resource id for the managed image disk. - DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"` -} - -// ImageDiskReference the source image used for creating the disk. -type ImageDiskReference struct { - // ID - A relative uri containing either a Platform Image Repository or user image reference. - ID *string `json:"id,omitempty"` - // Lun - If the disk is created from an image's data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null. - Lun *int32 `json:"lun,omitempty"` -} - -// ImageListResult the List Image operation response. -type ImageListResult struct { +// ProximityPlacementGroupListResult the List Proximity Placement Group operation response. +type ProximityPlacementGroupListResult struct { autorest.Response `json:"-"` - // Value - The list of Images. - Value *[]Image `json:"value,omitempty"` - // NextLink - The uri to fetch the next page of Images. Call ListNext() with this to fetch the next page of Images. + // Value - The list of proximity placement groups + Value *[]ProximityPlacementGroup `json:"value,omitempty"` + // NextLink - The URI to fetch the next page of proximity placement groups. NextLink *string `json:"nextLink,omitempty"` } -// ImageListResultIterator provides access to a complete listing of Image values. -type ImageListResultIterator struct { +// ProximityPlacementGroupListResultIterator provides access to a complete listing of +// ProximityPlacementGroup values. +type ProximityPlacementGroupListResultIterator struct { i int - page ImageListResultPage + page ProximityPlacementGroupListResultPage } // NextWithContext advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. -func (iter *ImageListResultIterator) NextWithContext(ctx context.Context) (err error) { +func (iter *ProximityPlacementGroupListResultIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ImageListResultIterator.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/ProximityPlacementGroupListResultIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { @@ -6832,67 +11639,67 @@ func (iter *ImageListResultIterator) NextWithContext(ctx context.Context) (err e // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (iter *ImageListResultIterator) Next() error { +func (iter *ProximityPlacementGroupListResultIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. 
-func (iter ImageListResultIterator) NotDone() bool { +func (iter ProximityPlacementGroupListResultIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. -func (iter ImageListResultIterator) Response() ImageListResult { +func (iter ProximityPlacementGroupListResultIterator) Response() ProximityPlacementGroupListResult { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. -func (iter ImageListResultIterator) Value() Image { +func (iter ProximityPlacementGroupListResultIterator) Value() ProximityPlacementGroup { if !iter.page.NotDone() { - return Image{} + return ProximityPlacementGroup{} } return iter.page.Values()[iter.i] } -// Creates a new instance of the ImageListResultIterator type. -func NewImageListResultIterator(page ImageListResultPage) ImageListResultIterator { - return ImageListResultIterator{page: page} +// Creates a new instance of the ProximityPlacementGroupListResultIterator type. +func NewProximityPlacementGroupListResultIterator(page ProximityPlacementGroupListResultPage) ProximityPlacementGroupListResultIterator { + return ProximityPlacementGroupListResultIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. -func (ilr ImageListResult) IsEmpty() bool { - return ilr.Value == nil || len(*ilr.Value) == 0 +func (ppglr ProximityPlacementGroupListResult) IsEmpty() bool { + return ppglr.Value == nil || len(*ppglr.Value) == 0 } // hasNextLink returns true if the NextLink is not empty. -func (ilr ImageListResult) hasNextLink() bool { - return ilr.NextLink != nil && len(*ilr.NextLink) != 0 +func (ppglr ProximityPlacementGroupListResult) hasNextLink() bool { + return ppglr.NextLink != nil && len(*ppglr.NextLink) != 0 } -// imageListResultPreparer prepares a request to retrieve the next set of results. +// proximityPlacementGroupListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. -func (ilr ImageListResult) imageListResultPreparer(ctx context.Context) (*http.Request, error) { - if !ilr.hasNextLink() { +func (ppglr ProximityPlacementGroupListResult) proximityPlacementGroupListResultPreparer(ctx context.Context) (*http.Request, error) { + if !ppglr.hasNextLink() { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), - autorest.WithBaseURL(to.String(ilr.NextLink))) + autorest.WithBaseURL(to.String(ppglr.NextLink))) } -// ImageListResultPage contains a page of Image values. -type ImageListResultPage struct { - fn func(context.Context, ImageListResult) (ImageListResult, error) - ilr ImageListResult +// ProximityPlacementGroupListResultPage contains a page of ProximityPlacementGroup values. +type ProximityPlacementGroupListResultPage struct { + fn func(context.Context, ProximityPlacementGroupListResult) (ProximityPlacementGroupListResult, error) + ppglr ProximityPlacementGroupListResult } // NextWithContext advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. 
-func (page *ImageListResultPage) NextWithContext(ctx context.Context) (err error) { +func (page *ProximityPlacementGroupListResultPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ImageListResultPage.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/ProximityPlacementGroupListResultPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { @@ -6902,11 +11709,11 @@ func (page *ImageListResultPage) NextWithContext(ctx context.Context) (err error }() } for { - next, err := page.fn(ctx, page.ilr) + next, err := page.fn(ctx, page.ppglr) if err != nil { return err } - page.ilr = next + page.ppglr = next if !next.hasNextLink() || !next.IsEmpty() { break } @@ -6917,469 +11724,425 @@ func (page *ImageListResultPage) NextWithContext(ctx context.Context) (err error // Next advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (page *ImageListResultPage) Next() error { +func (page *ProximityPlacementGroupListResultPage) Next() error { return page.NextWithContext(context.Background()) } // NotDone returns true if the page enumeration should be started or is not yet complete. -func (page ImageListResultPage) NotDone() bool { - return !page.ilr.IsEmpty() +func (page ProximityPlacementGroupListResultPage) NotDone() bool { + return !page.ppglr.IsEmpty() } // Response returns the raw server response from the last page request. -func (page ImageListResultPage) Response() ImageListResult { - return page.ilr +func (page ProximityPlacementGroupListResultPage) Response() ProximityPlacementGroupListResult { + return page.ppglr } // Values returns the slice of values for the current page or nil if there are no values. -func (page ImageListResultPage) Values() []Image { - if page.ilr.IsEmpty() { +func (page ProximityPlacementGroupListResultPage) Values() []ProximityPlacementGroup { + if page.ppglr.IsEmpty() { return nil } - return *page.ilr.Value + return *page.ppglr.Value } -// Creates a new instance of the ImageListResultPage type. -func NewImageListResultPage(cur ImageListResult, getNextPage func(context.Context, ImageListResult) (ImageListResult, error)) ImageListResultPage { - return ImageListResultPage{ - fn: getNextPage, - ilr: cur, +// Creates a new instance of the ProximityPlacementGroupListResultPage type. +func NewProximityPlacementGroupListResultPage(cur ProximityPlacementGroupListResult, getNextPage func(context.Context, ProximityPlacementGroupListResult) (ProximityPlacementGroupListResult, error)) ProximityPlacementGroupListResultPage { + return ProximityPlacementGroupListResultPage{ + fn: getNextPage, + ppglr: cur, } } -// ImageOSDisk describes an Operating System disk. -type ImageOSDisk struct { - // OsType - This property allows you to specify the type of the OS that is included in the disk if creating a VM from a custom image.

Possible values are:

**Windows**

**Linux**. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux' - OsType OperatingSystemTypes `json:"osType,omitempty"` - // OsState - The OS State. Possible values include: 'OperatingSystemStateTypesGeneralized', 'OperatingSystemStateTypesSpecialized' - OsState OperatingSystemStateTypes `json:"osState,omitempty"` - // Snapshot - The snapshot. - Snapshot *SubResource `json:"snapshot,omitempty"` - // ManagedDisk - The managedDisk. - ManagedDisk *SubResource `json:"managedDisk,omitempty"` - // BlobURI - The Virtual Hard Disk. - BlobURI *string `json:"blobUri,omitempty"` - // Caching - Specifies the caching requirements.

Possible values are:

**None**

**ReadOnly**

**ReadWrite**

Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' - Caching CachingTypes `json:"caching,omitempty"` - // DiskSizeGB - Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.

This value cannot be larger than 1023 GB - DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` - // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS', 'StorageAccountTypesPremiumZRS', 'StorageAccountTypesStandardSSDZRS' - StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` - // DiskEncryptionSet - Specifies the customer managed disk encryption set resource id for the managed image disk. - DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"` -} - -// ImageProperties describes the properties of an Image. -type ImageProperties struct { - // SourceVirtualMachine - The source virtual machine from which Image is created. - SourceVirtualMachine *SubResource `json:"sourceVirtualMachine,omitempty"` - // StorageProfile - Specifies the storage settings for the virtual machine disks. - StorageProfile *ImageStorageProfile `json:"storageProfile,omitempty"` - // ProvisioningState - READ-ONLY; The provisioning state. - ProvisioningState *string `json:"provisioningState,omitempty"` - // HyperVGeneration - Specifies the HyperVGenerationType of the VirtualMachine created from the image. From API Version 2019-03-01 if the image source is a blob, then we need the user to specify the value, if the source is managed resource like disk or snapshot, we may require the user to specify the property if we cannot deduce it from the source managed resource. Possible values include: 'HyperVGenerationTypesV1', 'HyperVGenerationTypesV2' - HyperVGeneration HyperVGenerationTypes `json:"hyperVGeneration,omitempty"` +// ProximityPlacementGroupProperties describes the properties of a Proximity Placement Group. +type ProximityPlacementGroupProperties struct { + // ProximityPlacementGroupType - Specifies the type of the proximity placement group.

Possible values are:

**Standard** : Co-locate resources within an Azure region or Availability Zone.

**Ultra** : For future use. Possible values include: 'ProximityPlacementGroupTypeStandard', 'ProximityPlacementGroupTypeUltra' + ProximityPlacementGroupType ProximityPlacementGroupType `json:"proximityPlacementGroupType,omitempty"` + // VirtualMachines - READ-ONLY; A list of references to all virtual machines in the proximity placement group. + VirtualMachines *[]SubResourceWithColocationStatus `json:"virtualMachines,omitempty"` + // VirtualMachineScaleSets - READ-ONLY; A list of references to all virtual machine scale sets in the proximity placement group. + VirtualMachineScaleSets *[]SubResourceWithColocationStatus `json:"virtualMachineScaleSets,omitempty"` + // AvailabilitySets - READ-ONLY; A list of references to all availability sets in the proximity placement group. + AvailabilitySets *[]SubResourceWithColocationStatus `json:"availabilitySets,omitempty"` + // ColocationStatus - Describes colocation status of the Proximity Placement Group. + ColocationStatus *InstanceViewStatus `json:"colocationStatus,omitempty"` } -// MarshalJSON is the custom marshaler for ImageProperties. -func (IP ImageProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for ProximityPlacementGroupProperties. +func (ppgp ProximityPlacementGroupProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if IP.SourceVirtualMachine != nil { - objectMap["sourceVirtualMachine"] = IP.SourceVirtualMachine + if ppgp.ProximityPlacementGroupType != "" { + objectMap["proximityPlacementGroupType"] = ppgp.ProximityPlacementGroupType } - if IP.StorageProfile != nil { - objectMap["storageProfile"] = IP.StorageProfile + if ppgp.ColocationStatus != nil { + objectMap["colocationStatus"] = ppgp.ColocationStatus } - if IP.HyperVGeneration != "" { - objectMap["hyperVGeneration"] = IP.HyperVGeneration + return json.Marshal(objectMap) +} + +// ProximityPlacementGroupUpdate specifies information about the proximity placement group. +type ProximityPlacementGroupUpdate struct { + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for ProximityPlacementGroupUpdate. +func (ppgu ProximityPlacementGroupUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ppgu.Tags != nil { + objectMap["tags"] = ppgu.Tags } return json.Marshal(objectMap) } -// ImagePurchasePlan describes the gallery Image Definition purchase plan. This is used by marketplace -// images. -type ImagePurchasePlan struct { - // Name - The plan ID. +// ProxyOnlyResource the ProxyOnly Resource model definition. +type ProxyOnlyResource struct { + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name Name *string `json:"name,omitempty"` - // Publisher - The publisher ID. - Publisher *string `json:"publisher,omitempty"` - // Product - The product ID. - Product *string `json:"product,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` } -// ImageReference specifies information about the image to use. You can specify information about platform -// images, marketplace images, or virtual machine images. This element is required when you want to use a -// platform image, marketplace image, or virtual machine image, but is not used in other creation -// operations. NOTE: Image reference publisher and offer can only be set when you create the scale set. -type ImageReference struct { - // Publisher - The image publisher. 
- Publisher *string `json:"publisher,omitempty"` - // Offer - Specifies the offer of the platform image or marketplace image used to create the virtual machine. - Offer *string `json:"offer,omitempty"` - // Sku - The image SKU. - Sku *string `json:"sku,omitempty"` - // Version - Specifies the version of the platform image or marketplace image used to create the virtual machine. The allowed formats are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. Specify 'latest' to use the latest version of an image available at deploy time. Even if you use 'latest', the VM image will not automatically update after deploy time even if a new version becomes available. - Version *string `json:"version,omitempty"` - // ExactVersion - READ-ONLY; Specifies in decimal numbers, the version of platform image or marketplace image used to create the virtual machine. This readonly field differs from 'version', only if the value specified in 'version' field is 'latest'. - ExactVersion *string `json:"exactVersion,omitempty"` - // ID - Resource Id +// MarshalJSON is the custom marshaler for ProxyOnlyResource. +func (por ProxyOnlyResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// ProxyResource the resource model definition for an Azure Resource Manager proxy resource. It will not +// have tags and a location +type ProxyResource struct { + // ID - READ-ONLY; Resource Id ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` } -// MarshalJSON is the custom marshaler for ImageReference. -func (ir ImageReference) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for ProxyResource. +func (pr ProxyResource) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if ir.Publisher != nil { - objectMap["publisher"] = ir.Publisher - } - if ir.Offer != nil { - objectMap["offer"] = ir.Offer - } - if ir.Sku != nil { - objectMap["sku"] = ir.Sku - } - if ir.Version != nil { - objectMap["version"] = ir.Version - } - if ir.ID != nil { - objectMap["id"] = ir.ID - } return json.Marshal(objectMap) } -// ImagesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type ImagesCreateOrUpdateFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(ImagesClient) (Image, error) +// PublicIPAddressSku describes the public IP Sku +type PublicIPAddressSku struct { + // Name - Specify public IP sku name. Possible values include: 'PublicIPAddressSkuNameBasic', 'PublicIPAddressSkuNameStandard' + Name PublicIPAddressSkuName `json:"name,omitempty"` + // Tier - Specify public IP sku tier. Possible values include: 'PublicIPAddressSkuTierRegional', 'PublicIPAddressSkuTierGlobal' + Tier PublicIPAddressSkuTier `json:"tier,omitempty"` } -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *ImagesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil +// PurchasePlan used for establishing the purchase context of any 3rd Party artifact through MarketPlace. 
+type PurchasePlan struct { + // Publisher - The publisher ID. + Publisher *string `json:"publisher,omitempty"` + // Name - The plan ID. + Name *string `json:"name,omitempty"` + // Product - Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference element. + Product *string `json:"product,omitempty"` + // PromotionCode - The Offer Promotion Code. + PromotionCode *string `json:"promotionCode,omitempty"` } -// result is the default implementation for ImagesCreateOrUpdateFuture.Result. -func (future *ImagesCreateOrUpdateFuture) result(client ImagesClient) (i Image, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ImagesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - i.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.ImagesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if i.Response.Response, err = future.GetResult(sender); err == nil && i.Response.Response.StatusCode != http.StatusNoContent { - i, err = client.CreateOrUpdateResponder(i.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ImagesCreateOrUpdateFuture", "Result", i.Response.Response, "Failure responding to request") - } - } - return +// ReadCloser ... +type ReadCloser struct { + autorest.Response `json:"-"` + Value *io.ReadCloser `json:"value,omitempty"` } -// ImagesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. -type ImagesDeleteFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(ImagesClient) (autorest.Response, error) +// RecommendedMachineConfiguration the properties describe the recommended machine configuration for this +// Image Definition. These properties are updatable. +type RecommendedMachineConfiguration struct { + VCPUs *ResourceRange `json:"vCPUs,omitempty"` + Memory *ResourceRange `json:"memory,omitempty"` } -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *ImagesDeleteFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil +// RecoveryWalkResponse response after calling a manual recovery walk +type RecoveryWalkResponse struct { + autorest.Response `json:"-"` + // WalkPerformed - READ-ONLY; Whether the recovery walk was performed + WalkPerformed *bool `json:"walkPerformed,omitempty"` + // NextPlatformUpdateDomain - READ-ONLY; The next update domain that needs to be walked. Null means walk spanning all update domains has been completed + NextPlatformUpdateDomain *int32 `json:"nextPlatformUpdateDomain,omitempty"` } -// result is the default implementation for ImagesDeleteFuture.Result. 
-func (future *ImagesDeleteFuture) result(client ImagesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ImagesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.ImagesDeleteFuture") - return - } - ar.Response = future.Response() - return +// MarshalJSON is the custom marshaler for RecoveryWalkResponse. +func (rwr RecoveryWalkResponse) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// RegionalReplicationStatus this is the regional replication status. +type RegionalReplicationStatus struct { + // Region - READ-ONLY; The region to which the gallery image version is being replicated to. + Region *string `json:"region,omitempty"` + // State - READ-ONLY; This is the regional replication state. Possible values include: 'ReplicationStateUnknown', 'ReplicationStateReplicating', 'ReplicationStateCompleted', 'ReplicationStateFailed' + State ReplicationState `json:"state,omitempty"` + // Details - READ-ONLY; The details of the replication status. + Details *string `json:"details,omitempty"` + // Progress - READ-ONLY; It indicates progress of the replication job. + Progress *int32 `json:"progress,omitempty"` } -// ImageStorageProfile describes a storage profile. -type ImageStorageProfile struct { - // OsDisk - Specifies information about the operating system disk used by the virtual machine.

For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). - OsDisk *ImageOSDisk `json:"osDisk,omitempty"` - // DataDisks - Specifies the parameters that are used to add a data disk to a virtual machine.

For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). - DataDisks *[]ImageDataDisk `json:"dataDisks,omitempty"` - // ZoneResilient - Specifies whether an image is zone resilient or not. Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage (ZRS). - ZoneResilient *bool `json:"zoneResilient,omitempty"` +// MarshalJSON is the custom marshaler for RegionalReplicationStatus. +func (rrs RegionalReplicationStatus) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) } -// ImagesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. -type ImagesUpdateFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(ImagesClient) (Image, error) +// ReplicationStatus this is the replication status of the gallery image version. +type ReplicationStatus struct { + // AggregatedState - READ-ONLY; This is the aggregated replication status based on all the regional replication status flags. Possible values include: 'AggregatedReplicationStateUnknown', 'AggregatedReplicationStateInProgress', 'AggregatedReplicationStateCompleted', 'AggregatedReplicationStateFailed' + AggregatedState AggregatedReplicationState `json:"aggregatedState,omitempty"` + // Summary - READ-ONLY; This is a summary of replication status for each region. + Summary *[]RegionalReplicationStatus `json:"summary,omitempty"` } -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *ImagesUpdateFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil +// MarshalJSON is the custom marshaler for ReplicationStatus. +func (rs ReplicationStatus) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) } -// result is the default implementation for ImagesUpdateFuture.Result. -func (future *ImagesUpdateFuture) result(client ImagesClient) (i Image, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ImagesUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - i.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.ImagesUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if i.Response.Response, err = future.GetResult(sender); err == nil && i.Response.Response.StatusCode != http.StatusNoContent { - i, err = client.UpdateResponder(i.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ImagesUpdateFuture", "Result", i.Response.Response, "Failure responding to request") - } - } - return +// RequestRateByIntervalInput api request input for LogAnalytics getRequestRateByInterval Api. +type RequestRateByIntervalInput struct { + // IntervalLength - Interval value in minutes used to create LogAnalytics call rate logs. 
Possible values include: 'IntervalInMinsThreeMins', 'IntervalInMinsFiveMins', 'IntervalInMinsThirtyMins', 'IntervalInMinsSixtyMins' + IntervalLength IntervalInMins `json:"intervalLength,omitempty"` + // BlobContainerSasURI - SAS Uri of the logging blob container to which LogAnalytics Api writes output logs to. + BlobContainerSasURI *string `json:"blobContainerSasUri,omitempty"` + // FromTime - From time of the query + FromTime *date.Time `json:"fromTime,omitempty"` + // ToTime - To time of the query + ToTime *date.Time `json:"toTime,omitempty"` + // GroupByThrottlePolicy - Group query result by Throttle Policy applied. + GroupByThrottlePolicy *bool `json:"groupByThrottlePolicy,omitempty"` + // GroupByOperationName - Group query result by Operation Name. + GroupByOperationName *bool `json:"groupByOperationName,omitempty"` + // GroupByResourceName - Group query result by Resource Name. + GroupByResourceName *bool `json:"groupByResourceName,omitempty"` + // GroupByClientApplicationID - Group query result by Client Application ID. + GroupByClientApplicationID *bool `json:"groupByClientApplicationId,omitempty"` + // GroupByUserAgent - Group query result by User Agent. + GroupByUserAgent *bool `json:"groupByUserAgent,omitempty"` } -// ImageUpdate the source user image virtual hard disk. Only tags may be updated. -type ImageUpdate struct { - *ImageProperties `json:"properties,omitempty"` +// Resource the Resource model definition. +type Resource struct { + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` // Tags - Resource tags Tags map[string]*string `json:"tags"` } -// MarshalJSON is the custom marshaler for ImageUpdate. -func (iu ImageUpdate) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for Resource. +func (r Resource) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if iu.ImageProperties != nil { - objectMap["properties"] = iu.ImageProperties + if r.Location != nil { + objectMap["location"] = r.Location } - if iu.Tags != nil { - objectMap["tags"] = iu.Tags + if r.Tags != nil { + objectMap["tags"] = r.Tags } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for ImageUpdate struct. -func (iu *ImageUpdate) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var imageProperties ImageProperties - err = json.Unmarshal(*v, &imageProperties) - if err != nil { - return err - } - iu.ImageProperties = &imageProperties - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - iu.Tags = tags - } - } +// ResourceInstanceViewStatus instance view status. +type ResourceInstanceViewStatus struct { + // Code - READ-ONLY; The status code. + Code *string `json:"code,omitempty"` + // DisplayStatus - READ-ONLY; The short localizable label for the status. + DisplayStatus *string `json:"displayStatus,omitempty"` + // Message - READ-ONLY; The detailed status message, including for alerts and error messages. + Message *string `json:"message,omitempty"` + // Time - READ-ONLY; The time of the status. 
+ Time *date.Time `json:"time,omitempty"` + // Level - The level code. Possible values include: 'StatusLevelTypesInfo', 'StatusLevelTypesWarning', 'StatusLevelTypesError' + Level StatusLevelTypes `json:"level,omitempty"` +} + +// MarshalJSON is the custom marshaler for ResourceInstanceViewStatus. +func (rivs ResourceInstanceViewStatus) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if rivs.Level != "" { + objectMap["level"] = rivs.Level } + return json.Marshal(objectMap) +} - return nil +// ResourceRange describes the resource range. +type ResourceRange struct { + // Min - The minimum number of the resource. + Min *int32 `json:"min,omitempty"` + // Max - The maximum number of the resource. + Max *int32 `json:"max,omitempty"` } -// InnerError inner error details. -type InnerError struct { - // Exceptiontype - The exception type. - Exceptiontype *string `json:"exceptiontype,omitempty"` - // Errordetail - The internal error message or exception dump. - Errordetail *string `json:"errordetail,omitempty"` +// ResourceSku describes an available Compute SKU. +type ResourceSku struct { + // ResourceType - READ-ONLY; The type of resource the SKU applies to. + ResourceType *string `json:"resourceType,omitempty"` + // Name - READ-ONLY; The name of SKU. + Name *string `json:"name,omitempty"` + // Tier - READ-ONLY; Specifies the tier of virtual machines in a scale set.

Possible Values:

**Standard**

**Basic** + Tier *string `json:"tier,omitempty"` + // Size - READ-ONLY; The Size of the SKU. + Size *string `json:"size,omitempty"` + // Family - READ-ONLY; The Family of this particular SKU. + Family *string `json:"family,omitempty"` + // Kind - READ-ONLY; The Kind of resources that are supported in this SKU. + Kind *string `json:"kind,omitempty"` + // Capacity - READ-ONLY; Specifies the number of virtual machines in the scale set. + Capacity *ResourceSkuCapacity `json:"capacity,omitempty"` + // Locations - READ-ONLY; The set of locations that the SKU is available. + Locations *[]string `json:"locations,omitempty"` + // LocationInfo - READ-ONLY; A list of locations and availability zones in those locations where the SKU is available. + LocationInfo *[]ResourceSkuLocationInfo `json:"locationInfo,omitempty"` + // APIVersions - READ-ONLY; The api versions that support this SKU. + APIVersions *[]string `json:"apiVersions,omitempty"` + // Costs - READ-ONLY; Metadata for retrieving price info. + Costs *[]ResourceSkuCosts `json:"costs,omitempty"` + // Capabilities - READ-ONLY; A name value pair to describe the capability. + Capabilities *[]ResourceSkuCapabilities `json:"capabilities,omitempty"` + // Restrictions - READ-ONLY; The restrictions because of which SKU cannot be used. This is empty if there are no restrictions. + Restrictions *[]ResourceSkuRestrictions `json:"restrictions,omitempty"` } -// InstanceViewStatus instance view status. -type InstanceViewStatus struct { - // Code - The status code. - Code *string `json:"code,omitempty"` - // Level - The level code. Possible values include: 'StatusLevelTypesInfo', 'StatusLevelTypesWarning', 'StatusLevelTypesError' - Level StatusLevelTypes `json:"level,omitempty"` - // DisplayStatus - The short localizable label for the status. - DisplayStatus *string `json:"displayStatus,omitempty"` - // Message - The detailed status message, including for alerts and error messages. - Message *string `json:"message,omitempty"` - // Time - The time of the status. - Time *date.Time `json:"time,omitempty"` +// MarshalJSON is the custom marshaler for ResourceSku. +func (rs ResourceSku) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) } -// KeyForDiskEncryptionSet key Vault Key Url to be used for server side encryption of Managed Disks and -// Snapshots -type KeyForDiskEncryptionSet struct { - // SourceVault - Resource id of the KeyVault containing the key or secret. This property is optional and cannot be used if the KeyVault subscription is not the same as the Disk Encryption Set subscription. - SourceVault *SourceVault `json:"sourceVault,omitempty"` - // KeyURL - Fully versioned Key Url pointing to a key in KeyVault. Version segment of the Url is required regardless of rotationToLatestKeyVersionEnabled value. - KeyURL *string `json:"keyUrl,omitempty"` +// ResourceSkuCapabilities describes The SKU capabilities object. +type ResourceSkuCapabilities struct { + // Name - READ-ONLY; An invariant to describe the feature. + Name *string `json:"name,omitempty"` + // Value - READ-ONLY; An invariant if the feature is measured by quantity. 
+ Value *string `json:"value,omitempty"` } -// KeyVaultAndKeyReference key Vault Key Url and vault id of KeK, KeK is optional and when provided is used -// to unwrap the encryptionKey -type KeyVaultAndKeyReference struct { - // SourceVault - Resource id of the KeyVault containing the key or secret - SourceVault *SourceVault `json:"sourceVault,omitempty"` - // KeyURL - Url pointing to a key or secret in KeyVault - KeyURL *string `json:"keyUrl,omitempty"` +// MarshalJSON is the custom marshaler for ResourceSkuCapabilities. +func (rsc ResourceSkuCapabilities) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) } -// KeyVaultAndSecretReference key Vault Secret Url and vault id of the encryption key -type KeyVaultAndSecretReference struct { - // SourceVault - Resource id of the KeyVault containing the key or secret - SourceVault *SourceVault `json:"sourceVault,omitempty"` - // SecretURL - Url pointing to a key or secret in KeyVault - SecretURL *string `json:"secretUrl,omitempty"` +// ResourceSkuCapacity describes scaling information of a SKU. +type ResourceSkuCapacity struct { + // Minimum - READ-ONLY; The minimum capacity. + Minimum *int64 `json:"minimum,omitempty"` + // Maximum - READ-ONLY; The maximum capacity that can be set. + Maximum *int64 `json:"maximum,omitempty"` + // Default - READ-ONLY; The default capacity. + Default *int64 `json:"default,omitempty"` + // ScaleType - READ-ONLY; The scale type applicable to the sku. Possible values include: 'ResourceSkuCapacityScaleTypeAutomatic', 'ResourceSkuCapacityScaleTypeManual', 'ResourceSkuCapacityScaleTypeNone' + ScaleType ResourceSkuCapacityScaleType `json:"scaleType,omitempty"` } -// KeyVaultKeyReference describes a reference to Key Vault Key -type KeyVaultKeyReference struct { - // KeyURL - The URL referencing a key encryption key in Key Vault. - KeyURL *string `json:"keyUrl,omitempty"` - // SourceVault - The relative URL of the Key Vault containing the key. - SourceVault *SubResource `json:"sourceVault,omitempty"` +// MarshalJSON is the custom marshaler for ResourceSkuCapacity. +func (rsc ResourceSkuCapacity) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// ResourceSkuCosts describes metadata for retrieving price info. +type ResourceSkuCosts struct { + // MeterID - READ-ONLY; Used for querying price from commerce. + MeterID *string `json:"meterID,omitempty"` + // Quantity - READ-ONLY; The multiplier is needed to extend the base metered cost. + Quantity *int64 `json:"quantity,omitempty"` + // ExtendedUnit - READ-ONLY; An invariant to show the extended unit. + ExtendedUnit *string `json:"extendedUnit,omitempty"` } -// KeyVaultSecretReference describes a reference to Key Vault Secret -type KeyVaultSecretReference struct { - // SecretURL - The URL referencing a secret in a Key Vault. - SecretURL *string `json:"secretUrl,omitempty"` - // SourceVault - The relative URL of the Key Vault containing the secret. - SourceVault *SubResource `json:"sourceVault,omitempty"` +// MarshalJSON is the custom marshaler for ResourceSkuCosts. +func (rsc ResourceSkuCosts) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) } -// LastPatchInstallationSummary describes the properties of the last installed patch summary. -type LastPatchInstallationSummary struct { - // Status - READ-ONLY; The overall success or failure status of the operation. 
It remains "InProgress" until the operation completes. At that point it will become "Unknown", "Failed", "Succeeded", or "CompletedWithWarnings.". Possible values include: 'PatchOperationStatusUnknown', 'PatchOperationStatusInProgress', 'PatchOperationStatusFailed', 'PatchOperationStatusSucceeded', 'PatchOperationStatusCompletedWithWarnings' - Status PatchOperationStatus `json:"status,omitempty"` - // InstallationActivityID - READ-ONLY; The activity ID of the operation that produced this result. It is used to correlate across CRP and extension logs. - InstallationActivityID *string `json:"installationActivityId,omitempty"` - // MaintenanceWindowExceeded - READ-ONLY; Describes whether the operation ran out of time before it completed all its intended actions - MaintenanceWindowExceeded *bool `json:"maintenanceWindowExceeded,omitempty"` - // NotSelectedPatchCount - READ-ONLY; The number of all available patches but not going to be installed because it didn't match a classification or inclusion list entry. - NotSelectedPatchCount *int32 `json:"notSelectedPatchCount,omitempty"` - // ExcludedPatchCount - READ-ONLY; The number of all available patches but excluded explicitly by a customer-specified exclusion list match. - ExcludedPatchCount *int32 `json:"excludedPatchCount,omitempty"` - // PendingPatchCount - READ-ONLY; The number of all available patches expected to be installed over the course of the patch installation operation. - PendingPatchCount *int32 `json:"pendingPatchCount,omitempty"` - // InstalledPatchCount - READ-ONLY; The count of patches that successfully installed. - InstalledPatchCount *int32 `json:"installedPatchCount,omitempty"` - // FailedPatchCount - READ-ONLY; The count of patches that failed installation. - FailedPatchCount *int32 `json:"failedPatchCount,omitempty"` - // StartTime - READ-ONLY; The UTC timestamp when the operation began. - StartTime *date.Time `json:"startTime,omitempty"` - // LastModifiedTime - READ-ONLY; The UTC timestamp when the operation began. - LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"` - // Error - READ-ONLY; The errors that were encountered during execution of the operation. The details array contains the list of them. - Error *APIError `json:"error,omitempty"` +// ResourceSkuLocationInfo describes an available Compute SKU Location Information. +type ResourceSkuLocationInfo struct { + // Location - READ-ONLY; Location of the SKU + Location *string `json:"location,omitempty"` + // Zones - READ-ONLY; List of availability zones where the SKU is supported. + Zones *[]string `json:"zones,omitempty"` + // ZoneDetails - READ-ONLY; Details of capabilities available to a SKU in specific zones. + ZoneDetails *[]ResourceSkuZoneDetails `json:"zoneDetails,omitempty"` + // ExtendedLocations - READ-ONLY; The names of extended locations. + ExtendedLocations *[]string `json:"extendedLocations,omitempty"` + // Type - READ-ONLY; The type of the extended location. Possible values include: 'ExtendedLocationTypeEdgeZone' + Type ExtendedLocationType `json:"type,omitempty"` } -// MarshalJSON is the custom marshaler for LastPatchInstallationSummary. -func (lpis LastPatchInstallationSummary) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for ResourceSkuLocationInfo. +func (rsli ResourceSkuLocationInfo) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) return json.Marshal(objectMap) } -// LinuxConfiguration specifies the Linux operating system settings on the virtual machine.

For a -// list of supported Linux distributions, see [Linux on Azure-Endorsed -// Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-endorsed-distros?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) -//

For running non-endorsed distributions, see [Information for Non-Endorsed -// Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-create-upload-generic?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json). -type LinuxConfiguration struct { - // DisablePasswordAuthentication - Specifies whether password authentication should be disabled. - DisablePasswordAuthentication *bool `json:"disablePasswordAuthentication,omitempty"` - // SSH - Specifies the ssh key configuration for a Linux OS. - SSH *SSHConfiguration `json:"ssh,omitempty"` - // ProvisionVMAgent - Indicates whether virtual machine agent should be provisioned on the virtual machine.

When this property is not specified in the request body, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later. - ProvisionVMAgent *bool `json:"provisionVMAgent,omitempty"` - // PatchSettings - [Preview Feature] Specifies settings related to VM Guest Patching on Linux. - PatchSettings *LinuxPatchSettings `json:"patchSettings,omitempty"` +// ResourceSkuRestrictionInfo describes an available Compute SKU Restriction Information. +type ResourceSkuRestrictionInfo struct { + // Locations - READ-ONLY; Locations where the SKU is restricted + Locations *[]string `json:"locations,omitempty"` + // Zones - READ-ONLY; List of availability zones where the SKU is restricted. + Zones *[]string `json:"zones,omitempty"` } -// LinuxParameters input for InstallPatches on a Linux VM, as directly received by the API -type LinuxParameters struct { - // ClassificationsToInclude - The update classifications to select when installing patches for Linux. - ClassificationsToInclude *[]VMGuestPatchClassificationLinux `json:"classificationsToInclude,omitempty"` - // PackageNameMasksToInclude - packages to include in the patch operation. Format: packageName_packageVersion - PackageNameMasksToInclude *[]string `json:"packageNameMasksToInclude,omitempty"` - // PackageNameMasksToExclude - packages to exclude in the patch operation. Format: packageName_packageVersion - PackageNameMasksToExclude *[]string `json:"packageNameMasksToExclude,omitempty"` - // MaintenanceRunID - This is used as a maintenance run identifier for Auto VM Guest Patching in Linux. - MaintenanceRunID *string `json:"maintenanceRunId,omitempty"` +// MarshalJSON is the custom marshaler for ResourceSkuRestrictionInfo. +func (rsri ResourceSkuRestrictionInfo) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) } -// LinuxPatchSettings specifies settings related to VM Guest Patching on Linux. -type LinuxPatchSettings struct { - // PatchMode - Specifies the mode of VM Guest Patching to IaaS virtual machine.

Possible values are:

**ImageDefault** - The virtual machine's default patching configuration is used.

**AutomaticByPlatform** - The virtual machine will be automatically updated by the platform. The property provisionVMAgent must be true. Possible values include: 'LinuxVMGuestPatchModeImageDefault', 'LinuxVMGuestPatchModeAutomaticByPlatform' - PatchMode LinuxVMGuestPatchMode `json:"patchMode,omitempty"` +// ResourceSkuRestrictions describes scaling information of a SKU. +type ResourceSkuRestrictions struct { + // Type - READ-ONLY; The type of restrictions. Possible values include: 'ResourceSkuRestrictionsTypeLocation', 'ResourceSkuRestrictionsTypeZone' + Type ResourceSkuRestrictionsType `json:"type,omitempty"` + // Values - READ-ONLY; The value of restrictions. If the restriction type is set to location. This would be different locations where the SKU is restricted. + Values *[]string `json:"values,omitempty"` + // RestrictionInfo - READ-ONLY; The information about the restriction where the SKU cannot be used. + RestrictionInfo *ResourceSkuRestrictionInfo `json:"restrictionInfo,omitempty"` + // ReasonCode - READ-ONLY; The reason for restriction. Possible values include: 'ResourceSkuRestrictionsReasonCodeQuotaID', 'ResourceSkuRestrictionsReasonCodeNotAvailableForSubscription' + ReasonCode ResourceSkuRestrictionsReasonCode `json:"reasonCode,omitempty"` } -// ListUsagesResult the List Usages operation response. -type ListUsagesResult struct { +// MarshalJSON is the custom marshaler for ResourceSkuRestrictions. +func (rsr ResourceSkuRestrictions) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// ResourceSkusResult the List Resource Skus operation response. +type ResourceSkusResult struct { autorest.Response `json:"-"` - // Value - The list of compute resource usages. - Value *[]Usage `json:"value,omitempty"` - // NextLink - The URI to fetch the next page of compute resource usage information. Call ListNext() with this to fetch the next page of compute resource usage information. + // Value - The list of skus available for the subscription. + Value *[]ResourceSku `json:"value,omitempty"` + // NextLink - The URI to fetch the next page of Resource Skus. Call ListNext() with this URI to fetch the next page of Resource Skus NextLink *string `json:"nextLink,omitempty"` } -// ListUsagesResultIterator provides access to a complete listing of Usage values. -type ListUsagesResultIterator struct { +// ResourceSkusResultIterator provides access to a complete listing of ResourceSku values. +type ResourceSkusResultIterator struct { i int - page ListUsagesResultPage + page ResourceSkusResultPage } // NextWithContext advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. -func (iter *ListUsagesResultIterator) NextWithContext(ctx context.Context) (err error) { +func (iter *ResourceSkusResultIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ListUsagesResultIterator.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/ResourceSkusResultIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { @@ -7404,67 +12167,67 @@ func (iter *ListUsagesResultIterator) NextWithContext(ctx context.Context) (err // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. 
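// Illustrative only, not part of the vendored SDK diff: a minimal sketch of how a caller
// might read the ResourceSkuRestrictions / ResourceSkuRestrictionInfo values introduced
// above. The vendored import path for API version 2021-07-01 is an assumption based on
// the changelog entry in this PR, and the helper name restrictedLocations is hypothetical.
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

// restrictedLocations collects the locations a SKU cannot be used in, based on the
// location-type restriction entries returned by the Resource SKUs API.
func restrictedLocations(restrictions []compute.ResourceSkuRestrictions) []string {
	var out []string
	for _, r := range restrictions {
		// Only location-type restrictions carry a meaningful Locations list.
		if r.Type != compute.ResourceSkuRestrictionsTypeLocation || r.RestrictionInfo == nil || r.RestrictionInfo.Locations == nil {
			continue
		}
		out = append(out, *r.RestrictionInfo.Locations...)
	}
	return out
}

func main() {
	fmt.Println(restrictedLocations(nil)) // prints []
}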
-func (iter *ListUsagesResultIterator) Next() error { +func (iter *ResourceSkusResultIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. -func (iter ListUsagesResultIterator) NotDone() bool { +func (iter ResourceSkusResultIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. -func (iter ListUsagesResultIterator) Response() ListUsagesResult { +func (iter ResourceSkusResultIterator) Response() ResourceSkusResult { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. -func (iter ListUsagesResultIterator) Value() Usage { +func (iter ResourceSkusResultIterator) Value() ResourceSku { if !iter.page.NotDone() { - return Usage{} + return ResourceSku{} } return iter.page.Values()[iter.i] } -// Creates a new instance of the ListUsagesResultIterator type. -func NewListUsagesResultIterator(page ListUsagesResultPage) ListUsagesResultIterator { - return ListUsagesResultIterator{page: page} +// Creates a new instance of the ResourceSkusResultIterator type. +func NewResourceSkusResultIterator(page ResourceSkusResultPage) ResourceSkusResultIterator { + return ResourceSkusResultIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. -func (lur ListUsagesResult) IsEmpty() bool { - return lur.Value == nil || len(*lur.Value) == 0 +func (rsr ResourceSkusResult) IsEmpty() bool { + return rsr.Value == nil || len(*rsr.Value) == 0 } // hasNextLink returns true if the NextLink is not empty. -func (lur ListUsagesResult) hasNextLink() bool { - return lur.NextLink != nil && len(*lur.NextLink) != 0 +func (rsr ResourceSkusResult) hasNextLink() bool { + return rsr.NextLink != nil && len(*rsr.NextLink) != 0 } -// listUsagesResultPreparer prepares a request to retrieve the next set of results. +// resourceSkusResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. -func (lur ListUsagesResult) listUsagesResultPreparer(ctx context.Context) (*http.Request, error) { - if !lur.hasNextLink() { +func (rsr ResourceSkusResult) resourceSkusResultPreparer(ctx context.Context) (*http.Request, error) { + if !rsr.hasNextLink() { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), - autorest.WithBaseURL(to.String(lur.NextLink))) + autorest.WithBaseURL(to.String(rsr.NextLink))) } -// ListUsagesResultPage contains a page of Usage values. -type ListUsagesResultPage struct { - fn func(context.Context, ListUsagesResult) (ListUsagesResult, error) - lur ListUsagesResult +// ResourceSkusResultPage contains a page of ResourceSku values. +type ResourceSkusResultPage struct { + fn func(context.Context, ResourceSkusResult) (ResourceSkusResult, error) + rsr ResourceSkusResult } // NextWithContext advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. 
-func (page *ListUsagesResultPage) NextWithContext(ctx context.Context) (err error) { +func (page *ResourceSkusResultPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ListUsagesResultPage.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/ResourceSkusResultPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { @@ -7474,325 +12237,247 @@ func (page *ListUsagesResultPage) NextWithContext(ctx context.Context) (err erro }() } for { - next, err := page.fn(ctx, page.lur) - if err != nil { - return err - } - page.lur = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *ListUsagesResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page ListUsagesResultPage) NotDone() bool { - return !page.lur.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page ListUsagesResultPage) Response() ListUsagesResult { - return page.lur -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page ListUsagesResultPage) Values() []Usage { - if page.lur.IsEmpty() { - return nil - } - return *page.lur.Value -} - -// Creates a new instance of the ListUsagesResultPage type. -func NewListUsagesResultPage(cur ListUsagesResult, getNextPage func(context.Context, ListUsagesResult) (ListUsagesResult, error)) ListUsagesResultPage { - return ListUsagesResultPage{ - fn: getNextPage, - lur: cur, - } -} - -// ListVirtualMachineExtensionImage ... -type ListVirtualMachineExtensionImage struct { - autorest.Response `json:"-"` - Value *[]VirtualMachineExtensionImage `json:"value,omitempty"` -} - -// ListVirtualMachineImageResource ... -type ListVirtualMachineImageResource struct { - autorest.Response `json:"-"` - Value *[]VirtualMachineImageResource `json:"value,omitempty"` -} - -// LogAnalyticsExportRequestRateByIntervalFuture an abstraction for monitoring and retrieving the results -// of a long-running operation. -type LogAnalyticsExportRequestRateByIntervalFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(LogAnalyticsClient) (LogAnalyticsOperationResult, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *LogAnalyticsExportRequestRateByIntervalFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for LogAnalyticsExportRequestRateByIntervalFuture.Result. 
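// Illustrative only, not part of the vendored SDK diff: a minimal sketch of the standard
// autorest paging loop for the ResourceSkusResultPage type added above. How the first page
// is obtained from the service in practice (e.g. a Resource SKUs List call) is out of
// scope and assumed; an empty page built with NewResourceSkusResultPage keeps the example
// self-contained. The vendored import path for API version 2021-07-01 is an assumption.
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

// printSkuPages walks every page using only the NotDone/Values/NextWithContext surface
// defined in this diff.
func printSkuPages(ctx context.Context, page compute.ResourceSkusResultPage) error {
	for page.NotDone() {
		fmt.Printf("page with %d SKUs\n", len(page.Values()))
		if err := page.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	// An empty first page: NotDone() is false, so the loop exits immediately.
	page := compute.NewResourceSkusResultPage(compute.ResourceSkusResult{}, func(_ context.Context, _ compute.ResourceSkusResult) (compute.ResourceSkusResult, error) {
		return compute.ResourceSkusResult{}, nil
	})
	if err := printSkuPages(context.Background(), page); err != nil {
		panic(err)
	}
}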
-func (future *LogAnalyticsExportRequestRateByIntervalFuture) result(client LogAnalyticsClient) (laor LogAnalyticsOperationResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportRequestRateByIntervalFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - laor.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.LogAnalyticsExportRequestRateByIntervalFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if laor.Response.Response, err = future.GetResult(sender); err == nil && laor.Response.Response.StatusCode != http.StatusNoContent { - laor, err = client.ExportRequestRateByIntervalResponder(laor.Response.Response) + next, err := page.fn(ctx, page.rsr) if err != nil { - err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportRequestRateByIntervalFuture", "Result", laor.Response.Response, "Failure responding to request") + return err + } + page.rsr = next + if !next.hasNextLink() || !next.IsEmpty() { + break } } - return + return nil } -// LogAnalyticsExportThrottledRequestsFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type LogAnalyticsExportThrottledRequestsFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(LogAnalyticsClient) (LogAnalyticsOperationResult, error) +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *ResourceSkusResultPage) Next() error { + return page.NextWithContext(context.Background()) } -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *LogAnalyticsExportThrottledRequestsFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ResourceSkusResultPage) NotDone() bool { + return !page.rsr.IsEmpty() } -// result is the default implementation for LogAnalyticsExportThrottledRequestsFuture.Result. 
-func (future *LogAnalyticsExportThrottledRequestsFuture) result(client LogAnalyticsClient) (laor LogAnalyticsOperationResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportThrottledRequestsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - laor.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("compute.LogAnalyticsExportThrottledRequestsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if laor.Response.Response, err = future.GetResult(sender); err == nil && laor.Response.Response.StatusCode != http.StatusNoContent { - laor, err = client.ExportThrottledRequestsResponder(laor.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportThrottledRequestsFuture", "Result", laor.Response.Response, "Failure responding to request") - } +// Response returns the raw server response from the last page request. +func (page ResourceSkusResultPage) Response() ResourceSkusResult { + return page.rsr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ResourceSkusResultPage) Values() []ResourceSku { + if page.rsr.IsEmpty() { + return nil } - return + return *page.rsr.Value } -// LogAnalyticsInputBase api input base class for LogAnalytics Api. -type LogAnalyticsInputBase struct { - // BlobContainerSasURI - SAS Uri of the logging blob container to which LogAnalytics Api writes output logs to. - BlobContainerSasURI *string `json:"blobContainerSasUri,omitempty"` - // FromTime - From time of the query - FromTime *date.Time `json:"fromTime,omitempty"` - // ToTime - To time of the query - ToTime *date.Time `json:"toTime,omitempty"` - // GroupByThrottlePolicy - Group query result by Throttle Policy applied. - GroupByThrottlePolicy *bool `json:"groupByThrottlePolicy,omitempty"` - // GroupByOperationName - Group query result by Operation Name. - GroupByOperationName *bool `json:"groupByOperationName,omitempty"` - // GroupByResourceName - Group query result by Resource Name. - GroupByResourceName *bool `json:"groupByResourceName,omitempty"` - // GroupByClientApplicationID - Group query result by Client Application ID. - GroupByClientApplicationID *bool `json:"groupByClientApplicationId,omitempty"` - // GroupByUserAgent - Group query result by User Agent. - GroupByUserAgent *bool `json:"groupByUserAgent,omitempty"` +// Creates a new instance of the ResourceSkusResultPage type. +func NewResourceSkusResultPage(cur ResourceSkusResult, getNextPage func(context.Context, ResourceSkusResult) (ResourceSkusResult, error)) ResourceSkusResultPage { + return ResourceSkusResultPage{ + fn: getNextPage, + rsr: cur, + } } -// LogAnalyticsOperationResult logAnalytics operation status response -type LogAnalyticsOperationResult struct { - autorest.Response `json:"-"` - // Properties - READ-ONLY; LogAnalyticsOutput - Properties *LogAnalyticsOutput `json:"properties,omitempty"` +// ResourceSkuZoneDetails describes The zonal capabilities of a SKU. +type ResourceSkuZoneDetails struct { + // Name - READ-ONLY; The set of zones that the SKU is available in with the specified capabilities. 
+ Name *[]string `json:"name,omitempty"` + // Capabilities - READ-ONLY; A list of capabilities that are available for the SKU in the specified list of zones. + Capabilities *[]ResourceSkuCapabilities `json:"capabilities,omitempty"` } -// MarshalJSON is the custom marshaler for LogAnalyticsOperationResult. -func (laor LogAnalyticsOperationResult) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for ResourceSkuZoneDetails. +func (rszd ResourceSkuZoneDetails) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) return json.Marshal(objectMap) } -// LogAnalyticsOutput logAnalytics output properties -type LogAnalyticsOutput struct { - // Output - READ-ONLY; Output file Uri path to blob container. - Output *string `json:"output,omitempty"` +// ResourceURIList the List resources which are encrypted with the disk encryption set. +type ResourceURIList struct { + autorest.Response `json:"-"` + // Value - A list of IDs or Owner IDs of resources which are encrypted with the disk encryption set. + Value *[]string `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of encrypted resources. Call ListNext() with this to fetch the next page of encrypted resources. + NextLink *string `json:"nextLink,omitempty"` } -// MarshalJSON is the custom marshaler for LogAnalyticsOutput. -func (lao LogAnalyticsOutput) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) +// ResourceURIListIterator provides access to a complete listing of string values. +type ResourceURIListIterator struct { + i int + page ResourceURIListPage } -// MaintenanceRedeployStatus maintenance Operation Status. -type MaintenanceRedeployStatus struct { - // IsCustomerInitiatedMaintenanceAllowed - True, if customer is allowed to perform Maintenance. - IsCustomerInitiatedMaintenanceAllowed *bool `json:"isCustomerInitiatedMaintenanceAllowed,omitempty"` - // PreMaintenanceWindowStartTime - Start Time for the Pre Maintenance Window. - PreMaintenanceWindowStartTime *date.Time `json:"preMaintenanceWindowStartTime,omitempty"` - // PreMaintenanceWindowEndTime - End Time for the Pre Maintenance Window. - PreMaintenanceWindowEndTime *date.Time `json:"preMaintenanceWindowEndTime,omitempty"` - // MaintenanceWindowStartTime - Start Time for the Maintenance Window. - MaintenanceWindowStartTime *date.Time `json:"maintenanceWindowStartTime,omitempty"` - // MaintenanceWindowEndTime - End Time for the Maintenance Window. - MaintenanceWindowEndTime *date.Time `json:"maintenanceWindowEndTime,omitempty"` - // LastOperationResultCode - The Last Maintenance Operation Result Code. Possible values include: 'MaintenanceOperationResultCodeTypesNone', 'MaintenanceOperationResultCodeTypesRetryLater', 'MaintenanceOperationResultCodeTypesMaintenanceAborted', 'MaintenanceOperationResultCodeTypesMaintenanceCompleted' - LastOperationResultCode MaintenanceOperationResultCodeTypes `json:"lastOperationResultCode,omitempty"` - // LastOperationMessage - Message returned for the last Maintenance Operation. - LastOperationMessage *string `json:"lastOperationMessage,omitempty"` +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
+func (iter *ResourceURIListIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ResourceURIListIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil } -// ManagedArtifact the managed artifact. -type ManagedArtifact struct { - // ID - The managed artifact id. - ID *string `json:"id,omitempty"` +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ResourceURIListIterator) Next() error { + return iter.NextWithContext(context.Background()) } -// ManagedDiskParameters the parameters of a managed disk. -type ManagedDiskParameters struct { - // StorageAccountType - Specifies the storage account type for the managed disk. Managed OS disk storage account type can only be set when you create the scale set. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS', 'StorageAccountTypesPremiumZRS', 'StorageAccountTypesStandardSSDZRS' - StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` - // DiskEncryptionSet - Specifies the customer managed disk encryption set resource id for the managed disk. - DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"` - // ID - Resource Id - ID *string `json:"id,omitempty"` +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ResourceURIListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) } -// NetworkInterfaceReference describes a network interface reference. -type NetworkInterfaceReference struct { - *NetworkInterfaceReferenceProperties `json:"properties,omitempty"` - // ID - Resource Id - ID *string `json:"id,omitempty"` +// Response returns the raw server response from the last page request. +func (iter ResourceURIListIterator) Response() ResourceURIList { + return iter.page.Response() } -// MarshalJSON is the custom marshaler for NetworkInterfaceReference. -func (nir NetworkInterfaceReference) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if nir.NetworkInterfaceReferenceProperties != nil { - objectMap["properties"] = nir.NetworkInterfaceReferenceProperties - } - if nir.ID != nil { - objectMap["id"] = nir.ID +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ResourceURIListIterator) Value() string { + if !iter.page.NotDone() { + return "" } - return json.Marshal(objectMap) + return iter.page.Values()[iter.i] } -// UnmarshalJSON is the custom unmarshaler for NetworkInterfaceReference struct. -func (nir *NetworkInterfaceReference) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err +// Creates a new instance of the ResourceURIListIterator type. 
+func NewResourceURIListIterator(page ResourceURIListPage) ResourceURIListIterator { + return ResourceURIListIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (rul ResourceURIList) IsEmpty() bool { + return rul.Value == nil || len(*rul.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (rul ResourceURIList) hasNextLink() bool { + return rul.NextLink != nil && len(*rul.NextLink) != 0 +} + +// resourceURIListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (rul ResourceURIList) resourceURIListPreparer(ctx context.Context) (*http.Request, error) { + if !rul.hasNextLink() { + return nil, nil } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var networkInterfaceReferenceProperties NetworkInterfaceReferenceProperties - err = json.Unmarshal(*v, &networkInterfaceReferenceProperties) - if err != nil { - return err - } - nir.NetworkInterfaceReferenceProperties = &networkInterfaceReferenceProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - nir.ID = &ID + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(rul.NextLink))) +} + +// ResourceURIListPage contains a page of string values. +type ResourceURIListPage struct { + fn func(context.Context, ResourceURIList) (ResourceURIList, error) + rul ResourceURIList +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ResourceURIListPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ResourceURIListPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.rul) + if err != nil { + return err + } + page.rul = next + if !next.hasNextLink() || !next.IsEmpty() { + break } } - return nil } -// NetworkInterfaceReferenceProperties describes a network interface reference properties. -type NetworkInterfaceReferenceProperties struct { - // Primary - Specifies the primary network interface in case the virtual machine has more than 1 network interface. - Primary *bool `json:"primary,omitempty"` +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *ResourceURIListPage) Next() error { + return page.NextWithContext(context.Background()) } -// NetworkProfile specifies the network interfaces of the virtual machine. -type NetworkProfile struct { - // NetworkInterfaces - Specifies the list of resource Ids for the network interfaces associated with the virtual machine. - NetworkInterfaces *[]NetworkInterfaceReference `json:"networkInterfaces,omitempty"` +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ResourceURIListPage) NotDone() bool { + return !page.rul.IsEmpty() } -// OperationListResult the List Compute Operation operation response. 
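// Illustrative only, not part of the vendored SDK diff: a sketch of the value-level
// iterator for ResourceURIList, the type above that lists IDs of resources encrypted with
// a disk encryption set. How the first page is fetched from the service is out of scope
// and assumed; an empty page built with NewResourceURIListPage keeps the example
// self-contained. The vendored import path is an assumption.
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

// printEncryptedResourceIDs walks every string value across all pages using the iterator
// surface defined in this diff.
func printEncryptedResourceIDs(ctx context.Context, page compute.ResourceURIListPage) error {
	iter := compute.NewResourceURIListIterator(page)
	for iter.NotDone() {
		fmt.Println(iter.Value())
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	page := compute.NewResourceURIListPage(compute.ResourceURIList{}, func(_ context.Context, _ compute.ResourceURIList) (compute.ResourceURIList, error) {
		return compute.ResourceURIList{}, nil
	})
	if err := printEncryptedResourceIDs(context.Background(), page); err != nil {
		panic(err)
	}
}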
-type OperationListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The list of compute operations - Value *[]OperationValue `json:"value,omitempty"` +// Response returns the raw server response from the last page request. +func (page ResourceURIListPage) Response() ResourceURIList { + return page.rul } -// MarshalJSON is the custom marshaler for OperationListResult. -func (olr OperationListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) +// Values returns the slice of values for the current page or nil if there are no values. +func (page ResourceURIListPage) Values() []string { + if page.rul.IsEmpty() { + return nil + } + return *page.rul.Value } -// OperationValue describes the properties of a Compute Operation value. -type OperationValue struct { - // Origin - READ-ONLY; The origin of the compute operation. - Origin *string `json:"origin,omitempty"` - // Name - READ-ONLY; The name of the compute operation. - Name *string `json:"name,omitempty"` - *OperationValueDisplay `json:"display,omitempty"` +// Creates a new instance of the ResourceURIListPage type. +func NewResourceURIListPage(cur ResourceURIList, getNextPage func(context.Context, ResourceURIList) (ResourceURIList, error)) ResourceURIListPage { + return ResourceURIListPage{ + fn: getNextPage, + rul: cur, + } } -// MarshalJSON is the custom marshaler for OperationValue. -func (ov OperationValue) MarshalJSON() ([]byte, error) { +// RestorePoint restore Point details. +type RestorePoint struct { + autorest.Response `json:"-"` + *RestorePointProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for RestorePoint. +func (rp RestorePoint) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if ov.OperationValueDisplay != nil { - objectMap["display"] = ov.OperationValueDisplay + if rp.RestorePointProperties != nil { + objectMap["properties"] = rp.RestorePointProperties } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for OperationValue struct. -func (ov *OperationValue) UnmarshalJSON(body []byte) error { +// UnmarshalJSON is the custom unmarshaler for RestorePoint struct. 
+func (rp *RestorePoint) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { @@ -7800,14 +12485,23 @@ func (ov *OperationValue) UnmarshalJSON(body []byte) error { } for k, v := range m { switch k { - case "origin": + case "properties": if v != nil { - var origin string - err = json.Unmarshal(*v, &origin) + var restorePointProperties RestorePointProperties + err = json.Unmarshal(*v, &restorePointProperties) if err != nil { return err } - ov.Origin = &origin + rp.RestorePointProperties = &restorePointProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + rp.ID = &ID } case "name": if v != nil { @@ -7816,209 +12510,56 @@ func (ov *OperationValue) UnmarshalJSON(body []byte) error { if err != nil { return err } - ov.Name = &name + rp.Name = &name } - case "display": + case "type": if v != nil { - var operationValueDisplay OperationValueDisplay - err = json.Unmarshal(*v, &operationValueDisplay) + var typeVar string + err = json.Unmarshal(*v, &typeVar) if err != nil { return err } - ov.OperationValueDisplay = &operationValueDisplay + rp.Type = &typeVar } } } - return nil -} - -// OperationValueDisplay describes the properties of a Compute Operation Value Display. -type OperationValueDisplay struct { - // Operation - READ-ONLY; The display name of the compute operation. - Operation *string `json:"operation,omitempty"` - // Resource - READ-ONLY; The display name of the resource the operation applies to. - Resource *string `json:"resource,omitempty"` - // Description - READ-ONLY; The description of the operation. - Description *string `json:"description,omitempty"` - // Provider - READ-ONLY; The resource provider for the operation. - Provider *string `json:"provider,omitempty"` -} - -// MarshalJSON is the custom marshaler for OperationValueDisplay. -func (ovd OperationValueDisplay) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// OrchestrationServiceStateInput the input for OrchestrationServiceState -type OrchestrationServiceStateInput struct { - // ServiceName - The name of the service. Possible values include: 'OrchestrationServiceNamesAutomaticRepairs' - ServiceName OrchestrationServiceNames `json:"serviceName,omitempty"` - // Action - The action to be performed. Possible values include: 'OrchestrationServiceStateActionResume', 'OrchestrationServiceStateActionSuspend' - Action OrchestrationServiceStateAction `json:"action,omitempty"` -} - -// OrchestrationServiceSummary summary for an orchestration service of a virtual machine scale set. -type OrchestrationServiceSummary struct { - // ServiceName - READ-ONLY; The name of the service. Possible values include: 'OrchestrationServiceNamesAutomaticRepairs', 'OrchestrationServiceNamesDummyOrchestrationServiceName' - ServiceName OrchestrationServiceNames `json:"serviceName,omitempty"` - // ServiceState - READ-ONLY; The current state of the service. Possible values include: 'OrchestrationServiceStateNotRunning', 'OrchestrationServiceStateRunning', 'OrchestrationServiceStateSuspended' - ServiceState OrchestrationServiceState `json:"serviceState,omitempty"` -} - -// MarshalJSON is the custom marshaler for OrchestrationServiceSummary. 
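// Illustrative only, not part of the vendored SDK diff: a sketch of how the RestorePoint
// unmarshaler above flattens the ARM "properties" envelope. The JSON literal is a made-up,
// minimal payload; only the top-level fields shown in this diff (id, name, type,
// properties) are exercised. The vendored import path is an assumption.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

func main() {
	payload := []byte(`{
		"id":   "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example/providers/Microsoft.Compute/restorePointCollections/example/restorePoints/rp1",
		"name": "rp1",
		"type": "Microsoft.Compute/restorePointCollections/restorePoints",
		"properties": {}
	}`)

	var rp compute.RestorePoint
	if err := json.Unmarshal(payload, &rp); err != nil {
		panic(err)
	}
	// The custom UnmarshalJSON lifts "properties" into the embedded RestorePointProperties
	// and fills the READ-ONLY id/name/type fields.
	fmt.Println(*rp.Name, rp.RestorePointProperties != nil) // rp1 true
}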
-func (oss OrchestrationServiceSummary) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// OSDisk specifies information about the operating system disk used by the virtual machine.

For -// more information about disks, see [About disks and VHDs for Azure virtual -// machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). -type OSDisk struct { - // OsType - This property allows you to specify the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD.

Possible values are:

**Windows**

**Linux**. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux' - OsType OperatingSystemTypes `json:"osType,omitempty"` - // EncryptionSettings - Specifies the encryption settings for the OS Disk.

Minimum api-version: 2015-06-15 - EncryptionSettings *DiskEncryptionSettings `json:"encryptionSettings,omitempty"` - // Name - The disk name. - Name *string `json:"name,omitempty"` - // Vhd - The virtual hard disk. - Vhd *VirtualHardDisk `json:"vhd,omitempty"` - // Image - The source user image virtual hard disk. The virtual hard disk will be copied before being attached to the virtual machine. If SourceImage is provided, the destination virtual hard drive must not exist. - Image *VirtualHardDisk `json:"image,omitempty"` - // Caching - Specifies the caching requirements.

Possible values are:

**None**

**ReadOnly**

**ReadWrite**

Default: **None** for Standard storage. **ReadOnly** for Premium storage. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' - Caching CachingTypes `json:"caching,omitempty"` - // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk. - WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` - // DiffDiskSettings - Specifies the ephemeral Disk Settings for the operating system disk used by the virtual machine. - DiffDiskSettings *DiffDiskSettings `json:"diffDiskSettings,omitempty"` - // CreateOption - Specifies how the virtual machine should be created.

Possible values are:

**Attach** \u2013 This value is used when you are using a specialized disk to create the virtual machine.

**FromImage** \u2013 This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach' - CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` - // DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image.

This value cannot be larger than 1023 GB - DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` - // ManagedDisk - The managed disk parameters. - ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"` -} - -// OSDiskImage contains the os disk image information. -type OSDiskImage struct { - // OperatingSystem - The operating system of the osDiskImage. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux' - OperatingSystem OperatingSystemTypes `json:"operatingSystem,omitempty"` -} - -// OSDiskImageEncryption contains encryption settings for an OS disk image. -type OSDiskImageEncryption struct { - // DiskEncryptionSetID - A relative URI containing the resource ID of the disk encryption set. - DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty"` -} - -// OSProfile specifies the operating system settings for the virtual machine. Some of the settings cannot -// be changed once VM is provisioned. -type OSProfile struct { - // ComputerName - Specifies the host OS name of the virtual machine.

This name cannot be updated after the VM is created.

**Max-length (Windows):** 15 characters

**Max-length (Linux):** 64 characters.

For naming conventions and restrictions see [Azure infrastructure services implementation guidelines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-infrastructure-subscription-accounts-guidelines?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json#1-naming-conventions). - ComputerName *string `json:"computerName,omitempty"` - // AdminUsername - Specifies the name of the administrator account.

This property cannot be updated after the VM is created.

**Windows-only restriction:** Cannot end in "."

**Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5".

**Minimum-length (Linux):** 1 character

**Max-length (Linux):** 64 characters

**Max-length (Windows):** 20 characters

  • For root access to the Linux VM, see [Using root privileges on Linux virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-use-root-privileges?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
  • For a list of built-in system users on Linux that should not be used in this field, see [Selecting User Names for Linux on Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-usernames?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) - AdminUsername *string `json:"adminUsername,omitempty"` - // AdminPassword - Specifies the password of the administrator account.

    **Minimum-length (Windows):** 8 characters

    **Minimum-length (Linux):** 6 characters

    **Max-length (Windows):** 123 characters

    **Max-length (Linux):** 72 characters

    **Complexity requirements:** 3 out of 4 conditions below need to be fulfilled
    Has lower characters
    Has upper characters
    Has a digit
    Has a special character (Regex match [\W_])

    **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22", "iloveyou!"

    For resetting the password, see [How to reset the Remote Desktop service or its login password in a Windows VM](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-reset-rdp?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    For resetting root password, see [Manage users, SSH, and check or repair disks on Azure Linux VMs using the VMAccess Extension](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-vmaccess-extension?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json#reset-root-password) - AdminPassword *string `json:"adminPassword,omitempty"` - // CustomData - Specifies a base-64 encoded string of custom data. The base-64 encoded string is decoded to a binary array that is saved as a file on the Virtual Machine. The maximum length of the binary array is 65535 bytes.

    **Note: Do not pass any secrets or passwords in customData property**

    This property cannot be updated after the VM is created.

    customData is passed to the VM to be saved as a file, for more information see [Custom Data on Azure VMs](https://azure.microsoft.com/en-us/blog/custom-data-and-cloud-init-on-windows-azure/)

    For using cloud-init for your Linux VM, see [Using cloud-init to customize a Linux VM during creation](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-init?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) - CustomData *string `json:"customData,omitempty"` - // WindowsConfiguration - Specifies Windows operating system settings on the virtual machine. - WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"` - // LinuxConfiguration - Specifies the Linux operating system settings on the virtual machine.

    For a list of supported Linux distributions, see [Linux on Azure-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-endorsed-distros?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)

    For running non-endorsed distributions, see [Information for Non-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-create-upload-generic?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json). - LinuxConfiguration *LinuxConfiguration `json:"linuxConfiguration,omitempty"` - // Secrets - Specifies set of certificates that should be installed onto the virtual machine. - Secrets *[]VaultSecretGroup `json:"secrets,omitempty"` - // AllowExtensionOperations - Specifies whether extension operations should be allowed on the virtual machine.

    This may only be set to False when no extensions are present on the virtual machine. - AllowExtensionOperations *bool `json:"allowExtensionOperations,omitempty"` - // RequireGuestProvisionSignal - Specifies whether the guest provision signal is required to infer provision success of the virtual machine. **Note: This property is for private testing only, and all customers must not set the property to false.** - RequireGuestProvisionSignal *bool `json:"requireGuestProvisionSignal,omitempty"` -} - -// PatchInstallationDetail information about a specific patch that was encountered during an installation -// action. -type PatchInstallationDetail struct { - // PatchID - READ-ONLY; A unique identifier for the patch. - PatchID *string `json:"patchId,omitempty"` - // Name - READ-ONLY; The friendly name of the patch. - Name *string `json:"name,omitempty"` - // Version - READ-ONLY; The version string of the package. It may conform to Semantic Versioning. Only applies to Linux. - Version *string `json:"version,omitempty"` - // KbID - READ-ONLY; The KBID of the patch. Only applies to Windows patches. - KbID *string `json:"kbId,omitempty"` - // Classifications - READ-ONLY; The classification(s) of the patch as provided by the patch publisher. - Classifications *[]string `json:"classifications,omitempty"` - // InstallationState - READ-ONLY; The state of the patch after the installation operation completed. Possible values include: 'PatchInstallationStateUnknown', 'PatchInstallationStateInstalled', 'PatchInstallationStateFailed', 'PatchInstallationStateExcluded', 'PatchInstallationStateNotSelected', 'PatchInstallationStatePending' - InstallationState PatchInstallationState `json:"installationState,omitempty"` -} - -// MarshalJSON is the custom marshaler for PatchInstallationDetail. -func (pid PatchInstallationDetail) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// PatchSettings specifies settings related to VM Guest Patching on Windows. -type PatchSettings struct { - // PatchMode - Specifies the mode of VM Guest Patching to IaaS virtual machine.

    Possible values are:

    **Manual** - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the property WindowsConfiguration.enableAutomaticUpdates must be false

    **AutomaticByOS** - The virtual machine will automatically be updated by the OS. The property WindowsConfiguration.enableAutomaticUpdates must be true.

    **AutomaticByPlatform** - the virtual machine will automatically updated by the platform. The properties provisionVMAgent and WindowsConfiguration.enableAutomaticUpdates must be true. Possible values include: 'WindowsVMGuestPatchModeManual', 'WindowsVMGuestPatchModeAutomaticByOS', 'WindowsVMGuestPatchModeAutomaticByPlatform' - PatchMode WindowsVMGuestPatchMode `json:"patchMode,omitempty"` - // EnableHotpatching - Enables customers to patch their Azure VMs without requiring a reboot. For enableHotpatching, the 'provisionVMAgent' must be set to true and 'patchMode' must be set to 'AutomaticByPlatform'. - EnableHotpatching *bool `json:"enableHotpatching,omitempty"` -} - -// Plan specifies information about the marketplace image used to create the virtual machine. This element -// is only used for marketplace images. Before you can use a marketplace image from an API, you must enable -// the image for programmatic use. In the Azure portal, find the marketplace image that you want to use -// and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and -// then click **Save**. -type Plan struct { - // Name - The plan ID. - Name *string `json:"name,omitempty"` - // Publisher - The publisher ID. - Publisher *string `json:"publisher,omitempty"` - // Product - Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference element. - Product *string `json:"product,omitempty"` - // PromotionCode - The promotion code. - PromotionCode *string `json:"promotionCode,omitempty"` -} - -// PrivateEndpoint the Private Endpoint resource. -type PrivateEndpoint struct { - // ID - READ-ONLY; The ARM identifier for Private Endpoint - ID *string `json:"id,omitempty"` -} - -// MarshalJSON is the custom marshaler for PrivateEndpoint. -func (peVar PrivateEndpoint) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) + return nil } -// PrivateEndpointConnection the Private Endpoint Connection resource. -type PrivateEndpointConnection struct { - autorest.Response `json:"-"` - // PrivateEndpointConnectionProperties - Resource properties. - *PrivateEndpointConnectionProperties `json:"properties,omitempty"` - // ID - READ-ONLY; private endpoint connection Id +// RestorePointCollection create or update Restore Point collection parameters. +type RestorePointCollection struct { + autorest.Response `json:"-"` + *RestorePointCollectionProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Resource Id ID *string `json:"id,omitempty"` - // Name - READ-ONLY; private endpoint connection name + // Name - READ-ONLY; Resource name Name *string `json:"name,omitempty"` - // Type - READ-ONLY; private endpoint connection type + // Type - READ-ONLY; Resource type Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` } -// MarshalJSON is the custom marshaler for PrivateEndpointConnection. -func (pec PrivateEndpointConnection) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for RestorePointCollection. 
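// Illustrative only, not part of the vendored SDK diff: a sketch of the PatchSettings
// combination described in the doc comment above. Per that text, 'AutomaticByPlatform'
// requires provisionVMAgent and WindowsConfiguration.enableAutomaticUpdates to be true,
// and enableHotpatching additionally requires patchMode 'AutomaticByPlatform'. It assumes
// the same type and constants exist unchanged in the updated vendored compute package.
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Hot patching without reboots is only valid together with AutomaticByPlatform
	// (and with the VM agent provisioned and automatic updates enabled on the VM).
	settings := compute.PatchSettings{
		PatchMode:         compute.WindowsVMGuestPatchModeAutomaticByPlatform,
		EnableHotpatching: to.BoolPtr(true),
	}
	fmt.Println(settings.PatchMode, *settings.EnableHotpatching)
}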
+func (RPCVar RestorePointCollection) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if pec.PrivateEndpointConnectionProperties != nil { - objectMap["properties"] = pec.PrivateEndpointConnectionProperties + if RPCVar.RestorePointCollectionProperties != nil { + objectMap["properties"] = RPCVar.RestorePointCollectionProperties + } + if RPCVar.Location != nil { + objectMap["location"] = RPCVar.Location + } + if RPCVar.Tags != nil { + objectMap["tags"] = RPCVar.Tags } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for PrivateEndpointConnection struct. -func (pec *PrivateEndpointConnection) UnmarshalJSON(body []byte) error { +// UnmarshalJSON is the custom unmarshaler for RestorePointCollection struct. +func (RPCVar *RestorePointCollection) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { @@ -8028,12 +12569,12 @@ func (pec *PrivateEndpointConnection) UnmarshalJSON(body []byte) error { switch k { case "properties": if v != nil { - var privateEndpointConnectionProperties PrivateEndpointConnectionProperties - err = json.Unmarshal(*v, &privateEndpointConnectionProperties) + var restorePointCollectionProperties RestorePointCollectionProperties + err = json.Unmarshal(*v, &restorePointCollectionProperties) if err != nil { return err } - pec.PrivateEndpointConnectionProperties = &privateEndpointConnectionProperties + RPCVar.RestorePointCollectionProperties = &restorePointCollectionProperties } case "id": if v != nil { @@ -8042,7 +12583,7 @@ func (pec *PrivateEndpointConnection) UnmarshalJSON(body []byte) error { if err != nil { return err } - pec.ID = &ID + RPCVar.ID = &ID } case "name": if v != nil { @@ -8051,7 +12592,7 @@ func (pec *PrivateEndpointConnection) UnmarshalJSON(body []byte) error { if err != nil { return err } - pec.Name = &name + RPCVar.Name = &name } case "type": if v != nil { @@ -8060,7 +12601,25 @@ func (pec *PrivateEndpointConnection) UnmarshalJSON(body []byte) error { if err != nil { return err } - pec.Type = &typeVar + RPCVar.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + RPCVar.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + RPCVar.Tags = tags } } } @@ -8068,27 +12627,27 @@ func (pec *PrivateEndpointConnection) UnmarshalJSON(body []byte) error { return nil } -// PrivateEndpointConnectionListResult a list of private link resources -type PrivateEndpointConnectionListResult struct { +// RestorePointCollectionListResult the List restore point collection operation response. +type RestorePointCollectionListResult struct { autorest.Response `json:"-"` - // Value - Array of private endpoint connections - Value *[]PrivateEndpointConnection `json:"value,omitempty"` - // NextLink - The uri to fetch the next page of snapshots. Call ListNext() with this to fetch the next page of snapshots. + // Value - Gets the list of restore point collections. + Value *[]RestorePointCollection `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of RestorePointCollections. Call ListNext() with this to fetch the next page of RestorePointCollections NextLink *string `json:"nextLink,omitempty"` } -// PrivateEndpointConnectionListResultIterator provides access to a complete listing of -// PrivateEndpointConnection values. 
-type PrivateEndpointConnectionListResultIterator struct { +// RestorePointCollectionListResultIterator provides access to a complete listing of RestorePointCollection +// values. +type RestorePointCollectionListResultIterator struct { i int - page PrivateEndpointConnectionListResultPage + page RestorePointCollectionListResultPage } // NextWithContext advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. -func (iter *PrivateEndpointConnectionListResultIterator) NextWithContext(ctx context.Context) (err error) { +func (iter *RestorePointCollectionListResultIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionListResultIterator.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointCollectionListResultIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { @@ -8113,67 +12672,67 @@ func (iter *PrivateEndpointConnectionListResultIterator) NextWithContext(ctx con // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (iter *PrivateEndpointConnectionListResultIterator) Next() error { +func (iter *RestorePointCollectionListResultIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. -func (iter PrivateEndpointConnectionListResultIterator) NotDone() bool { +func (iter RestorePointCollectionListResultIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. -func (iter PrivateEndpointConnectionListResultIterator) Response() PrivateEndpointConnectionListResult { +func (iter RestorePointCollectionListResultIterator) Response() RestorePointCollectionListResult { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. -func (iter PrivateEndpointConnectionListResultIterator) Value() PrivateEndpointConnection { +func (iter RestorePointCollectionListResultIterator) Value() RestorePointCollection { if !iter.page.NotDone() { - return PrivateEndpointConnection{} + return RestorePointCollection{} } return iter.page.Values()[iter.i] } -// Creates a new instance of the PrivateEndpointConnectionListResultIterator type. -func NewPrivateEndpointConnectionListResultIterator(page PrivateEndpointConnectionListResultPage) PrivateEndpointConnectionListResultIterator { - return PrivateEndpointConnectionListResultIterator{page: page} +// Creates a new instance of the RestorePointCollectionListResultIterator type. +func NewRestorePointCollectionListResultIterator(page RestorePointCollectionListResultPage) RestorePointCollectionListResultIterator { + return RestorePointCollectionListResultIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. -func (peclr PrivateEndpointConnectionListResult) IsEmpty() bool { - return peclr.Value == nil || len(*peclr.Value) == 0 +func (rpclr RestorePointCollectionListResult) IsEmpty() bool { + return rpclr.Value == nil || len(*rpclr.Value) == 0 } // hasNextLink returns true if the NextLink is not empty. 
-func (peclr PrivateEndpointConnectionListResult) hasNextLink() bool { - return peclr.NextLink != nil && len(*peclr.NextLink) != 0 +func (rpclr RestorePointCollectionListResult) hasNextLink() bool { + return rpclr.NextLink != nil && len(*rpclr.NextLink) != 0 } -// privateEndpointConnectionListResultPreparer prepares a request to retrieve the next set of results. +// restorePointCollectionListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. -func (peclr PrivateEndpointConnectionListResult) privateEndpointConnectionListResultPreparer(ctx context.Context) (*http.Request, error) { - if !peclr.hasNextLink() { +func (rpclr RestorePointCollectionListResult) restorePointCollectionListResultPreparer(ctx context.Context) (*http.Request, error) { + if !rpclr.hasNextLink() { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), - autorest.WithBaseURL(to.String(peclr.NextLink))) + autorest.WithBaseURL(to.String(rpclr.NextLink))) } -// PrivateEndpointConnectionListResultPage contains a page of PrivateEndpointConnection values. -type PrivateEndpointConnectionListResultPage struct { - fn func(context.Context, PrivateEndpointConnectionListResult) (PrivateEndpointConnectionListResult, error) - peclr PrivateEndpointConnectionListResult +// RestorePointCollectionListResultPage contains a page of RestorePointCollection values. +type RestorePointCollectionListResultPage struct { + fn func(context.Context, RestorePointCollectionListResult) (RestorePointCollectionListResult, error) + rpclr RestorePointCollectionListResult } // NextWithContext advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. -func (page *PrivateEndpointConnectionListResultPage) NextWithContext(ctx context.Context) (err error) { +func (page *RestorePointCollectionListResultPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionListResultPage.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointCollectionListResultPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { @@ -8183,11 +12742,11 @@ func (page *PrivateEndpointConnectionListResultPage) NextWithContext(ctx context }() } for { - next, err := page.fn(ctx, page.peclr) + next, err := page.fn(ctx, page.rpclr) if err != nil { return err } - page.peclr = next + page.rpclr = next if !next.hasNextLink() || !next.IsEmpty() { break } @@ -8198,81 +12757,132 @@ func (page *PrivateEndpointConnectionListResultPage) NextWithContext(ctx context // Next advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (page *PrivateEndpointConnectionListResultPage) Next() error { +func (page *RestorePointCollectionListResultPage) Next() error { return page.NextWithContext(context.Background()) } // NotDone returns true if the page enumeration should be started or is not yet complete. -func (page PrivateEndpointConnectionListResultPage) NotDone() bool { - return !page.peclr.IsEmpty() +func (page RestorePointCollectionListResultPage) NotDone() bool { + return !page.rpclr.IsEmpty() } // Response returns the raw server response from the last page request. 
-func (page PrivateEndpointConnectionListResultPage) Response() PrivateEndpointConnectionListResult { - return page.peclr +func (page RestorePointCollectionListResultPage) Response() RestorePointCollectionListResult { + return page.rpclr } // Values returns the slice of values for the current page or nil if there are no values. -func (page PrivateEndpointConnectionListResultPage) Values() []PrivateEndpointConnection { - if page.peclr.IsEmpty() { +func (page RestorePointCollectionListResultPage) Values() []RestorePointCollection { + if page.rpclr.IsEmpty() { return nil } - return *page.peclr.Value + return *page.rpclr.Value } -// Creates a new instance of the PrivateEndpointConnectionListResultPage type. -func NewPrivateEndpointConnectionListResultPage(cur PrivateEndpointConnectionListResult, getNextPage func(context.Context, PrivateEndpointConnectionListResult) (PrivateEndpointConnectionListResult, error)) PrivateEndpointConnectionListResultPage { - return PrivateEndpointConnectionListResultPage{ +// Creates a new instance of the RestorePointCollectionListResultPage type. +func NewRestorePointCollectionListResultPage(cur RestorePointCollectionListResult, getNextPage func(context.Context, RestorePointCollectionListResult) (RestorePointCollectionListResult, error)) RestorePointCollectionListResultPage { + return RestorePointCollectionListResultPage{ fn: getNextPage, - peclr: cur, + rpclr: cur, } } -// PrivateEndpointConnectionProperties properties of the PrivateEndpointConnectProperties. -type PrivateEndpointConnectionProperties struct { - // PrivateEndpoint - READ-ONLY; The resource of private end point. - PrivateEndpoint *PrivateEndpoint `json:"privateEndpoint,omitempty"` - // PrivateLinkServiceConnectionState - A collection of information about the state of the connection between DiskAccess and Virtual Network. - PrivateLinkServiceConnectionState *PrivateLinkServiceConnectionState `json:"privateLinkServiceConnectionState,omitempty"` - // ProvisioningState - The provisioning state of the private endpoint connection resource. Possible values include: 'PrivateEndpointConnectionProvisioningStateSucceeded', 'PrivateEndpointConnectionProvisioningStateCreating', 'PrivateEndpointConnectionProvisioningStateDeleting', 'PrivateEndpointConnectionProvisioningStateFailed' - ProvisioningState PrivateEndpointConnectionProvisioningState `json:"provisioningState,omitempty"` +// RestorePointCollectionProperties the restore point collection properties. +type RestorePointCollectionProperties struct { + Source *RestorePointCollectionSourceProperties `json:"source,omitempty"` + // ProvisioningState - READ-ONLY; The provisioning state of the restore point collection. + ProvisioningState *string `json:"provisioningState,omitempty"` + // RestorePointCollectionID - READ-ONLY; The unique id of the restore point collection. + RestorePointCollectionID *string `json:"restorePointCollectionId,omitempty"` + // RestorePoints - READ-ONLY; A list containing all restore points created under this restore point collection. + RestorePoints *[]RestorePoint `json:"restorePoints,omitempty"` } -// MarshalJSON is the custom marshaler for PrivateEndpointConnectionProperties. -func (pecp PrivateEndpointConnectionProperties) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for RestorePointCollectionProperties. 
+func (rpcp RestorePointCollectionProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if pecp.PrivateLinkServiceConnectionState != nil { - objectMap["privateLinkServiceConnectionState"] = pecp.PrivateLinkServiceConnectionState - } - if pecp.ProvisioningState != "" { - objectMap["provisioningState"] = pecp.ProvisioningState + if rpcp.Source != nil { + objectMap["source"] = rpcp.Source } return json.Marshal(objectMap) } -// PrivateLinkResource a private link resource -type PrivateLinkResource struct { - // PrivateLinkResourceProperties - Resource properties. - *PrivateLinkResourceProperties `json:"properties,omitempty"` - // ID - READ-ONLY; private link resource Id +// RestorePointCollectionsDeleteFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type RestorePointCollectionsDeleteFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(RestorePointCollectionsClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RestorePointCollectionsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RestorePointCollectionsDeleteFuture.Result. +func (future *RestorePointCollectionsDeleteFuture) result(client RestorePointCollectionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.RestorePointCollectionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// RestorePointCollectionSourceProperties the properties of the source resource that this restore point +// collection is created from. +type RestorePointCollectionSourceProperties struct { + // Location - READ-ONLY; Location of the source resource used to create this restore point collection. + Location *string `json:"location,omitempty"` + // ID - Resource Id of the source resource used to create this restore point collection ID *string `json:"id,omitempty"` - // Name - READ-ONLY; private link resource name - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; private link resource type - Type *string `json:"type,omitempty"` } -// MarshalJSON is the custom marshaler for PrivateLinkResource. -func (plr PrivateLinkResource) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for RestorePointCollectionSourceProperties. +func (rpcsp RestorePointCollectionSourceProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if plr.PrivateLinkResourceProperties != nil { - objectMap["properties"] = plr.PrivateLinkResourceProperties + if rpcsp.ID != nil { + objectMap["id"] = rpcsp.ID } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for PrivateLinkResource struct. -func (plr *PrivateLinkResource) UnmarshalJSON(body []byte) error { +// RestorePointCollectionUpdate update Restore Point collection parameters. 
+type RestorePointCollectionUpdate struct { + *RestorePointCollectionProperties `json:"properties,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for RestorePointCollectionUpdate. +func (rpcu RestorePointCollectionUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if rpcu.RestorePointCollectionProperties != nil { + objectMap["properties"] = rpcu.RestorePointCollectionProperties + } + if rpcu.Tags != nil { + objectMap["tags"] = rpcu.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for RestorePointCollectionUpdate struct. +func (rpcu *RestorePointCollectionUpdate) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { @@ -8282,211 +12892,288 @@ func (plr *PrivateLinkResource) UnmarshalJSON(body []byte) error { switch k { case "properties": if v != nil { - var privateLinkResourceProperties PrivateLinkResourceProperties - err = json.Unmarshal(*v, &privateLinkResourceProperties) - if err != nil { - return err - } - plr.PrivateLinkResourceProperties = &privateLinkResourceProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - plr.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) + var restorePointCollectionProperties RestorePointCollectionProperties + err = json.Unmarshal(*v, &restorePointCollectionProperties) if err != nil { return err } - plr.Name = &name + rpcu.RestorePointCollectionProperties = &restorePointCollectionProperties } - case "type": + case "tags": if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) + var tags map[string]*string + err = json.Unmarshal(*v, &tags) if err != nil { return err } - plr.Type = &typeVar + rpcu.Tags = tags } } } - return nil + return nil +} + +// RestorePointProperties the restore point properties. +type RestorePointProperties struct { + // ExcludeDisks - List of disk resource ids that the customer wishes to exclude from the restore point. If no disks are specified, all disks will be included. + ExcludeDisks *[]APIEntityReference `json:"excludeDisks,omitempty"` + // SourceMetadata - READ-ONLY; Gets the details of the VM captured at the time of the restore point creation. + SourceMetadata *RestorePointSourceMetadata `json:"sourceMetadata,omitempty"` + // ProvisioningState - READ-ONLY; Gets the provisioning state of the restore point. + ProvisioningState *string `json:"provisioningState,omitempty"` + // ConsistencyMode - READ-ONLY; Gets the consistency mode for the restore point. Please refer to https://aka.ms/RestorePoints for more details. Possible values include: 'ConsistencyModeTypesCrashConsistent', 'ConsistencyModeTypesFileSystemConsistent', 'ConsistencyModeTypesApplicationConsistent' + ConsistencyMode ConsistencyModeTypes `json:"consistencyMode,omitempty"` + // ProvisioningDetails - READ-ONLY; Gets the provisioning details set by the server during Create restore point operation. + ProvisioningDetails *RestorePointProvisioningDetails `json:"provisioningDetails,omitempty"` +} + +// MarshalJSON is the custom marshaler for RestorePointProperties. 
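As a rough illustration of how the update model above is shaped, a hypothetical payload that retags a collection and sets its source. Only field names taken from this hunk are used; the resource ID is a placeholder and the `to.*` pointer helpers come from `github.com/Azure/go-autorest/autorest/to`.

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" // assumed vendored path
	"github.com/Azure/go-autorest/autorest/to"
)

// exampleUpdate builds a RestorePointCollectionUpdate as defined in this diff:
// tags plus the embedded properties, where Source.ID is the writable field.
func exampleUpdate(sourceVMID string) compute.RestorePointCollectionUpdate {
	return compute.RestorePointCollectionUpdate{
		RestorePointCollectionProperties: &compute.RestorePointCollectionProperties{
			Source: &compute.RestorePointCollectionSourceProperties{
				ID: to.StringPtr(sourceVMID), // e.g. a VM resource ID (placeholder)
			},
		},
		Tags: map[string]*string{
			"environment": to.StringPtr("staging"),
		},
	}
}
```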
+func (rpp RestorePointProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if rpp.ExcludeDisks != nil { + objectMap["excludeDisks"] = rpp.ExcludeDisks + } + return json.Marshal(objectMap) +} + +// RestorePointProvisioningDetails restore Point Provisioning details. +type RestorePointProvisioningDetails struct { + // CreationTime - Gets the creation time of the restore point. + CreationTime *date.Time `json:"creationTime,omitempty"` + // TotalUsedSizeInBytes - Gets the total size of the data in all the disks which are part of the restore point. + TotalUsedSizeInBytes *int64 `json:"totalUsedSizeInBytes,omitempty"` + // StatusCode - Gets the status of the Create restore point operation. + StatusCode *int32 `json:"statusCode,omitempty"` + // StatusMessage - Gets the status message of the Create restore point operation. + StatusMessage *string `json:"statusMessage,omitempty"` +} + +// RestorePointsCreateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type RestorePointsCreateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(RestorePointsClient) (RestorePoint, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RestorePointsCreateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RestorePointsCreateFuture.Result. +func (future *RestorePointsCreateFuture) result(client RestorePointsClient) (rp RestorePoint, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.RestorePointsCreateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + rp.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.RestorePointsCreateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if rp.Response.Response, err = future.GetResult(sender); err == nil && rp.Response.Response.StatusCode != http.StatusNoContent { + rp, err = client.CreateResponder(rp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.RestorePointsCreateFuture", "Result", rp.Response.Response, "Failure responding to request") + } + } + return +} + +// RestorePointsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type RestorePointsDeleteFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(RestorePointsClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *RestorePointsDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for RestorePointsDeleteFuture.Result. 
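The `*Future` types above follow the usual autorest long-running-operation pattern: `UnmarshalJSON` rehydrates the polling state and wires `Result` to the default implementation. A hedged sketch of driving `RestorePointsCreateFuture` end to end; the `Create` signature and the promoted `client.Client` field are assumptions about the generated `RestorePointsClient`, which is not part of this hunk.

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" // assumed vendored path
)

// createRestorePoint starts the long-running Create operation, blocks until it
// completes, then decodes the final RestorePoint via the future's Result func.
func createRestorePoint(ctx context.Context, client compute.RestorePointsClient,
	resourceGroup, collection, name string, params compute.RestorePoint) (compute.RestorePoint, error) {

	future, err := client.Create(ctx, resourceGroup, collection, name, params) // assumed signature
	if err != nil {
		return compute.RestorePoint{}, err
	}
	// WaitForCompletionRef (from the embedded azure.FutureAPI) polls until the
	// operation is done, mirroring the DoneWithContext check in result() above.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return compute.RestorePoint{}, err
	}
	return future.Result(client)
}
```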
+func (future *RestorePointsDeleteFuture) result(client RestorePointsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.RestorePointsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("compute.RestorePointsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// RestorePointSourceMetadata describes the properties of the Virtual Machine for which the restore point +// was created. The properties provided are a subset and the snapshot of the overall Virtual Machine +// properties captured at the time of the restore point creation. +type RestorePointSourceMetadata struct { + // HardwareProfile - Gets the hardware profile. + HardwareProfile *HardwareProfile `json:"hardwareProfile,omitempty"` + // StorageProfile - Gets the storage profile. + StorageProfile *RestorePointSourceVMStorageProfile `json:"storageProfile,omitempty"` + // OsProfile - Gets the OS profile. + OsProfile *OSProfile `json:"osProfile,omitempty"` + // DiagnosticsProfile - Gets the diagnostics profile. + DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` + // LicenseType - Gets the license type, which is for bring your own license scenario. + LicenseType *string `json:"licenseType,omitempty"` + // VMID - Gets the virtual machine unique id. + VMID *string `json:"vmId,omitempty"` + // SecurityProfile - Gets the security profile. + SecurityProfile *SecurityProfile `json:"securityProfile,omitempty"` + // Location - Location of the VM from which the restore point was created. + Location *string `json:"location,omitempty"` } -// PrivateLinkResourceListResult a list of private link resources -type PrivateLinkResourceListResult struct { - autorest.Response `json:"-"` - // Value - Array of private link resources - Value *[]PrivateLinkResource `json:"value,omitempty"` +// RestorePointSourceVMDataDisk describes a data disk. +type RestorePointSourceVMDataDisk struct { + // Lun - Gets the logical unit number. + Lun *int32 `json:"lun,omitempty"` + // Name - Gets the disk name. + Name *string `json:"name,omitempty"` + // Caching - Gets the caching type. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' + Caching CachingTypes `json:"caching,omitempty"` + // DiskSizeGB - Gets the initial disk size in GB for blank data disks, and the new desired size for existing OS and Data disks. + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // ManagedDisk - Gets the managed disk details + ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"` + // DiskRestorePoint - Gets the disk restore point Id. + DiskRestorePoint *APIEntityReference `json:"diskRestorePoint,omitempty"` } -// PrivateLinkResourceProperties properties of a private link resource. -type PrivateLinkResourceProperties struct { - // GroupID - READ-ONLY; The private link resource group id. - GroupID *string `json:"groupId,omitempty"` - // RequiredMembers - READ-ONLY; The private link resource required member names. - RequiredMembers *[]string `json:"requiredMembers,omitempty"` - // RequiredZoneNames - The private link resource DNS zone name. - RequiredZoneNames *[]string `json:"requiredZoneNames,omitempty"` +// RestorePointSourceVMOSDisk describes an Operating System disk. 
+type RestorePointSourceVMOSDisk struct { + // OsType - Gets the Operating System type. Possible values include: 'OperatingSystemTypeWindows', 'OperatingSystemTypeLinux' + OsType OperatingSystemType `json:"osType,omitempty"` + // EncryptionSettings - Gets the disk encryption settings. + EncryptionSettings *DiskEncryptionSettings `json:"encryptionSettings,omitempty"` + // Name - Gets the disk name. + Name *string `json:"name,omitempty"` + // Caching - Gets the caching type. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' + Caching CachingTypes `json:"caching,omitempty"` + // DiskSizeGB - Gets the disk size in GB. + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // ManagedDisk - Gets the managed disk details + ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"` + // DiskRestorePoint - Gets the disk restore point Id. + DiskRestorePoint *APIEntityReference `json:"diskRestorePoint,omitempty"` } -// MarshalJSON is the custom marshaler for PrivateLinkResourceProperties. -func (plrp PrivateLinkResourceProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if plrp.RequiredZoneNames != nil { - objectMap["requiredZoneNames"] = plrp.RequiredZoneNames - } - return json.Marshal(objectMap) +// RestorePointSourceVMStorageProfile describes the storage profile. +type RestorePointSourceVMStorageProfile struct { + // OsDisk - Gets the OS disk of the VM captured at the time of the restore point creation. + OsDisk *RestorePointSourceVMOSDisk `json:"osDisk,omitempty"` + // DataDisks - Gets the data disks of the VM captured at the time of the restore point creation. + DataDisks *[]RestorePointSourceVMDataDisk `json:"dataDisks,omitempty"` } -// PrivateLinkServiceConnectionState a collection of information about the state of the connection between -// service consumer and provider. -type PrivateLinkServiceConnectionState struct { - // Status - Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service. Possible values include: 'PrivateEndpointServiceConnectionStatusPending', 'PrivateEndpointServiceConnectionStatusApproved', 'PrivateEndpointServiceConnectionStatusRejected' - Status PrivateEndpointServiceConnectionStatus `json:"status,omitempty"` - // Description - The reason for approval/rejection of the connection. - Description *string `json:"description,omitempty"` - // ActionsRequired - A message indicating if changes on the service provider require any updates on the consumer. - ActionsRequired *string `json:"actionsRequired,omitempty"` +// RetrieveBootDiagnosticsDataResult the SAS URIs of the console screenshot and serial log blobs. +type RetrieveBootDiagnosticsDataResult struct { + autorest.Response `json:"-"` + // ConsoleScreenshotBlobURI - READ-ONLY; The console screenshot blob URI + ConsoleScreenshotBlobURI *string `json:"consoleScreenshotBlobUri,omitempty"` + // SerialConsoleLogBlobURI - READ-ONLY; The serial console log blob URI. + SerialConsoleLogBlobURI *string `json:"serialConsoleLogBlobUri,omitempty"` } -// PropertyUpdatesInProgress properties of the disk for which update is pending. -type PropertyUpdatesInProgress struct { - // TargetTier - The target performance tier of the disk if a tier change operation is in progress. - TargetTier *string `json:"targetTier,omitempty"` +// MarshalJSON is the custom marshaler for RetrieveBootDiagnosticsDataResult. 
+func (rbddr RetrieveBootDiagnosticsDataResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) } -// ProximityPlacementGroup specifies information about the proximity placement group. -type ProximityPlacementGroup struct { +// RoleInstance ... +type RoleInstance struct { autorest.Response `json:"-"` - // ProximityPlacementGroupProperties - Describes the properties of a Proximity Placement Group. - *ProximityPlacementGroupProperties `json:"properties,omitempty"` // ID - READ-ONLY; Resource Id ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Resource name + // Name - READ-ONLY; Resource Name. Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Resource type + // Type - READ-ONLY; Resource Type. Type *string `json:"type,omitempty"` - // Location - Resource location + // Location - READ-ONLY; Resource Location. Location *string `json:"location,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` + // Tags - READ-ONLY; Resource tags. + Tags map[string]*string `json:"tags"` + Sku *InstanceSku `json:"sku,omitempty"` + Properties *RoleInstanceProperties `json:"properties,omitempty"` } -// MarshalJSON is the custom marshaler for ProximityPlacementGroup. -func (ppg ProximityPlacementGroup) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for RoleInstance. +func (ri RoleInstance) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if ppg.ProximityPlacementGroupProperties != nil { - objectMap["properties"] = ppg.ProximityPlacementGroupProperties - } - if ppg.Location != nil { - objectMap["location"] = ppg.Location + if ri.Sku != nil { + objectMap["sku"] = ri.Sku } - if ppg.Tags != nil { - objectMap["tags"] = ppg.Tags + if ri.Properties != nil { + objectMap["properties"] = ri.Properties } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for ProximityPlacementGroup struct. -func (ppg *ProximityPlacementGroup) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var proximityPlacementGroupProperties ProximityPlacementGroupProperties - err = json.Unmarshal(*v, &proximityPlacementGroupProperties) - if err != nil { - return err - } - ppg.ProximityPlacementGroupProperties = &proximityPlacementGroupProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - ppg.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - ppg.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - ppg.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - ppg.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - ppg.Tags = tags - } - } - } +// RoleInstanceInstanceView the instance view of the role instance. +type RoleInstanceInstanceView struct { + autorest.Response `json:"-"` + // PlatformUpdateDomain - READ-ONLY; The Update Domain. + PlatformUpdateDomain *int32 `json:"platformUpdateDomain,omitempty"` + // PlatformFaultDomain - READ-ONLY; The Fault Domain. 
+ PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty"` + // PrivateID - READ-ONLY; Specifies a unique identifier generated internally for the cloud service associated with this role instance.

    NOTE: If you are using Azure Diagnostics extension, this property can be used as 'DeploymentId' for querying details. + PrivateID *string `json:"privateId,omitempty"` + // Statuses - READ-ONLY + Statuses *[]ResourceInstanceViewStatus `json:"statuses,omitempty"` +} - return nil +// MarshalJSON is the custom marshaler for RoleInstanceInstanceView. +func (riiv RoleInstanceInstanceView) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) } -// ProximityPlacementGroupListResult the List Proximity Placement Group operation response. -type ProximityPlacementGroupListResult struct { +// RoleInstanceListResult ... +type RoleInstanceListResult struct { autorest.Response `json:"-"` - // Value - The list of proximity placement groups - Value *[]ProximityPlacementGroup `json:"value,omitempty"` - // NextLink - The URI to fetch the next page of proximity placement groups. - NextLink *string `json:"nextLink,omitempty"` + Value *[]RoleInstance `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` } -// ProximityPlacementGroupListResultIterator provides access to a complete listing of -// ProximityPlacementGroup values. -type ProximityPlacementGroupListResultIterator struct { +// RoleInstanceListResultIterator provides access to a complete listing of RoleInstance values. +type RoleInstanceListResultIterator struct { i int - page ProximityPlacementGroupListResultPage + page RoleInstanceListResultPage } // NextWithContext advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. -func (iter *ProximityPlacementGroupListResultIterator) NextWithContext(ctx context.Context) (err error) { +func (iter *RoleInstanceListResultIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ProximityPlacementGroupListResultIterator.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/RoleInstanceListResultIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { @@ -8511,67 +13198,67 @@ func (iter *ProximityPlacementGroupListResultIterator) NextWithContext(ctx conte // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (iter *ProximityPlacementGroupListResultIterator) Next() error { +func (iter *RoleInstanceListResultIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. -func (iter ProximityPlacementGroupListResultIterator) NotDone() bool { +func (iter RoleInstanceListResultIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. -func (iter ProximityPlacementGroupListResultIterator) Response() ProximityPlacementGroupListResult { +func (iter RoleInstanceListResultIterator) Response() RoleInstanceListResult { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. 
-func (iter ProximityPlacementGroupListResultIterator) Value() ProximityPlacementGroup { +func (iter RoleInstanceListResultIterator) Value() RoleInstance { if !iter.page.NotDone() { - return ProximityPlacementGroup{} + return RoleInstance{} } return iter.page.Values()[iter.i] } -// Creates a new instance of the ProximityPlacementGroupListResultIterator type. -func NewProximityPlacementGroupListResultIterator(page ProximityPlacementGroupListResultPage) ProximityPlacementGroupListResultIterator { - return ProximityPlacementGroupListResultIterator{page: page} +// Creates a new instance of the RoleInstanceListResultIterator type. +func NewRoleInstanceListResultIterator(page RoleInstanceListResultPage) RoleInstanceListResultIterator { + return RoleInstanceListResultIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. -func (ppglr ProximityPlacementGroupListResult) IsEmpty() bool { - return ppglr.Value == nil || len(*ppglr.Value) == 0 +func (rilr RoleInstanceListResult) IsEmpty() bool { + return rilr.Value == nil || len(*rilr.Value) == 0 } // hasNextLink returns true if the NextLink is not empty. -func (ppglr ProximityPlacementGroupListResult) hasNextLink() bool { - return ppglr.NextLink != nil && len(*ppglr.NextLink) != 0 +func (rilr RoleInstanceListResult) hasNextLink() bool { + return rilr.NextLink != nil && len(*rilr.NextLink) != 0 } -// proximityPlacementGroupListResultPreparer prepares a request to retrieve the next set of results. +// roleInstanceListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. -func (ppglr ProximityPlacementGroupListResult) proximityPlacementGroupListResultPreparer(ctx context.Context) (*http.Request, error) { - if !ppglr.hasNextLink() { +func (rilr RoleInstanceListResult) roleInstanceListResultPreparer(ctx context.Context) (*http.Request, error) { + if !rilr.hasNextLink() { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), - autorest.WithBaseURL(to.String(ppglr.NextLink))) + autorest.WithBaseURL(to.String(rilr.NextLink))) } -// ProximityPlacementGroupListResultPage contains a page of ProximityPlacementGroup values. -type ProximityPlacementGroupListResultPage struct { - fn func(context.Context, ProximityPlacementGroupListResult) (ProximityPlacementGroupListResult, error) - ppglr ProximityPlacementGroupListResult +// RoleInstanceListResultPage contains a page of RoleInstance values. +type RoleInstanceListResultPage struct { + fn func(context.Context, RoleInstanceListResult) (RoleInstanceListResult, error) + rilr RoleInstanceListResult } // NextWithContext advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. 
-func (page *ProximityPlacementGroupListResultPage) NextWithContext(ctx context.Context) (err error) { +func (page *RoleInstanceListResultPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ProximityPlacementGroupListResultPage.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/RoleInstanceListResultPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { @@ -8581,11 +13268,11 @@ func (page *ProximityPlacementGroupListResultPage) NextWithContext(ctx context.C }() } for { - next, err := page.fn(ctx, page.ppglr) + next, err := page.fn(ctx, page.rilr) if err != nil { return err } - page.ppglr = next + page.rilr = next if !next.hasNextLink() || !next.IsEmpty() { break } @@ -8596,183 +13283,133 @@ func (page *ProximityPlacementGroupListResultPage) NextWithContext(ctx context.C // Next advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (page *ProximityPlacementGroupListResultPage) Next() error { +func (page *RoleInstanceListResultPage) Next() error { return page.NextWithContext(context.Background()) } // NotDone returns true if the page enumeration should be started or is not yet complete. -func (page ProximityPlacementGroupListResultPage) NotDone() bool { - return !page.ppglr.IsEmpty() +func (page RoleInstanceListResultPage) NotDone() bool { + return !page.rilr.IsEmpty() } // Response returns the raw server response from the last page request. -func (page ProximityPlacementGroupListResultPage) Response() ProximityPlacementGroupListResult { - return page.ppglr +func (page RoleInstanceListResultPage) Response() RoleInstanceListResult { + return page.rilr } // Values returns the slice of values for the current page or nil if there are no values. -func (page ProximityPlacementGroupListResultPage) Values() []ProximityPlacementGroup { - if page.ppglr.IsEmpty() { +func (page RoleInstanceListResultPage) Values() []RoleInstance { + if page.rilr.IsEmpty() { return nil } - return *page.ppglr.Value -} - -// Creates a new instance of the ProximityPlacementGroupListResultPage type. -func NewProximityPlacementGroupListResultPage(cur ProximityPlacementGroupListResult, getNextPage func(context.Context, ProximityPlacementGroupListResult) (ProximityPlacementGroupListResult, error)) ProximityPlacementGroupListResultPage { - return ProximityPlacementGroupListResultPage{ - fn: getNextPage, - ppglr: cur, - } -} - -// ProximityPlacementGroupProperties describes the properties of a Proximity Placement Group. -type ProximityPlacementGroupProperties struct { - // ProximityPlacementGroupType - Specifies the type of the proximity placement group.

    Possible values are:

    **Standard** : Co-locate resources within an Azure region or Availability Zone.

    **Ultra** : For future use. Possible values include: 'ProximityPlacementGroupTypeStandard', 'ProximityPlacementGroupTypeUltra' - ProximityPlacementGroupType ProximityPlacementGroupType `json:"proximityPlacementGroupType,omitempty"` - // VirtualMachines - READ-ONLY; A list of references to all virtual machines in the proximity placement group. - VirtualMachines *[]SubResourceWithColocationStatus `json:"virtualMachines,omitempty"` - // VirtualMachineScaleSets - READ-ONLY; A list of references to all virtual machine scale sets in the proximity placement group. - VirtualMachineScaleSets *[]SubResourceWithColocationStatus `json:"virtualMachineScaleSets,omitempty"` - // AvailabilitySets - READ-ONLY; A list of references to all availability sets in the proximity placement group. - AvailabilitySets *[]SubResourceWithColocationStatus `json:"availabilitySets,omitempty"` - // ColocationStatus - Describes colocation status of the Proximity Placement Group. - ColocationStatus *InstanceViewStatus `json:"colocationStatus,omitempty"` -} - -// MarshalJSON is the custom marshaler for ProximityPlacementGroupProperties. -func (ppgp ProximityPlacementGroupProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ppgp.ProximityPlacementGroupType != "" { - objectMap["proximityPlacementGroupType"] = ppgp.ProximityPlacementGroupType - } - if ppgp.ColocationStatus != nil { - objectMap["colocationStatus"] = ppgp.ColocationStatus - } - return json.Marshal(objectMap) -} - -// ProximityPlacementGroupUpdate specifies information about the proximity placement group. -type ProximityPlacementGroupUpdate struct { - // Tags - Resource tags - Tags map[string]*string `json:"tags"` + return *page.rilr.Value } -// MarshalJSON is the custom marshaler for ProximityPlacementGroupUpdate. -func (ppgu ProximityPlacementGroupUpdate) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ppgu.Tags != nil { - objectMap["tags"] = ppgu.Tags +// Creates a new instance of the RoleInstanceListResultPage type. +func NewRoleInstanceListResultPage(cur RoleInstanceListResult, getNextPage func(context.Context, RoleInstanceListResult) (RoleInstanceListResult, error)) RoleInstanceListResultPage { + return RoleInstanceListResultPage{ + fn: getNextPage, + rilr: cur, } - return json.Marshal(objectMap) } -// ProxyOnlyResource the ProxyOnly Resource model definition. -type ProxyOnlyResource struct { - // ID - READ-ONLY; Resource Id - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Resource name - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Resource type - Type *string `json:"type,omitempty"` +// RoleInstanceNetworkProfile describes the network profile for the role instance. +type RoleInstanceNetworkProfile struct { + // NetworkInterfaces - READ-ONLY; Specifies the list of resource Ids for the network interfaces associated with the role instance. + NetworkInterfaces *[]SubResource `json:"networkInterfaces,omitempty"` } -// MarshalJSON is the custom marshaler for ProxyOnlyResource. -func (por ProxyOnlyResource) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for RoleInstanceNetworkProfile. +func (rinp RoleInstanceNetworkProfile) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) return json.Marshal(objectMap) } -// PurchasePlan used for establishing the purchase context of any 3rd Party artifact through MarketPlace. -type PurchasePlan struct { - // Publisher - The publisher ID. 
- Publisher *string `json:"publisher,omitempty"` - // Name - The plan ID. - Name *string `json:"name,omitempty"` - // Product - Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference element. - Product *string `json:"product,omitempty"` - // PromotionCode - The Offer Promotion Code. - PromotionCode *string `json:"promotionCode,omitempty"` +// RoleInstanceProperties ... +type RoleInstanceProperties struct { + NetworkProfile *RoleInstanceNetworkProfile `json:"networkProfile,omitempty"` + InstanceView *RoleInstanceInstanceView `json:"instanceView,omitempty"` } -// RecommendedMachineConfiguration the properties describe the recommended machine configuration for this -// Image Definition. These properties are updatable. -type RecommendedMachineConfiguration struct { - VCPUs *ResourceRange `json:"vCPUs,omitempty"` - Memory *ResourceRange `json:"memory,omitempty"` +// RoleInstances specifies a list of role instances from the cloud service. +type RoleInstances struct { + // RoleInstances - List of cloud service role instance names. Value of '*' will signify all role instances of the cloud service. + RoleInstances *[]string `json:"roleInstances,omitempty"` } -// RecoveryWalkResponse response after calling a manual recovery walk -type RecoveryWalkResponse struct { - autorest.Response `json:"-"` - // WalkPerformed - READ-ONLY; Whether the recovery walk was performed - WalkPerformed *bool `json:"walkPerformed,omitempty"` - // NextPlatformUpdateDomain - READ-ONLY; The next update domain that needs to be walked. Null means walk spanning all update domains has been completed - NextPlatformUpdateDomain *int32 `json:"nextPlatformUpdateDomain,omitempty"` +// RollbackStatusInfo information about rollback on failed VM instances after a OS Upgrade operation. +type RollbackStatusInfo struct { + // SuccessfullyRolledbackInstanceCount - READ-ONLY; The number of instances which have been successfully rolled back. + SuccessfullyRolledbackInstanceCount *int32 `json:"successfullyRolledbackInstanceCount,omitempty"` + // FailedRolledbackInstanceCount - READ-ONLY; The number of instances which failed to rollback. + FailedRolledbackInstanceCount *int32 `json:"failedRolledbackInstanceCount,omitempty"` + // RollbackError - READ-ONLY; Error details if OS rollback failed. + RollbackError *APIError `json:"rollbackError,omitempty"` } -// MarshalJSON is the custom marshaler for RecoveryWalkResponse. -func (rwr RecoveryWalkResponse) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for RollbackStatusInfo. +func (rsi RollbackStatusInfo) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) return json.Marshal(objectMap) } -// RegionalReplicationStatus this is the regional replication status. -type RegionalReplicationStatus struct { - // Region - READ-ONLY; The region to which the gallery Image Version is being replicated to. - Region *string `json:"region,omitempty"` - // State - READ-ONLY; This is the regional replication state. Possible values include: 'ReplicationStateUnknown', 'ReplicationStateReplicating', 'ReplicationStateCompleted', 'ReplicationStateFailed' - State ReplicationState `json:"state,omitempty"` - // Details - READ-ONLY; The details of the replication status. - Details *string `json:"details,omitempty"` - // Progress - READ-ONLY; It indicates progress of the replication job. - Progress *int32 `json:"progress,omitempty"` +// RollingUpgradePolicy the configuration parameters used while performing a rolling upgrade. 
+type RollingUpgradePolicy struct { + // MaxBatchInstancePercent - The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. As this is a maximum, unhealthy instances in previous or future batches can cause the percentage of instances in a batch to decrease to ensure higher reliability. The default value for this parameter is 20%. + MaxBatchInstancePercent *int32 `json:"maxBatchInstancePercent,omitempty"` + // MaxUnhealthyInstancePercent - The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy, either as a result of being upgraded, or by being found in an unhealthy state by the virtual machine health checks before the rolling upgrade aborts. This constraint will be checked prior to starting any batch. The default value for this parameter is 20%. + MaxUnhealthyInstancePercent *int32 `json:"maxUnhealthyInstancePercent,omitempty"` + // MaxUnhealthyUpgradedInstancePercent - The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. This check will happen after each batch is upgraded. If this percentage is ever exceeded, the rolling update aborts. The default value for this parameter is 20%. + MaxUnhealthyUpgradedInstancePercent *int32 `json:"maxUnhealthyUpgradedInstancePercent,omitempty"` + // PauseTimeBetweenBatches - The wait time between completing the update for all virtual machines in one batch and starting the next batch. The time duration should be specified in ISO 8601 format. The default value is 0 seconds (PT0S). + PauseTimeBetweenBatches *string `json:"pauseTimeBetweenBatches,omitempty"` + // EnableCrossZoneUpgrade - Allow VMSS to ignore AZ boundaries when constructing upgrade batches. Take into consideration the Update Domain and maxBatchInstancePercent to determine the batch size. + EnableCrossZoneUpgrade *bool `json:"enableCrossZoneUpgrade,omitempty"` + // PrioritizeUnhealthyInstances - Upgrade all unhealthy instances in a scale set before any healthy instances. + PrioritizeUnhealthyInstances *bool `json:"prioritizeUnhealthyInstances,omitempty"` } -// MarshalJSON is the custom marshaler for RegionalReplicationStatus. -func (rrs RegionalReplicationStatus) MarshalJSON() ([]byte, error) { +// RollingUpgradeProgressInfo information about the number of virtual machine instances in each upgrade +// state. +type RollingUpgradeProgressInfo struct { + // SuccessfulInstanceCount - READ-ONLY; The number of instances that have been successfully upgraded. + SuccessfulInstanceCount *int32 `json:"successfulInstanceCount,omitempty"` + // FailedInstanceCount - READ-ONLY; The number of instances that have failed to be upgraded successfully. + FailedInstanceCount *int32 `json:"failedInstanceCount,omitempty"` + // InProgressInstanceCount - READ-ONLY; The number of instances that are currently being upgraded. + InProgressInstanceCount *int32 `json:"inProgressInstanceCount,omitempty"` + // PendingInstanceCount - READ-ONLY; The number of instances that have not yet begun to be upgraded. + PendingInstanceCount *int32 `json:"pendingInstanceCount,omitempty"` +} + +// MarshalJSON is the custom marshaler for RollingUpgradeProgressInfo. +func (rupi RollingUpgradeProgressInfo) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) return json.Marshal(objectMap) } -// ReplicationStatus this is the replication status of the gallery Image Version. 
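Purely illustrative: the `RollingUpgradePolicy` above with its documented 20% thresholds and `PT0S` pause made explicit; the two boolean knobs have no stated default in the doc comments and are set here only to show their shape. `to.Int32Ptr`, `to.BoolPtr` and `to.StringPtr` are the go-autorest pointer helpers.

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" // assumed vendored path
	"github.com/Azure/go-autorest/autorest/to"
)

// defaultRollingUpgradePolicy spells out the defaults documented on the struct
// fields in this diff: 20% batch size, 20% unhealthy thresholds, no pause.
func defaultRollingUpgradePolicy() compute.RollingUpgradePolicy {
	return compute.RollingUpgradePolicy{
		MaxBatchInstancePercent:             to.Int32Ptr(20),
		MaxUnhealthyInstancePercent:         to.Int32Ptr(20),
		MaxUnhealthyUpgradedInstancePercent: to.Int32Ptr(20),
		PauseTimeBetweenBatches:             to.StringPtr("PT0S"), // ISO 8601 duration
		EnableCrossZoneUpgrade:              to.BoolPtr(false),    // illustrative value
		PrioritizeUnhealthyInstances:        to.BoolPtr(false),    // illustrative value
	}
}
```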
-type ReplicationStatus struct { - // AggregatedState - READ-ONLY; This is the aggregated replication status based on all the regional replication status flags. Possible values include: 'AggregatedReplicationStateUnknown', 'AggregatedReplicationStateInProgress', 'AggregatedReplicationStateCompleted', 'AggregatedReplicationStateFailed' - AggregatedState AggregatedReplicationState `json:"aggregatedState,omitempty"` - // Summary - READ-ONLY; This is a summary of replication status for each region. - Summary *[]RegionalReplicationStatus `json:"summary,omitempty"` +// RollingUpgradeRunningStatus information about the current running state of the overall upgrade. +type RollingUpgradeRunningStatus struct { + // Code - READ-ONLY; Code indicating the current status of the upgrade. Possible values include: 'RollingUpgradeStatusCodeRollingForward', 'RollingUpgradeStatusCodeCancelled', 'RollingUpgradeStatusCodeCompleted', 'RollingUpgradeStatusCodeFaulted' + Code RollingUpgradeStatusCode `json:"code,omitempty"` + // StartTime - READ-ONLY; Start time of the upgrade. + StartTime *date.Time `json:"startTime,omitempty"` + // LastAction - READ-ONLY; The last action performed on the rolling upgrade. Possible values include: 'RollingUpgradeActionTypeStart', 'RollingUpgradeActionTypeCancel' + LastAction RollingUpgradeActionType `json:"lastAction,omitempty"` + // LastActionTime - READ-ONLY; Last action time of the upgrade. + LastActionTime *date.Time `json:"lastActionTime,omitempty"` } -// MarshalJSON is the custom marshaler for ReplicationStatus. -func (rs ReplicationStatus) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for RollingUpgradeRunningStatus. +func (rurs RollingUpgradeRunningStatus) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) return json.Marshal(objectMap) } -// RequestRateByIntervalInput api request input for LogAnalytics getRequestRateByInterval Api. -type RequestRateByIntervalInput struct { - // IntervalLength - Interval value in minutes used to create LogAnalytics call rate logs. Possible values include: 'IntervalInMinsThreeMins', 'IntervalInMinsFiveMins', 'IntervalInMinsThirtyMins', 'IntervalInMinsSixtyMins' - IntervalLength IntervalInMins `json:"intervalLength,omitempty"` - // BlobContainerSasURI - SAS Uri of the logging blob container to which LogAnalytics Api writes output logs to. - BlobContainerSasURI *string `json:"blobContainerSasUri,omitempty"` - // FromTime - From time of the query - FromTime *date.Time `json:"fromTime,omitempty"` - // ToTime - To time of the query - ToTime *date.Time `json:"toTime,omitempty"` - // GroupByThrottlePolicy - Group query result by Throttle Policy applied. - GroupByThrottlePolicy *bool `json:"groupByThrottlePolicy,omitempty"` - // GroupByOperationName - Group query result by Operation Name. - GroupByOperationName *bool `json:"groupByOperationName,omitempty"` - // GroupByResourceName - Group query result by Resource Name. - GroupByResourceName *bool `json:"groupByResourceName,omitempty"` - // GroupByClientApplicationID - Group query result by Client Application ID. - GroupByClientApplicationID *bool `json:"groupByClientApplicationId,omitempty"` - // GroupByUserAgent - Group query result by User Agent. - GroupByUserAgent *bool `json:"groupByUserAgent,omitempty"` -} - -// Resource the Resource model definition. -type Resource struct { +// RollingUpgradeStatusInfo the status of the latest virtual machine scale set rolling upgrade. 
+type RollingUpgradeStatusInfo struct { + autorest.Response `json:"-"` + *RollingUpgradeStatusInfoProperties `json:"properties,omitempty"` // ID - READ-ONLY; Resource Id ID *string `json:"id,omitempty"` // Name - READ-ONLY; Resource name @@ -8785,178 +13422,179 @@ type Resource struct { Tags map[string]*string `json:"tags"` } -// MarshalJSON is the custom marshaler for Resource. -func (r Resource) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for RollingUpgradeStatusInfo. +func (rusi RollingUpgradeStatusInfo) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if r.Location != nil { - objectMap["location"] = r.Location + if rusi.RollingUpgradeStatusInfoProperties != nil { + objectMap["properties"] = rusi.RollingUpgradeStatusInfoProperties } - if r.Tags != nil { - objectMap["tags"] = r.Tags + if rusi.Location != nil { + objectMap["location"] = rusi.Location + } + if rusi.Tags != nil { + objectMap["tags"] = rusi.Tags } return json.Marshal(objectMap) } -// ResourceRange describes the resource range. -type ResourceRange struct { - // Min - The minimum number of the resource. - Min *int32 `json:"min,omitempty"` - // Max - The maximum number of the resource. - Max *int32 `json:"max,omitempty"` -} - -// ResourceSku describes an available Compute SKU. -type ResourceSku struct { - // ResourceType - READ-ONLY; The type of resource the SKU applies to. - ResourceType *string `json:"resourceType,omitempty"` - // Name - READ-ONLY; The name of SKU. - Name *string `json:"name,omitempty"` - // Tier - READ-ONLY; Specifies the tier of virtual machines in a scale set.

    Possible Values:

    **Standard**

    **Basic** - Tier *string `json:"tier,omitempty"` - // Size - READ-ONLY; The Size of the SKU. - Size *string `json:"size,omitempty"` - // Family - READ-ONLY; The Family of this particular SKU. - Family *string `json:"family,omitempty"` - // Kind - READ-ONLY; The Kind of resources that are supported in this SKU. - Kind *string `json:"kind,omitempty"` - // Capacity - READ-ONLY; Specifies the number of virtual machines in the scale set. - Capacity *ResourceSkuCapacity `json:"capacity,omitempty"` - // Locations - READ-ONLY; The set of locations that the SKU is available. - Locations *[]string `json:"locations,omitempty"` - // LocationInfo - READ-ONLY; A list of locations and availability zones in those locations where the SKU is available. - LocationInfo *[]ResourceSkuLocationInfo `json:"locationInfo,omitempty"` - // APIVersions - READ-ONLY; The api versions that support this SKU. - APIVersions *[]string `json:"apiVersions,omitempty"` - // Costs - READ-ONLY; Metadata for retrieving price info. - Costs *[]ResourceSkuCosts `json:"costs,omitempty"` - // Capabilities - READ-ONLY; A name value pair to describe the capability. - Capabilities *[]ResourceSkuCapabilities `json:"capabilities,omitempty"` - // Restrictions - READ-ONLY; The restrictions because of which SKU cannot be used. This is empty if there are no restrictions. - Restrictions *[]ResourceSkuRestrictions `json:"restrictions,omitempty"` -} - -// MarshalJSON is the custom marshaler for ResourceSku. -func (rs ResourceSku) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// ResourceSkuCapabilities describes The SKU capabilities object. -type ResourceSkuCapabilities struct { - // Name - READ-ONLY; An invariant to describe the feature. - Name *string `json:"name,omitempty"` - // Value - READ-ONLY; An invariant if the feature is measured by quantity. - Value *string `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for ResourceSkuCapabilities. -func (rsc ResourceSkuCapabilities) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// ResourceSkuCapacity describes scaling information of a SKU. -type ResourceSkuCapacity struct { - // Minimum - READ-ONLY; The minimum capacity. - Minimum *int64 `json:"minimum,omitempty"` - // Maximum - READ-ONLY; The maximum capacity that can be set. - Maximum *int64 `json:"maximum,omitempty"` - // Default - READ-ONLY; The default capacity. - Default *int64 `json:"default,omitempty"` - // ScaleType - READ-ONLY; The scale type applicable to the sku. Possible values include: 'ResourceSkuCapacityScaleTypeAutomatic', 'ResourceSkuCapacityScaleTypeManual', 'ResourceSkuCapacityScaleTypeNone' - ScaleType ResourceSkuCapacityScaleType `json:"scaleType,omitempty"` -} - -// MarshalJSON is the custom marshaler for ResourceSkuCapacity. -func (rsc ResourceSkuCapacity) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// ResourceSkuCosts describes metadata for retrieving price info. -type ResourceSkuCosts struct { - // MeterID - READ-ONLY; Used for querying price from commerce. - MeterID *string `json:"meterID,omitempty"` - // Quantity - READ-ONLY; The multiplier is needed to extend the base metered cost. - Quantity *int64 `json:"quantity,omitempty"` - // ExtendedUnit - READ-ONLY; An invariant to show the extended unit. 
- ExtendedUnit *string `json:"extendedUnit,omitempty"` -} +// UnmarshalJSON is the custom unmarshaler for RollingUpgradeStatusInfo struct. +func (rusi *RollingUpgradeStatusInfo) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var rollingUpgradeStatusInfoProperties RollingUpgradeStatusInfoProperties + err = json.Unmarshal(*v, &rollingUpgradeStatusInfoProperties) + if err != nil { + return err + } + rusi.RollingUpgradeStatusInfoProperties = &rollingUpgradeStatusInfoProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + rusi.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + rusi.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + rusi.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + rusi.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + rusi.Tags = tags + } + } + } -// MarshalJSON is the custom marshaler for ResourceSkuCosts. -func (rsc ResourceSkuCosts) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) + return nil } -// ResourceSkuLocationInfo ... -type ResourceSkuLocationInfo struct { - // Location - READ-ONLY; Location of the SKU - Location *string `json:"location,omitempty"` - // Zones - READ-ONLY; List of availability zones where the SKU is supported. - Zones *[]string `json:"zones,omitempty"` - // ZoneDetails - READ-ONLY; Details of capabilities available to a SKU in specific zones. - ZoneDetails *[]ResourceSkuZoneDetails `json:"zoneDetails,omitempty"` +// RollingUpgradeStatusInfoProperties the status of the latest virtual machine scale set rolling upgrade. +type RollingUpgradeStatusInfoProperties struct { + // Policy - READ-ONLY; The rolling upgrade policies applied for this upgrade. + Policy *RollingUpgradePolicy `json:"policy,omitempty"` + // RunningStatus - READ-ONLY; Information about the current running state of the overall upgrade. + RunningStatus *RollingUpgradeRunningStatus `json:"runningStatus,omitempty"` + // Progress - READ-ONLY; Information about the number of virtual machine instances in each upgrade state. + Progress *RollingUpgradeProgressInfo `json:"progress,omitempty"` + // Error - READ-ONLY; Error details for this upgrade, if there are any. + Error *APIError `json:"error,omitempty"` } -// MarshalJSON is the custom marshaler for ResourceSkuLocationInfo. -func (rsli ResourceSkuLocationInfo) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for RollingUpgradeStatusInfoProperties. +func (rusip RollingUpgradeStatusInfoProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) return json.Marshal(objectMap) } -// ResourceSkuRestrictionInfo ... -type ResourceSkuRestrictionInfo struct { - // Locations - READ-ONLY; Locations where the SKU is restricted - Locations *[]string `json:"locations,omitempty"` - // Zones - READ-ONLY; List of availability zones where the SKU is restricted. 
- Zones *[]string `json:"zones,omitempty"` +// RunCommandDocument describes the properties of a Run Command. +type RunCommandDocument struct { + autorest.Response `json:"-"` + // Script - The script to be executed. + Script *[]string `json:"script,omitempty"` + // Parameters - The parameters used by the script. + Parameters *[]RunCommandParameterDefinition `json:"parameters,omitempty"` + // Schema - The VM run command schema. + Schema *string `json:"$schema,omitempty"` + // ID - The VM run command id. + ID *string `json:"id,omitempty"` + // OsType - The Operating System type. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // Label - The VM run command label. + Label *string `json:"label,omitempty"` + // Description - The VM run command description. + Description *string `json:"description,omitempty"` } -// MarshalJSON is the custom marshaler for ResourceSkuRestrictionInfo. -func (rsri ResourceSkuRestrictionInfo) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) +// RunCommandDocumentBase describes the properties of a Run Command metadata. +type RunCommandDocumentBase struct { + // Schema - The VM run command schema. + Schema *string `json:"$schema,omitempty"` + // ID - The VM run command id. + ID *string `json:"id,omitempty"` + // OsType - The Operating System type. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // Label - The VM run command label. + Label *string `json:"label,omitempty"` + // Description - The VM run command description. + Description *string `json:"description,omitempty"` } -// ResourceSkuRestrictions describes scaling information of a SKU. -type ResourceSkuRestrictions struct { - // Type - READ-ONLY; The type of restrictions. Possible values include: 'ResourceSkuRestrictionsTypeLocation', 'ResourceSkuRestrictionsTypeZone' - Type ResourceSkuRestrictionsType `json:"type,omitempty"` - // Values - READ-ONLY; The value of restrictions. If the restriction type is set to location. This would be different locations where the SKU is restricted. - Values *[]string `json:"values,omitempty"` - // RestrictionInfo - READ-ONLY; The information about the restriction where the SKU cannot be used. - RestrictionInfo *ResourceSkuRestrictionInfo `json:"restrictionInfo,omitempty"` - // ReasonCode - READ-ONLY; The reason for restriction. Possible values include: 'ResourceSkuRestrictionsReasonCodeQuotaID', 'ResourceSkuRestrictionsReasonCodeNotAvailableForSubscription' - ReasonCode ResourceSkuRestrictionsReasonCode `json:"reasonCode,omitempty"` +// RunCommandInput capture Virtual Machine parameters. +type RunCommandInput struct { + // CommandID - The run command id. + CommandID *string `json:"commandId,omitempty"` + // Script - Optional. The script to be executed. When this value is given, the given script will override the default script of the command. + Script *[]string `json:"script,omitempty"` + // Parameters - The run command parameters. + Parameters *[]RunCommandInputParameter `json:"parameters,omitempty"` } -// MarshalJSON is the custom marshaler for ResourceSkuRestrictions. -func (rsr ResourceSkuRestrictions) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) +// RunCommandInputParameter describes the properties of a run command parameter. 
+type RunCommandInputParameter struct { + // Name - The run command parameter name. + Name *string `json:"name,omitempty"` + // Value - The run command parameter value. + Value *string `json:"value,omitempty"` } -// ResourceSkusResult the List Resource Skus operation response. -type ResourceSkusResult struct { +// RunCommandListResult the List Virtual Machine operation response. +type RunCommandListResult struct { autorest.Response `json:"-"` - // Value - The list of skus available for the subscription. - Value *[]ResourceSku `json:"value,omitempty"` - // NextLink - The URI to fetch the next page of Resource Skus. Call ListNext() with this URI to fetch the next page of Resource Skus + // Value - The list of virtual machine run commands. + Value *[]RunCommandDocumentBase `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of run commands. Call ListNext() with this to fetch the next page of run commands. NextLink *string `json:"nextLink,omitempty"` } -// ResourceSkusResultIterator provides access to a complete listing of ResourceSku values. -type ResourceSkusResultIterator struct { +// RunCommandListResultIterator provides access to a complete listing of RunCommandDocumentBase values. +type RunCommandListResultIterator struct { i int - page ResourceSkusResultPage + page RunCommandListResultPage } // NextWithContext advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. -func (iter *ResourceSkusResultIterator) NextWithContext(ctx context.Context) (err error) { +func (iter *RunCommandListResultIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ResourceSkusResultIterator.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/RunCommandListResultIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { @@ -8981,67 +13619,67 @@ func (iter *ResourceSkusResultIterator) NextWithContext(ctx context.Context) (er // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (iter *ResourceSkusResultIterator) Next() error { +func (iter *RunCommandListResultIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. -func (iter ResourceSkusResultIterator) NotDone() bool { +func (iter RunCommandListResultIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. -func (iter ResourceSkusResultIterator) Response() ResourceSkusResult { +func (iter RunCommandListResultIterator) Response() RunCommandListResult { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. -func (iter ResourceSkusResultIterator) Value() ResourceSku { +func (iter RunCommandListResultIterator) Value() RunCommandDocumentBase { if !iter.page.NotDone() { - return ResourceSku{} + return RunCommandDocumentBase{} } return iter.page.Values()[iter.i] } -// Creates a new instance of the ResourceSkusResultIterator type. 
-func NewResourceSkusResultIterator(page ResourceSkusResultPage) ResourceSkusResultIterator { - return ResourceSkusResultIterator{page: page} +// Creates a new instance of the RunCommandListResultIterator type. +func NewRunCommandListResultIterator(page RunCommandListResultPage) RunCommandListResultIterator { + return RunCommandListResultIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. -func (rsr ResourceSkusResult) IsEmpty() bool { - return rsr.Value == nil || len(*rsr.Value) == 0 +func (rclr RunCommandListResult) IsEmpty() bool { + return rclr.Value == nil || len(*rclr.Value) == 0 } // hasNextLink returns true if the NextLink is not empty. -func (rsr ResourceSkusResult) hasNextLink() bool { - return rsr.NextLink != nil && len(*rsr.NextLink) != 0 +func (rclr RunCommandListResult) hasNextLink() bool { + return rclr.NextLink != nil && len(*rclr.NextLink) != 0 } -// resourceSkusResultPreparer prepares a request to retrieve the next set of results. +// runCommandListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. -func (rsr ResourceSkusResult) resourceSkusResultPreparer(ctx context.Context) (*http.Request, error) { - if !rsr.hasNextLink() { +func (rclr RunCommandListResult) runCommandListResultPreparer(ctx context.Context) (*http.Request, error) { + if !rclr.hasNextLink() { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), - autorest.WithBaseURL(to.String(rsr.NextLink))) + autorest.WithBaseURL(to.String(rclr.NextLink))) } -// ResourceSkusResultPage contains a page of ResourceSku values. -type ResourceSkusResultPage struct { - fn func(context.Context, ResourceSkusResult) (ResourceSkusResult, error) - rsr ResourceSkusResult +// RunCommandListResultPage contains a page of RunCommandDocumentBase values. +type RunCommandListResultPage struct { + fn func(context.Context, RunCommandListResult) (RunCommandListResult, error) + rclr RunCommandListResult } // NextWithContext advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. -func (page *ResourceSkusResultPage) NextWithContext(ctx context.Context) (err error) { +func (page *RunCommandListResultPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ResourceSkusResultPage.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/RunCommandListResultPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { @@ -9051,11 +13689,11 @@ func (page *ResourceSkusResultPage) NextWithContext(ctx context.Context) (err er }() } for { - next, err := page.fn(ctx, page.rsr) + next, err := page.fn(ctx, page.rclr) if err != nil { return err } - page.rsr = next + page.rclr = next if !next.hasNextLink() || !next.IsEmpty() { break } @@ -9063,73 +13701,245 @@ func (page *ResourceSkusResultPage) NextWithContext(ctx context.Context) (err er return nil } -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *ResourceSkusResultPage) Next() error { - return page.NextWithContext(context.Background()) +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. 
+func (page *RunCommandListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page RunCommandListResultPage) NotDone() bool { + return !page.rclr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page RunCommandListResultPage) Response() RunCommandListResult { + return page.rclr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page RunCommandListResultPage) Values() []RunCommandDocumentBase { + if page.rclr.IsEmpty() { + return nil + } + return *page.rclr.Value +} + +// Creates a new instance of the RunCommandListResultPage type. +func NewRunCommandListResultPage(cur RunCommandListResult, getNextPage func(context.Context, RunCommandListResult) (RunCommandListResult, error)) RunCommandListResultPage { + return RunCommandListResultPage{ + fn: getNextPage, + rclr: cur, + } +} + +// RunCommandParameterDefinition describes the properties of a run command parameter. +type RunCommandParameterDefinition struct { + // Name - The run command parameter name. + Name *string `json:"name,omitempty"` + // Type - The run command parameter type. + Type *string `json:"type,omitempty"` + // DefaultValue - The run command parameter default value. + DefaultValue *string `json:"defaultValue,omitempty"` + // Required - The run command parameter required. + Required *bool `json:"required,omitempty"` +} + +// RunCommandResult ... +type RunCommandResult struct { + autorest.Response `json:"-"` + // Value - Run command operation response. + Value *[]InstanceViewStatus `json:"value,omitempty"` +} + +// ScaleInPolicy describes a scale-in policy for a virtual machine scale set. +type ScaleInPolicy struct { + // Rules - The rules to be followed when scaling-in a virtual machine scale set.

    Possible values are: **Default** When a virtual machine scale set is scaled in, the scale set will first be balanced across zones if it is a zonal scale set. Then, it will be balanced across Fault Domains as far as possible. Within each Fault Domain, the virtual machines chosen for removal will be the newest ones that are not protected from scale-in. **OldestVM** When a virtual machine scale set is being scaled-in, the oldest virtual machines that are not protected from scale-in will be chosen for removal. For zonal virtual machine scale sets, the scale set will first be balanced across zones. Within each zone, the oldest virtual machines that are not protected will be chosen for removal. **NewestVM** When a virtual machine scale set is being scaled-in, the newest virtual machines that are not protected from scale-in will be chosen for removal. For zonal virtual machine scale sets, the scale set will first be balanced across zones. Within each zone, the newest virtual machines that are not protected will be chosen for removal.
    + Rules *[]VirtualMachineScaleSetScaleInRules `json:"rules,omitempty"` + // ForceDeletion - This property allows you to specify if virtual machines chosen for removal have to be force deleted when a virtual machine scale set is being scaled-in.(Feature in Preview) + ForceDeletion *bool `json:"forceDeletion,omitempty"` +} + +// ScheduledEventsProfile ... +type ScheduledEventsProfile struct { + // TerminateNotificationProfile - Specifies Terminate Scheduled Event related configurations. + TerminateNotificationProfile *TerminateNotificationProfile `json:"terminateNotificationProfile,omitempty"` } -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page ResourceSkusResultPage) NotDone() bool { - return !page.rsr.IsEmpty() +// SecurityProfile specifies the Security profile settings for the virtual machine or virtual machine scale +// set. +type SecurityProfile struct { + // UefiSettings - Specifies the security settings like secure boot and vTPM used while creating the virtual machine.

    Minimum api-version: 2020-12-01 + UefiSettings *UefiSettings `json:"uefiSettings,omitempty"` + // EncryptionAtHost - This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself.

    Default: The Encryption at host will be disabled unless this property is set to true for the resource. + EncryptionAtHost *bool `json:"encryptionAtHost,omitempty"` + // SecurityType - Specifies the SecurityType of the virtual machine. It is set as TrustedLaunch to enable UefiSettings.

    Default: UefiSettings will not be enabled unless this property is set as TrustedLaunch. Possible values include: 'SecurityTypesTrustedLaunch' + SecurityType SecurityTypes `json:"securityType,omitempty"` } -// Response returns the raw server response from the last page request. -func (page ResourceSkusResultPage) Response() ResourceSkusResult { - return page.rsr +// SharedGallery specifies information about the Shared Gallery that you want to create or update. +type SharedGallery struct { + autorest.Response `json:"-"` + *SharedGalleryIdentifier `json:"identifier,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Location - READ-ONLY; Resource location + Location *string `json:"location,omitempty"` } -// Values returns the slice of values for the current page or nil if there are no values. -func (page ResourceSkusResultPage) Values() []ResourceSku { - if page.rsr.IsEmpty() { - return nil +// MarshalJSON is the custom marshaler for SharedGallery. +func (sg SharedGallery) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if sg.SharedGalleryIdentifier != nil { + objectMap["identifier"] = sg.SharedGalleryIdentifier } - return *page.rsr.Value + return json.Marshal(objectMap) } -// Creates a new instance of the ResourceSkusResultPage type. -func NewResourceSkusResultPage(cur ResourceSkusResult, getNextPage func(context.Context, ResourceSkusResult) (ResourceSkusResult, error)) ResourceSkusResultPage { - return ResourceSkusResultPage{ - fn: getNextPage, - rsr: cur, +// UnmarshalJSON is the custom unmarshaler for SharedGallery struct. +func (sg *SharedGallery) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "identifier": + if v != nil { + var sharedGalleryIdentifier SharedGalleryIdentifier + err = json.Unmarshal(*v, &sharedGalleryIdentifier) + if err != nil { + return err + } + sg.SharedGalleryIdentifier = &sharedGalleryIdentifier + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + sg.Name = &name + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + sg.Location = &location + } + } } + + return nil } -// ResourceSkuZoneDetails describes The zonal capabilities of a SKU. -type ResourceSkuZoneDetails struct { - // Name - READ-ONLY; The set of zones that the SKU is available in with the specified capabilities. - Name *[]string `json:"name,omitempty"` - // Capabilities - READ-ONLY; A list of capabilities that are available for the SKU in the specified list of zones. - Capabilities *[]ResourceSkuCapabilities `json:"capabilities,omitempty"` +// SharedGalleryIdentifier the identifier information of shared gallery. +type SharedGalleryIdentifier struct { + // UniqueID - The unique id of this shared gallery. + UniqueID *string `json:"uniqueId,omitempty"` } -// MarshalJSON is the custom marshaler for ResourceSkuZoneDetails. -func (rszd ResourceSkuZoneDetails) MarshalJSON() ([]byte, error) { +// SharedGalleryImage specifies information about the gallery image definition that you want to create or +// update. 
+type SharedGalleryImage struct { + autorest.Response `json:"-"` + *SharedGalleryImageProperties `json:"properties,omitempty"` + *SharedGalleryIdentifier `json:"identifier,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Location - READ-ONLY; Resource location + Location *string `json:"location,omitempty"` +} + +// MarshalJSON is the custom marshaler for SharedGalleryImage. +func (sgi SharedGalleryImage) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) + if sgi.SharedGalleryImageProperties != nil { + objectMap["properties"] = sgi.SharedGalleryImageProperties + } + if sgi.SharedGalleryIdentifier != nil { + objectMap["identifier"] = sgi.SharedGalleryIdentifier + } return json.Marshal(objectMap) } -// ResourceURIList the List resources which are encrypted with the disk encryption set. -type ResourceURIList struct { +// UnmarshalJSON is the custom unmarshaler for SharedGalleryImage struct. +func (sgi *SharedGalleryImage) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var sharedGalleryImageProperties SharedGalleryImageProperties + err = json.Unmarshal(*v, &sharedGalleryImageProperties) + if err != nil { + return err + } + sgi.SharedGalleryImageProperties = &sharedGalleryImageProperties + } + case "identifier": + if v != nil { + var sharedGalleryIdentifier SharedGalleryIdentifier + err = json.Unmarshal(*v, &sharedGalleryIdentifier) + if err != nil { + return err + } + sgi.SharedGalleryIdentifier = &sharedGalleryIdentifier + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + sgi.Name = &name + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + sgi.Location = &location + } + } + } + + return nil +} + +// SharedGalleryImageList the List Shared Gallery Images operation response. +type SharedGalleryImageList struct { autorest.Response `json:"-"` - // Value - A list of IDs or Owner IDs of resources which are encrypted with the disk encryption set. - Value *[]string `json:"value,omitempty"` - // NextLink - The uri to fetch the next page of encrypted resources. Call ListNext() with this to fetch the next page of encrypted resources. + // Value - A list of shared gallery images. + Value *[]SharedGalleryImage `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of shared gallery images. Call ListNext() with this to fetch the next page of shared gallery images. NextLink *string `json:"nextLink,omitempty"` } -// ResourceURIListIterator provides access to a complete listing of string values. -type ResourceURIListIterator struct { +// SharedGalleryImageListIterator provides access to a complete listing of SharedGalleryImage values. +type SharedGalleryImageListIterator struct { i int - page ResourceURIListPage + page SharedGalleryImageListPage } // NextWithContext advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. 
-func (iter *ResourceURIListIterator) NextWithContext(ctx context.Context) (err error) { +func (iter *SharedGalleryImageListIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ResourceURIListIterator.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleryImageListIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { @@ -9154,67 +13964,67 @@ func (iter *ResourceURIListIterator) NextWithContext(ctx context.Context) (err e // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (iter *ResourceURIListIterator) Next() error { +func (iter *SharedGalleryImageListIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. -func (iter ResourceURIListIterator) NotDone() bool { +func (iter SharedGalleryImageListIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. -func (iter ResourceURIListIterator) Response() ResourceURIList { +func (iter SharedGalleryImageListIterator) Response() SharedGalleryImageList { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. -func (iter ResourceURIListIterator) Value() string { +func (iter SharedGalleryImageListIterator) Value() SharedGalleryImage { if !iter.page.NotDone() { - return "" + return SharedGalleryImage{} } return iter.page.Values()[iter.i] } -// Creates a new instance of the ResourceURIListIterator type. -func NewResourceURIListIterator(page ResourceURIListPage) ResourceURIListIterator { - return ResourceURIListIterator{page: page} +// Creates a new instance of the SharedGalleryImageListIterator type. +func NewSharedGalleryImageListIterator(page SharedGalleryImageListPage) SharedGalleryImageListIterator { + return SharedGalleryImageListIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. -func (rul ResourceURIList) IsEmpty() bool { - return rul.Value == nil || len(*rul.Value) == 0 +func (sgil SharedGalleryImageList) IsEmpty() bool { + return sgil.Value == nil || len(*sgil.Value) == 0 } // hasNextLink returns true if the NextLink is not empty. -func (rul ResourceURIList) hasNextLink() bool { - return rul.NextLink != nil && len(*rul.NextLink) != 0 +func (sgil SharedGalleryImageList) hasNextLink() bool { + return sgil.NextLink != nil && len(*sgil.NextLink) != 0 } -// resourceURIListPreparer prepares a request to retrieve the next set of results. +// sharedGalleryImageListPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. -func (rul ResourceURIList) resourceURIListPreparer(ctx context.Context) (*http.Request, error) { - if !rul.hasNextLink() { +func (sgil SharedGalleryImageList) sharedGalleryImageListPreparer(ctx context.Context) (*http.Request, error) { + if !sgil.hasNextLink() { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), - autorest.WithBaseURL(to.String(rul.NextLink))) + autorest.WithBaseURL(to.String(sgil.NextLink))) } -// ResourceURIListPage contains a page of string values. 
-type ResourceURIListPage struct { - fn func(context.Context, ResourceURIList) (ResourceURIList, error) - rul ResourceURIList +// SharedGalleryImageListPage contains a page of SharedGalleryImage values. +type SharedGalleryImageListPage struct { + fn func(context.Context, SharedGalleryImageList) (SharedGalleryImageList, error) + sgil SharedGalleryImageList } // NextWithContext advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. -func (page *ResourceURIListPage) NextWithContext(ctx context.Context) (err error) { +func (page *SharedGalleryImageListPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ResourceURIListPage.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleryImageListPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { @@ -9224,11 +14034,11 @@ func (page *ResourceURIListPage) NextWithContext(ctx context.Context) (err error }() } for { - next, err := page.fn(ctx, page.rul) + next, err := page.fn(ctx, page.sgil) if err != nil { return err } - page.rul = next + page.sgil = next if !next.hasNextLink() || !next.IsEmpty() { break } @@ -9239,153 +14049,80 @@ func (page *ResourceURIListPage) NextWithContext(ctx context.Context) (err error // Next advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (page *ResourceURIListPage) Next() error { +func (page *SharedGalleryImageListPage) Next() error { return page.NextWithContext(context.Background()) } // NotDone returns true if the page enumeration should be started or is not yet complete. -func (page ResourceURIListPage) NotDone() bool { - return !page.rul.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page ResourceURIListPage) Response() ResourceURIList { - return page.rul -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page ResourceURIListPage) Values() []string { - if page.rul.IsEmpty() { - return nil - } - return *page.rul.Value -} - -// Creates a new instance of the ResourceURIListPage type. -func NewResourceURIListPage(cur ResourceURIList, getNextPage func(context.Context, ResourceURIList) (ResourceURIList, error)) ResourceURIListPage { - return ResourceURIListPage{ - fn: getNextPage, - rul: cur, - } +func (page SharedGalleryImageListPage) NotDone() bool { + return !page.sgil.IsEmpty() } -// RetrieveBootDiagnosticsDataResult the SAS URIs of the console screenshot and serial log blobs. -type RetrieveBootDiagnosticsDataResult struct { - autorest.Response `json:"-"` - // ConsoleScreenshotBlobURI - READ-ONLY; The console screenshot blob URI - ConsoleScreenshotBlobURI *string `json:"consoleScreenshotBlobUri,omitempty"` - // SerialConsoleLogBlobURI - READ-ONLY; The serial console log blob URI. - SerialConsoleLogBlobURI *string `json:"serialConsoleLogBlobUri,omitempty"` -} - -// MarshalJSON is the custom marshaler for RetrieveBootDiagnosticsDataResult. -func (rbddr RetrieveBootDiagnosticsDataResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// RollbackStatusInfo information about rollback on failed VM instances after a OS Upgrade operation. 
-type RollbackStatusInfo struct { - // SuccessfullyRolledbackInstanceCount - READ-ONLY; The number of instances which have been successfully rolled back. - SuccessfullyRolledbackInstanceCount *int32 `json:"successfullyRolledbackInstanceCount,omitempty"` - // FailedRolledbackInstanceCount - READ-ONLY; The number of instances which failed to rollback. - FailedRolledbackInstanceCount *int32 `json:"failedRolledbackInstanceCount,omitempty"` - // RollbackError - READ-ONLY; Error details if OS rollback failed. - RollbackError *APIError `json:"rollbackError,omitempty"` -} - -// MarshalJSON is the custom marshaler for RollbackStatusInfo. -func (rsi RollbackStatusInfo) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// RollingUpgradePolicy the configuration parameters used while performing a rolling upgrade. -type RollingUpgradePolicy struct { - // MaxBatchInstancePercent - The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. As this is a maximum, unhealthy instances in previous or future batches can cause the percentage of instances in a batch to decrease to ensure higher reliability. The default value for this parameter is 20%. - MaxBatchInstancePercent *int32 `json:"maxBatchInstancePercent,omitempty"` - // MaxUnhealthyInstancePercent - The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy, either as a result of being upgraded, or by being found in an unhealthy state by the virtual machine health checks before the rolling upgrade aborts. This constraint will be checked prior to starting any batch. The default value for this parameter is 20%. - MaxUnhealthyInstancePercent *int32 `json:"maxUnhealthyInstancePercent,omitempty"` - // MaxUnhealthyUpgradedInstancePercent - The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. This check will happen after each batch is upgraded. If this percentage is ever exceeded, the rolling update aborts. The default value for this parameter is 20%. - MaxUnhealthyUpgradedInstancePercent *int32 `json:"maxUnhealthyUpgradedInstancePercent,omitempty"` - // PauseTimeBetweenBatches - The wait time between completing the update for all virtual machines in one batch and starting the next batch. The time duration should be specified in ISO 8601 format. The default value is 0 seconds (PT0S). - PauseTimeBetweenBatches *string `json:"pauseTimeBetweenBatches,omitempty"` - // EnableCrossZoneUpgrade - Allow VMSS to ignore AZ boundaries when constructing upgrade batches. Take into consideration the Update Domain and maxBatchInstancePercent to determine the batch size. - EnableCrossZoneUpgrade *bool `json:"enableCrossZoneUpgrade,omitempty"` - // PrioritizeUnhealthyInstances - Upgrade all unhealthy instances in a scale set before any healthy instances. - PrioritizeUnhealthyInstances *bool `json:"prioritizeUnhealthyInstances,omitempty"` -} - -// RollingUpgradeProgressInfo information about the number of virtual machine instances in each upgrade -// state. -type RollingUpgradeProgressInfo struct { - // SuccessfulInstanceCount - READ-ONLY; The number of instances that have been successfully upgraded. - SuccessfulInstanceCount *int32 `json:"successfulInstanceCount,omitempty"` - // FailedInstanceCount - READ-ONLY; The number of instances that have failed to be upgraded successfully. 
- FailedInstanceCount *int32 `json:"failedInstanceCount,omitempty"` - // InProgressInstanceCount - READ-ONLY; The number of instances that are currently being upgraded. - InProgressInstanceCount *int32 `json:"inProgressInstanceCount,omitempty"` - // PendingInstanceCount - READ-ONLY; The number of instances that have not yet begun to be upgraded. - PendingInstanceCount *int32 `json:"pendingInstanceCount,omitempty"` -} - -// MarshalJSON is the custom marshaler for RollingUpgradeProgressInfo. -func (rupi RollingUpgradeProgressInfo) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) +// Response returns the raw server response from the last page request. +func (page SharedGalleryImageListPage) Response() SharedGalleryImageList { + return page.sgil } -// RollingUpgradeRunningStatus information about the current running state of the overall upgrade. -type RollingUpgradeRunningStatus struct { - // Code - READ-ONLY; Code indicating the current status of the upgrade. Possible values include: 'RollingUpgradeStatusCodeRollingForward', 'RollingUpgradeStatusCodeCancelled', 'RollingUpgradeStatusCodeCompleted', 'RollingUpgradeStatusCodeFaulted' - Code RollingUpgradeStatusCode `json:"code,omitempty"` - // StartTime - READ-ONLY; Start time of the upgrade. - StartTime *date.Time `json:"startTime,omitempty"` - // LastAction - READ-ONLY; The last action performed on the rolling upgrade. Possible values include: 'RollingUpgradeActionTypeStart', 'RollingUpgradeActionTypeCancel' - LastAction RollingUpgradeActionType `json:"lastAction,omitempty"` - // LastActionTime - READ-ONLY; Last action time of the upgrade. - LastActionTime *date.Time `json:"lastActionTime,omitempty"` +// Values returns the slice of values for the current page or nil if there are no values. +func (page SharedGalleryImageListPage) Values() []SharedGalleryImage { + if page.sgil.IsEmpty() { + return nil + } + return *page.sgil.Value } -// MarshalJSON is the custom marshaler for RollingUpgradeRunningStatus. -func (rurs RollingUpgradeRunningStatus) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) +// Creates a new instance of the SharedGalleryImageListPage type. +func NewSharedGalleryImageListPage(cur SharedGalleryImageList, getNextPage func(context.Context, SharedGalleryImageList) (SharedGalleryImageList, error)) SharedGalleryImageListPage { + return SharedGalleryImageListPage{ + fn: getNextPage, + sgil: cur, + } } -// RollingUpgradeStatusInfo the status of the latest virtual machine scale set rolling upgrade. -type RollingUpgradeStatusInfo struct { - autorest.Response `json:"-"` - *RollingUpgradeStatusInfoProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Resource Id - ID *string `json:"id,omitempty"` +// SharedGalleryImageProperties describes the properties of a gallery image definition. +type SharedGalleryImageProperties struct { + // OsType - This property allows you to specify the type of the OS that is included in the disk when creating a VM from a managed image.

    Possible values are: **Windows**
    **Linux**. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // OsState - This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'. Possible values include: 'OperatingSystemStateTypesGeneralized', 'OperatingSystemStateTypesSpecialized' + OsState OperatingSystemStateTypes `json:"osState,omitempty"` + // EndOfLifeDate - The end of life date of the gallery image definition. This property can be used for decommissioning purposes. This property is updatable. + EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"` + Identifier *GalleryImageIdentifier `json:"identifier,omitempty"` + Recommended *RecommendedMachineConfiguration `json:"recommended,omitempty"` + Disallowed *Disallowed `json:"disallowed,omitempty"` + // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'HyperVGenerationV1', 'HyperVGenerationV2' + HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"` + // Features - A list of gallery image features. + Features *[]GalleryImageFeature `json:"features,omitempty"` + PurchasePlan *ImagePurchasePlan `json:"purchasePlan,omitempty"` +} + +// SharedGalleryImageVersion specifies information about the gallery image version that you want to create +// or update. +type SharedGalleryImageVersion struct { + autorest.Response `json:"-"` + *SharedGalleryImageVersionProperties `json:"properties,omitempty"` + *SharedGalleryIdentifier `json:"identifier,omitempty"` // Name - READ-ONLY; Resource name Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Resource type - Type *string `json:"type,omitempty"` - // Location - Resource location + // Location - READ-ONLY; Resource location Location *string `json:"location,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` } -// MarshalJSON is the custom marshaler for RollingUpgradeStatusInfo. -func (rusi RollingUpgradeStatusInfo) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for SharedGalleryImageVersion. +func (sgiv SharedGalleryImageVersion) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if rusi.RollingUpgradeStatusInfoProperties != nil { - objectMap["properties"] = rusi.RollingUpgradeStatusInfoProperties - } - if rusi.Location != nil { - objectMap["location"] = rusi.Location + if sgiv.SharedGalleryImageVersionProperties != nil { + objectMap["properties"] = sgiv.SharedGalleryImageVersionProperties } - if rusi.Tags != nil { - objectMap["tags"] = rusi.Tags + if sgiv.SharedGalleryIdentifier != nil { + objectMap["identifier"] = sgiv.SharedGalleryIdentifier } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for RollingUpgradeStatusInfo struct. -func (rusi *RollingUpgradeStatusInfo) UnmarshalJSON(body []byte) error { +// UnmarshalJSON is the custom unmarshaler for SharedGalleryImageVersion struct. 
+func (sgiv *SharedGalleryImageVersion) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { @@ -9395,21 +14132,21 @@ func (rusi *RollingUpgradeStatusInfo) UnmarshalJSON(body []byte) error { switch k { case "properties": if v != nil { - var rollingUpgradeStatusInfoProperties RollingUpgradeStatusInfoProperties - err = json.Unmarshal(*v, &rollingUpgradeStatusInfoProperties) + var sharedGalleryImageVersionProperties SharedGalleryImageVersionProperties + err = json.Unmarshal(*v, &sharedGalleryImageVersionProperties) if err != nil { return err } - rusi.RollingUpgradeStatusInfoProperties = &rollingUpgradeStatusInfoProperties + sgiv.SharedGalleryImageVersionProperties = &sharedGalleryImageVersionProperties } - case "id": + case "identifier": if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) + var sharedGalleryIdentifier SharedGalleryIdentifier + err = json.Unmarshal(*v, &sharedGalleryIdentifier) if err != nil { return err } - rusi.ID = &ID + sgiv.SharedGalleryIdentifier = &sharedGalleryIdentifier } case "name": if v != nil { @@ -9418,16 +14155,7 @@ func (rusi *RollingUpgradeStatusInfo) UnmarshalJSON(body []byte) error { if err != nil { return err } - rusi.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - rusi.Type = &typeVar + sgiv.Name = &name } case "location": if v != nil { @@ -9436,16 +14164,7 @@ func (rusi *RollingUpgradeStatusInfo) UnmarshalJSON(body []byte) error { if err != nil { return err } - rusi.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - rusi.Tags = tags + sgiv.Location = &location } } } @@ -9453,95 +14172,194 @@ func (rusi *RollingUpgradeStatusInfo) UnmarshalJSON(body []byte) error { return nil } -// RollingUpgradeStatusInfoProperties the status of the latest virtual machine scale set rolling upgrade. -type RollingUpgradeStatusInfoProperties struct { - // Policy - READ-ONLY; The rolling upgrade policies applied for this upgrade. - Policy *RollingUpgradePolicy `json:"policy,omitempty"` - // RunningStatus - READ-ONLY; Information about the current running state of the overall upgrade. - RunningStatus *RollingUpgradeRunningStatus `json:"runningStatus,omitempty"` - // Progress - READ-ONLY; Information about the number of virtual machine instances in each upgrade state. - Progress *RollingUpgradeProgressInfo `json:"progress,omitempty"` - // Error - READ-ONLY; Error details for this upgrade, if there are any. - Error *APIError `json:"error,omitempty"` +// SharedGalleryImageVersionList the List Shared Gallery Image versions operation response. +type SharedGalleryImageVersionList struct { + autorest.Response `json:"-"` + // Value - A list of shared gallery images versions. + Value *[]SharedGalleryImageVersion `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of shared gallery image versions. Call ListNext() with this to fetch the next page of shared gallery image versions. + NextLink *string `json:"nextLink,omitempty"` } -// MarshalJSON is the custom marshaler for RollingUpgradeStatusInfoProperties. -func (rusip RollingUpgradeStatusInfoProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) +// SharedGalleryImageVersionListIterator provides access to a complete listing of SharedGalleryImageVersion +// values. 
+type SharedGalleryImageVersionListIterator struct { + i int + page SharedGalleryImageVersionListPage } -// RunCommandDocument describes the properties of a Run Command. -type RunCommandDocument struct { - autorest.Response `json:"-"` - // Script - The script to be executed. - Script *[]string `json:"script,omitempty"` - // Parameters - The parameters used by the script. - Parameters *[]RunCommandParameterDefinition `json:"parameters,omitempty"` - // Schema - The VM run command schema. - Schema *string `json:"$schema,omitempty"` - // ID - The VM run command id. - ID *string `json:"id,omitempty"` - // OsType - The Operating System type. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux' - OsType OperatingSystemTypes `json:"osType,omitempty"` - // Label - The VM run command label. - Label *string `json:"label,omitempty"` - // Description - The VM run command description. - Description *string `json:"description,omitempty"` +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *SharedGalleryImageVersionListIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleryImageVersionListIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil } -// RunCommandDocumentBase describes the properties of a Run Command metadata. -type RunCommandDocumentBase struct { - // Schema - The VM run command schema. - Schema *string `json:"$schema,omitempty"` - // ID - The VM run command id. - ID *string `json:"id,omitempty"` - // OsType - The Operating System type. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux' - OsType OperatingSystemTypes `json:"osType,omitempty"` - // Label - The VM run command label. - Label *string `json:"label,omitempty"` - // Description - The VM run command description. - Description *string `json:"description,omitempty"` +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *SharedGalleryImageVersionListIterator) Next() error { + return iter.NextWithContext(context.Background()) } -// RunCommandInput capture Virtual Machine parameters. -type RunCommandInput struct { - // CommandID - The run command id. - CommandID *string `json:"commandId,omitempty"` - // Script - Optional. The script to be executed. When this value is given, the given script will override the default script of the command. - Script *[]string `json:"script,omitempty"` - // Parameters - The run command parameters. - Parameters *[]RunCommandInputParameter `json:"parameters,omitempty"` +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter SharedGalleryImageVersionListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) } -// RunCommandInputParameter describes the properties of a run command parameter. -type RunCommandInputParameter struct { - // Name - The run command parameter name. 
- Name *string `json:"name,omitempty"` - // Value - The run command parameter value. - Value *string `json:"value,omitempty"` +// Response returns the raw server response from the last page request. +func (iter SharedGalleryImageVersionListIterator) Response() SharedGalleryImageVersionList { + return iter.page.Response() } -// RunCommandListResult the List Virtual Machine operation response. -type RunCommandListResult struct { +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter SharedGalleryImageVersionListIterator) Value() SharedGalleryImageVersion { + if !iter.page.NotDone() { + return SharedGalleryImageVersion{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the SharedGalleryImageVersionListIterator type. +func NewSharedGalleryImageVersionListIterator(page SharedGalleryImageVersionListPage) SharedGalleryImageVersionListIterator { + return SharedGalleryImageVersionListIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (sgivl SharedGalleryImageVersionList) IsEmpty() bool { + return sgivl.Value == nil || len(*sgivl.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (sgivl SharedGalleryImageVersionList) hasNextLink() bool { + return sgivl.NextLink != nil && len(*sgivl.NextLink) != 0 +} + +// sharedGalleryImageVersionListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (sgivl SharedGalleryImageVersionList) sharedGalleryImageVersionListPreparer(ctx context.Context) (*http.Request, error) { + if !sgivl.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(sgivl.NextLink))) +} + +// SharedGalleryImageVersionListPage contains a page of SharedGalleryImageVersion values. +type SharedGalleryImageVersionListPage struct { + fn func(context.Context, SharedGalleryImageVersionList) (SharedGalleryImageVersionList, error) + sgivl SharedGalleryImageVersionList +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *SharedGalleryImageVersionListPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleryImageVersionListPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.sgivl) + if err != nil { + return err + } + page.sgivl = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *SharedGalleryImageVersionListPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page SharedGalleryImageVersionListPage) NotDone() bool { + return !page.sgivl.IsEmpty() +} + +// Response returns the raw server response from the last page request. 
+func (page SharedGalleryImageVersionListPage) Response() SharedGalleryImageVersionList { + return page.sgivl +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page SharedGalleryImageVersionListPage) Values() []SharedGalleryImageVersion { + if page.sgivl.IsEmpty() { + return nil + } + return *page.sgivl.Value +} + +// Creates a new instance of the SharedGalleryImageVersionListPage type. +func NewSharedGalleryImageVersionListPage(cur SharedGalleryImageVersionList, getNextPage func(context.Context, SharedGalleryImageVersionList) (SharedGalleryImageVersionList, error)) SharedGalleryImageVersionListPage { + return SharedGalleryImageVersionListPage{ + fn: getNextPage, + sgivl: cur, + } +} + +// SharedGalleryImageVersionProperties describes the properties of a gallery image version. +type SharedGalleryImageVersionProperties struct { + // PublishedDate - The published date of the gallery image version Definition. This property can be used for decommissioning purposes. This property is updatable. + PublishedDate *date.Time `json:"publishedDate,omitempty"` + // EndOfLifeDate - The end of life date of the gallery image version Definition. This property can be used for decommissioning purposes. This property is updatable. + EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"` +} + +// SharedGalleryList the List Shared Galleries operation response. +type SharedGalleryList struct { autorest.Response `json:"-"` - // Value - The list of virtual machine run commands. - Value *[]RunCommandDocumentBase `json:"value,omitempty"` - // NextLink - The uri to fetch the next page of run commands. Call ListNext() with this to fetch the next page of run commands. + // Value - A list of shared galleries. + Value *[]SharedGallery `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of shared galleries. Call ListNext() with this to fetch the next page of shared galleries. NextLink *string `json:"nextLink,omitempty"` } -// RunCommandListResultIterator provides access to a complete listing of RunCommandDocumentBase values. -type RunCommandListResultIterator struct { +// SharedGalleryListIterator provides access to a complete listing of SharedGallery values. +type SharedGalleryListIterator struct { i int - page RunCommandListResultPage + page SharedGalleryListPage } // NextWithContext advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. -func (iter *RunCommandListResultIterator) NextWithContext(ctx context.Context) (err error) { +func (iter *SharedGalleryListIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/RunCommandListResultIterator.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleryListIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { @@ -9566,67 +14384,67 @@ func (iter *RunCommandListResultIterator) NextWithContext(ctx context.Context) ( // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (iter *RunCommandListResultIterator) Next() error { +func (iter *SharedGalleryListIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. 
-func (iter RunCommandListResultIterator) NotDone() bool { +func (iter SharedGalleryListIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. -func (iter RunCommandListResultIterator) Response() RunCommandListResult { +func (iter SharedGalleryListIterator) Response() SharedGalleryList { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. -func (iter RunCommandListResultIterator) Value() RunCommandDocumentBase { +func (iter SharedGalleryListIterator) Value() SharedGallery { if !iter.page.NotDone() { - return RunCommandDocumentBase{} + return SharedGallery{} } return iter.page.Values()[iter.i] } -// Creates a new instance of the RunCommandListResultIterator type. -func NewRunCommandListResultIterator(page RunCommandListResultPage) RunCommandListResultIterator { - return RunCommandListResultIterator{page: page} +// Creates a new instance of the SharedGalleryListIterator type. +func NewSharedGalleryListIterator(page SharedGalleryListPage) SharedGalleryListIterator { + return SharedGalleryListIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. -func (rclr RunCommandListResult) IsEmpty() bool { - return rclr.Value == nil || len(*rclr.Value) == 0 +func (sgl SharedGalleryList) IsEmpty() bool { + return sgl.Value == nil || len(*sgl.Value) == 0 } // hasNextLink returns true if the NextLink is not empty. -func (rclr RunCommandListResult) hasNextLink() bool { - return rclr.NextLink != nil && len(*rclr.NextLink) != 0 +func (sgl SharedGalleryList) hasNextLink() bool { + return sgl.NextLink != nil && len(*sgl.NextLink) != 0 } -// runCommandListResultPreparer prepares a request to retrieve the next set of results. +// sharedGalleryListPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. -func (rclr RunCommandListResult) runCommandListResultPreparer(ctx context.Context) (*http.Request, error) { - if !rclr.hasNextLink() { +func (sgl SharedGalleryList) sharedGalleryListPreparer(ctx context.Context) (*http.Request, error) { + if !sgl.hasNextLink() { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), - autorest.WithBaseURL(to.String(rclr.NextLink))) + autorest.WithBaseURL(to.String(sgl.NextLink))) } -// RunCommandListResultPage contains a page of RunCommandDocumentBase values. -type RunCommandListResultPage struct { - fn func(context.Context, RunCommandListResult) (RunCommandListResult, error) - rclr RunCommandListResult +// SharedGalleryListPage contains a page of SharedGallery values. +type SharedGalleryListPage struct { + fn func(context.Context, SharedGalleryList) (SharedGalleryList, error) + sgl SharedGalleryList } // NextWithContext advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. 
-func (page *RunCommandListResultPage) NextWithContext(ctx context.Context) (err error) { +func (page *SharedGalleryListPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/RunCommandListResultPage.NextWithContext") + ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleryListPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { @@ -9636,11 +14454,11 @@ func (page *RunCommandListResultPage) NextWithContext(ctx context.Context) (err }() } for { - next, err := page.fn(ctx, page.rclr) + next, err := page.fn(ctx, page.sgl) if err != nil { return err } - page.rclr = next + page.sgl = next if !next.hasNextLink() || !next.IsEmpty() { break } @@ -9651,78 +14469,36 @@ func (page *RunCommandListResultPage) NextWithContext(ctx context.Context) (err // Next advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. -func (page *RunCommandListResultPage) Next() error { +func (page *SharedGalleryListPage) Next() error { return page.NextWithContext(context.Background()) } // NotDone returns true if the page enumeration should be started or is not yet complete. -func (page RunCommandListResultPage) NotDone() bool { - return !page.rclr.IsEmpty() +func (page SharedGalleryListPage) NotDone() bool { + return !page.sgl.IsEmpty() } // Response returns the raw server response from the last page request. -func (page RunCommandListResultPage) Response() RunCommandListResult { - return page.rclr +func (page SharedGalleryListPage) Response() SharedGalleryList { + return page.sgl } // Values returns the slice of values for the current page or nil if there are no values. -func (page RunCommandListResultPage) Values() []RunCommandDocumentBase { - if page.rclr.IsEmpty() { +func (page SharedGalleryListPage) Values() []SharedGallery { + if page.sgl.IsEmpty() { return nil } - return *page.rclr.Value + return *page.sgl.Value } -// Creates a new instance of the RunCommandListResultPage type. -func NewRunCommandListResultPage(cur RunCommandListResult, getNextPage func(context.Context, RunCommandListResult) (RunCommandListResult, error)) RunCommandListResultPage { - return RunCommandListResultPage{ - fn: getNextPage, - rclr: cur, +// Creates a new instance of the SharedGalleryListPage type. +func NewSharedGalleryListPage(cur SharedGalleryList, getNextPage func(context.Context, SharedGalleryList) (SharedGalleryList, error)) SharedGalleryListPage { + return SharedGalleryListPage{ + fn: getNextPage, + sgl: cur, } } -// RunCommandParameterDefinition describes the properties of a run command parameter. -type RunCommandParameterDefinition struct { - // Name - The run command parameter name. - Name *string `json:"name,omitempty"` - // Type - The run command parameter type. - Type *string `json:"type,omitempty"` - // DefaultValue - The run command parameter default value. - DefaultValue *string `json:"defaultValue,omitempty"` - // Required - The run command parameter required. - Required *bool `json:"required,omitempty"` -} - -// RunCommandResult ... -type RunCommandResult struct { - autorest.Response `json:"-"` - // Value - Run command operation response. - Value *[]InstanceViewStatus `json:"value,omitempty"` -} - -// ScaleInPolicy describes a scale-in policy for a virtual machine scale set. -type ScaleInPolicy struct { - // Rules - The rules to be followed when scaling-in a virtual machine scale set.

    Possible values are: **Default** When a virtual machine scale set is scaled in, the scale set will first be balanced across zones if it is a zonal scale set. Then, it will be balanced across Fault Domains as far as possible. Within each Fault Domain, the virtual machines chosen for removal will be the newest ones that are not protected from scale-in. **OldestVM** When a virtual machine scale set is being scaled-in, the oldest virtual machines that are not protected from scale-in will be chosen for removal. For zonal virtual machine scale sets, the scale set will first be balanced across zones. Within each zone, the oldest virtual machines that are not protected will be chosen for removal. **NewestVM** When a virtual machine scale set is being scaled-in, the newest virtual machines that are not protected from scale-in will be chosen for removal. For zonal virtual machine scale sets, the scale set will first be balanced across zones. Within each zone, the newest virtual machines that are not protected will be chosen for removal.
    - Rules *[]VirtualMachineScaleSetScaleInRules `json:"rules,omitempty"` -} - -// ScheduledEventsProfile ... -type ScheduledEventsProfile struct { - // TerminateNotificationProfile - Specifies Terminate Scheduled Event related configurations. - TerminateNotificationProfile *TerminateNotificationProfile `json:"terminateNotificationProfile,omitempty"` -} - -// SecurityProfile specifies the Security profile settings for the virtual machine or virtual machine scale -// set. -type SecurityProfile struct { - // UefiSettings - Specifies the security settings like secure boot and vTPM used while creating the virtual machine.

    Minimum api-version: 2020-12-01 - UefiSettings *UefiSettings `json:"uefiSettings,omitempty"` - // EncryptionAtHost - This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself.

    Default: The Encryption at host will be disabled unless this property is set to true for the resource. - EncryptionAtHost *bool `json:"encryptionAtHost,omitempty"` - // SecurityType - Specifies the SecurityType of the virtual machine. It is set as TrustedLaunch to enable UefiSettings.

    Default: UefiSettings will not be enabled unless this property is set as TrustedLaunch. Possible values include: 'SecurityTypesTrustedLaunch' - SecurityType SecurityTypes `json:"securityType,omitempty"` -} - // ShareInfoElement ... type ShareInfoElement struct { // VMURI - READ-ONLY; A relative URI containing the ID of the VM that has the disk attached. @@ -9735,6 +14511,40 @@ func (sie ShareInfoElement) MarshalJSON() ([]byte, error) { return json.Marshal(objectMap) } +// SharingProfile profile for gallery sharing to subscription or tenant +type SharingProfile struct { + // Permissions - This property allows you to specify the permission of sharing gallery.

    Possible values are:

    **Private**

    **Groups**. Possible values include: 'GallerySharingPermissionTypesPrivate', 'GallerySharingPermissionTypesGroups' + Permissions GallerySharingPermissionTypes `json:"permissions,omitempty"` + // Groups - READ-ONLY; A list of sharing profile groups. + Groups *[]SharingProfileGroup `json:"groups,omitempty"` +} + +// MarshalJSON is the custom marshaler for SharingProfile. +func (sp SharingProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if sp.Permissions != "" { + objectMap["permissions"] = sp.Permissions + } + return json.Marshal(objectMap) +} + +// SharingProfileGroup group of the gallery sharing profile +type SharingProfileGroup struct { + // Type - This property allows you to specify the type of sharing group.

    Possible values are:

    **Subscriptions**

    **AADTenants**. Possible values include: 'SharingProfileGroupTypesSubscriptions', 'SharingProfileGroupTypesAADTenants' + Type SharingProfileGroupTypes `json:"type,omitempty"` + // Ids - A list of subscription/tenant ids the gallery is aimed to be shared to. + Ids *[]string `json:"ids,omitempty"` +} + +// SharingUpdate specifies information about the gallery sharing profile update. +type SharingUpdate struct { + autorest.Response `json:"-"` + // OperationType - This property allows you to specify the operation type of gallery sharing update.

    Possible values are:

    **Add**

    **Remove**

    **Reset**. Possible values include: 'SharingUpdateOperationTypesAdd', 'SharingUpdateOperationTypesRemove', 'SharingUpdateOperationTypesReset' + OperationType SharingUpdateOperationTypes `json:"operationType,omitempty"` + // Groups - A list of sharing profile groups. + Groups *[]SharingProfileGroup `json:"groups,omitempty"` +} + // Sku describes a virtual machine scale set sku. NOTE: If the new VM SKU is not supported on the hardware // the scale set is currently on, you need to deallocate the VMs in the scale set before you modify the SKU // name. @@ -10054,13 +14864,15 @@ type SnapshotProperties struct { HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"` // PurchasePlan - Purchase plan information for the image from which the source disk for the snapshot was originally created. PurchasePlan *PurchasePlan `json:"purchasePlan,omitempty"` + // SupportedCapabilities - List of supported capabilities (like Accelerated Networking) for the image from which the source disk from the snapshot was originally created. + SupportedCapabilities *SupportedCapabilities `json:"supportedCapabilities,omitempty"` // CreationData - Disk source information. CreationData information cannot be changed after the disk has been created. CreationData *CreationData `json:"creationData,omitempty"` // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the disk to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` // DiskSizeBytes - READ-ONLY; The size of the disk in bytes. This field is read only. DiskSizeBytes *int64 `json:"diskSizeBytes,omitempty"` - // DiskState - The state of the snapshot. Possible values include: 'DiskStateUnattached', 'DiskStateAttached', 'DiskStateReserved', 'DiskStateActiveSAS', 'DiskStateReadyToUpload', 'DiskStateActiveUpload' + // DiskState - The state of the snapshot. Possible values include: 'DiskStateUnattached', 'DiskStateAttached', 'DiskStateReserved', 'DiskStateFrozen', 'DiskStateActiveSAS', 'DiskStateActiveSASFrozen', 'DiskStateReadyToUpload', 'DiskStateActiveUpload' DiskState DiskState `json:"diskState,omitempty"` // UniqueID - READ-ONLY; Unique Guid identifying the resource. UniqueID *string `json:"uniqueId,omitempty"` @@ -10078,6 +14890,10 @@ type SnapshotProperties struct { DiskAccessID *string `json:"diskAccessId,omitempty"` // SupportsHibernation - Indicates the OS on a snapshot supports hibernation. SupportsHibernation *bool `json:"supportsHibernation,omitempty"` + // PublicNetworkAccess - Possible values include: 'PublicNetworkAccessEnabled', 'PublicNetworkAccessDisabled' + PublicNetworkAccess PublicNetworkAccess `json:"publicNetworkAccess,omitempty"` + // CompletionPercent - Percentage complete for the background copy when a resource is created via the CopyStart operation. + CompletionPercent *float64 `json:"completionPercent,omitempty"` } // MarshalJSON is the custom marshaler for SnapshotProperties. 
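Editorial note, not part of the vendored diff: the hunks above introduce the gallery sharing model (SharingProfile, SharingProfileGroup, SharingUpdate). The following minimal sketch shows how a caller might build a SharingUpdate that shares a gallery with one subscription; the import path and the placeholder subscription ID are assumptions, and the pointer helpers come from go-autorest.

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" // assumed vendored path
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Add a sharing profile group so the gallery is shared with a single subscription.
	update := compute.SharingUpdate{
		OperationType: compute.SharingUpdateOperationTypesAdd,
		Groups: &[]compute.SharingProfileGroup{
			{
				Type: compute.SharingProfileGroupTypesSubscriptions,
				Ids:  &[]string{"00000000-0000-0000-0000-000000000000"}, // placeholder subscription ID
			},
		},
	}
	fmt.Printf("%+v\n", update)
}

The same SharingUpdate shape, with SharingUpdateOperationTypesRemove or SharingUpdateOperationTypesReset, covers the other operation types listed in the field's description.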
@@ -10092,6 +14908,9 @@ func (sp SnapshotProperties) MarshalJSON() ([]byte, error) { if sp.PurchasePlan != nil { objectMap["purchasePlan"] = sp.PurchasePlan } + if sp.SupportedCapabilities != nil { + objectMap["supportedCapabilities"] = sp.SupportedCapabilities + } if sp.CreationData != nil { objectMap["creationData"] = sp.CreationData } @@ -10119,6 +14938,12 @@ func (sp SnapshotProperties) MarshalJSON() ([]byte, error) { if sp.SupportsHibernation != nil { objectMap["supportsHibernation"] = sp.SupportsHibernation } + if sp.PublicNetworkAccess != "" { + objectMap["publicNetworkAccess"] = sp.PublicNetworkAccess + } + if sp.CompletionPercent != nil { + objectMap["completionPercent"] = sp.CompletionPercent + } return json.Marshal(objectMap) } @@ -10425,6 +15250,14 @@ type SnapshotUpdateProperties struct { DiskAccessID *string `json:"diskAccessId,omitempty"` // SupportsHibernation - Indicates the OS on a snapshot supports hibernation. SupportsHibernation *bool `json:"supportsHibernation,omitempty"` + // PublicNetworkAccess - Possible values include: 'PublicNetworkAccessEnabled', 'PublicNetworkAccessDisabled' + PublicNetworkAccess PublicNetworkAccess `json:"publicNetworkAccess,omitempty"` +} + +// SoftDeletePolicy contains information about the soft deletion policy of the gallery. +type SoftDeletePolicy struct { + // IsSoftDeleteEnabled - Enables soft-deletion for resources in this gallery, allowing them to be recovered within retention time. + IsSoftDeleteEnabled *bool `json:"isSoftDeleteEnabled,omitempty"` } // SourceVault the vault id is an Azure Resource Manager Resource id in the form @@ -10434,6 +15267,16 @@ type SourceVault struct { ID *string `json:"id,omitempty"` } +// SpotRestorePolicy specifies the Spot-Try-Restore properties for the virtual machine scale set.

    +// With this property customer can enable or disable automatic restore of the evicted Spot VMSS VM +// instances opportunistically based on capacity availability and pricing constraint. +type SpotRestorePolicy struct { + // Enabled - Enables the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints + Enabled *bool `json:"enabled,omitempty"` + // RestoreTimeout - Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances + RestoreTimeout *string `json:"restoreTimeout,omitempty"` +} + // SSHConfiguration SSH configuration for Linux based VMs running on Azure type SSHConfiguration struct { // PublicKeys - The list of SSH public keys used to authenticate with linux based VMs. @@ -10445,7 +15288,7 @@ type SSHConfiguration struct { type SSHPublicKey struct { // Path - Specifies the full path on the created VM where ssh public key is stored. If the file already exists, the specified key is appended to the file. Example: /home/user/.ssh/authorized_keys Path *string `json:"path,omitempty"` - // KeyData - SSH public key certificate used to authenticate with the VM through ssh. The key needs to be at least 2048-bit and in ssh-rsa format.

    For creating ssh keys, see [Create SSH keys on Linux and Mac for Linux VMs in Azure](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/mac-create-ssh-keys?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json). + // KeyData - SSH public key certificate used to authenticate with the VM through ssh. The key needs to be at least 2048-bit and in ssh-rsa format.

    For creating ssh keys, see [Create SSH keys on Linux and Mac for Linux VMs in Azure]https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed). KeyData *string `json:"keyData,omitempty"` } @@ -10780,13 +15623,27 @@ func (spkur *SSHPublicKeyUpdateResource) UnmarshalJSON(body []byte) error { return nil } +// StatusCodeCount ... +type StatusCodeCount struct { + // Code - READ-ONLY; The instance view status code + Code *string `json:"code,omitempty"` + // Count - READ-ONLY; Number of instances having this status code + Count *int32 `json:"count,omitempty"` +} + +// MarshalJSON is the custom marshaler for StatusCodeCount. +func (scc StatusCodeCount) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + // StorageProfile specifies the storage settings for the virtual machine disks. type StorageProfile struct { // ImageReference - Specifies information about the image to use. You can specify information about platform images, marketplace images, or virtual machine images. This element is required when you want to use a platform image, marketplace image, or virtual machine image, but is not used in other creation operations. ImageReference *ImageReference `json:"imageReference,omitempty"` - // OsDisk - Specifies information about the operating system disk used by the virtual machine.

    For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). + // OsDisk - Specifies information about the operating system disk used by the virtual machine.

    For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview). OsDisk *OSDisk `json:"osDisk,omitempty"` - // DataDisks - Specifies the parameters that are used to add a data disk to a virtual machine.

    For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). + // DataDisks - Specifies the parameters that are used to add a data disk to a virtual machine.

    For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview). DataDisks *[]DataDisk `json:"dataDisks,omitempty"` } @@ -10808,60 +15665,239 @@ func (srro SubResourceReadOnly) MarshalJSON() ([]byte, error) { return json.Marshal(objectMap) } -// SubResourceWithColocationStatus ... -type SubResourceWithColocationStatus struct { - // ColocationStatus - Describes colocation status of a resource in the Proximity Placement Group. - ColocationStatus *InstanceViewStatus `json:"colocationStatus,omitempty"` - // ID - Resource Id - ID *string `json:"id,omitempty"` +// SubResourceWithColocationStatus ... +type SubResourceWithColocationStatus struct { + // ColocationStatus - Describes colocation status of a resource in the Proximity Placement Group. + ColocationStatus *InstanceViewStatus `json:"colocationStatus,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` +} + +// SupportedCapabilities list of supported capabilities (like accelerated networking) persisted on the disk +// resource for VM use. +type SupportedCapabilities struct { + // AcceleratedNetwork - True if the image from which the OS disk is created supports accelerated networking. + AcceleratedNetwork *bool `json:"acceleratedNetwork,omitempty"` +} + +// TargetRegion describes the target region information. +type TargetRegion struct { + // Name - The name of the region. + Name *string `json:"name,omitempty"` + // RegionalReplicaCount - The number of replicas of the Image Version to be created per region. This property is updatable. + RegionalReplicaCount *int32 `json:"regionalReplicaCount,omitempty"` + // StorageAccountType - Specifies the storage account type to be used to store the image. This property is not updatable. Possible values include: 'StorageAccountTypeStandardLRS', 'StorageAccountTypeStandardZRS', 'StorageAccountTypePremiumLRS' + StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"` + Encryption *EncryptionImages `json:"encryption,omitempty"` +} + +// TerminateNotificationProfile ... +type TerminateNotificationProfile struct { + // NotBeforeTimeout - Configurable length of time a Virtual Machine being deleted will have to potentially approve the Terminate Scheduled Event before the event is auto approved (timed out). The configuration must be specified in ISO 8601 format, the default value is 5 minutes (PT5M) + NotBeforeTimeout *string `json:"notBeforeTimeout,omitempty"` + // Enable - Specifies whether the Terminate Scheduled event is enabled or disabled. + Enable *bool `json:"enable,omitempty"` +} + +// ThrottledRequestsInput api request input for LogAnalytics getThrottledRequests Api. +type ThrottledRequestsInput struct { + // BlobContainerSasURI - SAS Uri of the logging blob container to which LogAnalytics Api writes output logs to. + BlobContainerSasURI *string `json:"blobContainerSasUri,omitempty"` + // FromTime - From time of the query + FromTime *date.Time `json:"fromTime,omitempty"` + // ToTime - To time of the query + ToTime *date.Time `json:"toTime,omitempty"` + // GroupByThrottlePolicy - Group query result by Throttle Policy applied. + GroupByThrottlePolicy *bool `json:"groupByThrottlePolicy,omitempty"` + // GroupByOperationName - Group query result by Operation Name. + GroupByOperationName *bool `json:"groupByOperationName,omitempty"` + // GroupByResourceName - Group query result by Resource Name. 
+ GroupByResourceName *bool `json:"groupByResourceName,omitempty"` + // GroupByClientApplicationID - Group query result by Client Application ID. + GroupByClientApplicationID *bool `json:"groupByClientApplicationId,omitempty"` + // GroupByUserAgent - Group query result by User Agent. + GroupByUserAgent *bool `json:"groupByUserAgent,omitempty"` +} + +// UefiSettings specifies the security settings like secure boot and vTPM used while creating the virtual +// machine.

    Minimum api-version: 2020-12-01 +type UefiSettings struct { + // SecureBootEnabled - Specifies whether secure boot should be enabled on the virtual machine.

    Minimum api-version: 2020-12-01 + SecureBootEnabled *bool `json:"secureBootEnabled,omitempty"` + // VTpmEnabled - Specifies whether vTPM should be enabled on the virtual machine.

    Minimum api-version: 2020-12-01 + VTpmEnabled *bool `json:"vTpmEnabled,omitempty"` +} + +// UpdateDomain defines an update domain for the cloud service. +type UpdateDomain struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource Name + Name *string `json:"name,omitempty"` +} + +// MarshalJSON is the custom marshaler for UpdateDomain. +func (ud UpdateDomain) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// UpdateDomainListResult ... +type UpdateDomainListResult struct { + autorest.Response `json:"-"` + Value *[]UpdateDomain `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// UpdateDomainListResultIterator provides access to a complete listing of UpdateDomain values. +type UpdateDomainListResultIterator struct { + i int + page UpdateDomainListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *UpdateDomainListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/UpdateDomainListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *UpdateDomainListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter UpdateDomainListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter UpdateDomainListResultIterator) Response() UpdateDomainListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter UpdateDomainListResultIterator) Value() UpdateDomain { + if !iter.page.NotDone() { + return UpdateDomain{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the UpdateDomainListResultIterator type. +func NewUpdateDomainListResultIterator(page UpdateDomainListResultPage) UpdateDomainListResultIterator { + return UpdateDomainListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (udlr UpdateDomainListResult) IsEmpty() bool { + return udlr.Value == nil || len(*udlr.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (udlr UpdateDomainListResult) hasNextLink() bool { + return udlr.NextLink != nil && len(*udlr.NextLink) != 0 +} + +// updateDomainListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. 
+func (udlr UpdateDomainListResult) updateDomainListResultPreparer(ctx context.Context) (*http.Request, error) { + if !udlr.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(udlr.NextLink))) +} + +// UpdateDomainListResultPage contains a page of UpdateDomain values. +type UpdateDomainListResultPage struct { + fn func(context.Context, UpdateDomainListResult) (UpdateDomainListResult, error) + udlr UpdateDomainListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *UpdateDomainListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/UpdateDomainListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.udlr) + if err != nil { + return err + } + page.udlr = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *UpdateDomainListResultPage) Next() error { + return page.NextWithContext(context.Background()) } -// TargetRegion describes the target region information. -type TargetRegion struct { - // Name - The name of the region. - Name *string `json:"name,omitempty"` - // RegionalReplicaCount - The number of replicas of the Image Version to be created per region. This property is updatable. - RegionalReplicaCount *int32 `json:"regionalReplicaCount,omitempty"` - // StorageAccountType - Specifies the storage account type to be used to store the image. This property is not updatable. Possible values include: 'StorageAccountTypeStandardLRS', 'StorageAccountTypeStandardZRS', 'StorageAccountTypePremiumLRS' - StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"` - Encryption *EncryptionImages `json:"encryption,omitempty"` +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page UpdateDomainListResultPage) NotDone() bool { + return !page.udlr.IsEmpty() } -// TerminateNotificationProfile ... -type TerminateNotificationProfile struct { - // NotBeforeTimeout - Configurable length of time a Virtual Machine being deleted will have to potentially approve the Terminate Scheduled Event before the event is auto approved (timed out). The configuration must be specified in ISO 8601 format, the default value is 5 minutes (PT5M) - NotBeforeTimeout *string `json:"notBeforeTimeout,omitempty"` - // Enable - Specifies whether the Terminate Scheduled event is enabled or disabled. - Enable *bool `json:"enable,omitempty"` +// Response returns the raw server response from the last page request. +func (page UpdateDomainListResultPage) Response() UpdateDomainListResult { + return page.udlr } -// ThrottledRequestsInput api request input for LogAnalytics getThrottledRequests Api. -type ThrottledRequestsInput struct { - // BlobContainerSasURI - SAS Uri of the logging blob container to which LogAnalytics Api writes output logs to. 
- BlobContainerSasURI *string `json:"blobContainerSasUri,omitempty"` - // FromTime - From time of the query - FromTime *date.Time `json:"fromTime,omitempty"` - // ToTime - To time of the query - ToTime *date.Time `json:"toTime,omitempty"` - // GroupByThrottlePolicy - Group query result by Throttle Policy applied. - GroupByThrottlePolicy *bool `json:"groupByThrottlePolicy,omitempty"` - // GroupByOperationName - Group query result by Operation Name. - GroupByOperationName *bool `json:"groupByOperationName,omitempty"` - // GroupByResourceName - Group query result by Resource Name. - GroupByResourceName *bool `json:"groupByResourceName,omitempty"` - // GroupByClientApplicationID - Group query result by Client Application ID. - GroupByClientApplicationID *bool `json:"groupByClientApplicationId,omitempty"` - // GroupByUserAgent - Group query result by User Agent. - GroupByUserAgent *bool `json:"groupByUserAgent,omitempty"` +// Values returns the slice of values for the current page or nil if there are no values. +func (page UpdateDomainListResultPage) Values() []UpdateDomain { + if page.udlr.IsEmpty() { + return nil + } + return *page.udlr.Value } -// UefiSettings specifies the security settings like secure boot and vTPM used while creating the virtual -// machine.

    Minimum api-version: 2020-12-01 -type UefiSettings struct { - // SecureBootEnabled - Specifies whether secure boot should be enabled on the virtual machine.

    Minimum api-version: 2020-12-01 - SecureBootEnabled *bool `json:"secureBootEnabled,omitempty"` - // VTpmEnabled - Specifies whether vTPM should be enabled on the virtual machine.

    Minimum api-version: 2020-12-01 - VTpmEnabled *bool `json:"vTpmEnabled,omitempty"` +// Creates a new instance of the UpdateDomainListResultPage type. +func NewUpdateDomainListResultPage(cur UpdateDomainListResult, getNextPage func(context.Context, UpdateDomainListResult) (UpdateDomainListResult, error)) UpdateDomainListResultPage { + return UpdateDomainListResultPage{ + fn: getNextPage, + udlr: cur, + } } // UpdateResource the Update Resource model definition. @@ -11006,7 +16042,7 @@ type UserArtifactSource struct { // VaultCertificate describes a single certificate reference in a Key Vault, and where the certificate // should reside on the VM. type VaultCertificate struct { - // CertificateURL - This is the URL of a certificate that has been uploaded to Key Vault as a secret. For adding a secret to the Key Vault, see [Add a key or secret to the key vault](https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add). In this case, your certificate needs to be It is the Base64 encoding of the following JSON Object which is encoded in UTF-8:

    {
    "data":"",
    "dataType":"pfx",
    "password":""
    } + // CertificateURL - This is the URL of a certificate that has been uploaded to Key Vault as a secret. For adding a secret to the Key Vault, see [Add a key or secret to the key vault](https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add). In this case, your certificate needs to be It is the Base64 encoding of the following JSON Object which is encoded in UTF-8:

    {
    "data":"",
    "dataType":"pfx",
    "password":""
    }
    To install certificates on a virtual machine it is recommended to use the [Azure Key Vault virtual machine extension for Linux](https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-linux) or the [Azure Key Vault virtual machine extension for Windows](https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-windows). CertificateURL *string `json:"certificateUrl,omitempty"` // CertificateStore - For Windows VMs, specifies the certificate store on the Virtual Machine to which the certificate should be added. The specified certificate store is implicitly in the LocalMachine account.

    For Linux VMs, the certificate file is placed under the /var/lib/waagent directory, with the file name <UppercaseThumbprint>.crt for the X509 certificate file and <UppercaseThumbprint>.prv for private key. Both of these files are .pem formatted. CertificateStore *string `json:"certificateStore,omitempty"` @@ -11524,6 +16560,8 @@ type VirtualMachineExtensionProperties struct { ProvisioningState *string `json:"provisioningState,omitempty"` // InstanceView - The virtual machine extension instance view. InstanceView *VirtualMachineExtensionInstanceView `json:"instanceView,omitempty"` + // SuppressFailures - Indicates whether failures stemming from the extension will be suppressed (Operational failures such as not connecting to the VM will not be suppressed regardless of this value). The default is false. + SuppressFailures *bool `json:"suppressFailures,omitempty"` } // MarshalJSON is the custom marshaler for VirtualMachineExtensionProperties. @@ -11556,6 +16594,9 @@ func (vmep VirtualMachineExtensionProperties) MarshalJSON() ([]byte, error) { if vmep.InstanceView != nil { objectMap["instanceView"] = vmep.InstanceView } + if vmep.SuppressFailures != nil { + objectMap["suppressFailures"] = vmep.SuppressFailures + } return json.Marshal(objectMap) } @@ -11759,6 +16800,8 @@ type VirtualMachineExtensionUpdateProperties struct { Settings interface{} `json:"settings,omitempty"` // ProtectedSettings - The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all. ProtectedSettings interface{} `json:"protectedSettings,omitempty"` + // SuppressFailures - Indicates whether failures stemming from the extension will be suppressed (Operational failures such as not connecting to the VM will not be suppressed regardless of this value). The default is false. + SuppressFailures *bool `json:"suppressFailures,omitempty"` } // VirtualMachineHealthStatus the health status of the VM. @@ -12108,6 +17151,14 @@ func (vmiv VirtualMachineInstanceView) MarshalJSON() ([]byte, error) { return json.Marshal(objectMap) } +// VirtualMachineIPTag contains the IP tag associated with the public IP address. +type VirtualMachineIPTag struct { + // IPTagType - IP tag type. Example: FirstPartyUsage. + IPTagType *string `json:"ipTagType,omitempty"` + // Tag - IP tag associated with the public IP. Example: SQL, Storage etc. + Tag *string `json:"tag,omitempty"` +} + // VirtualMachineListResult the List Virtual Machine operation response. type VirtualMachineListResult struct { autorest.Response `json:"-"` @@ -12267,6 +17318,160 @@ func NewVirtualMachineListResultPage(cur VirtualMachineListResult, getNextPage f } } +// VirtualMachineNetworkInterfaceConfiguration describes a virtual machine network interface +// configurations. +type VirtualMachineNetworkInterfaceConfiguration struct { + // Name - The network interface configuration name. + Name *string `json:"name,omitempty"` + *VirtualMachineNetworkInterfaceConfigurationProperties `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for VirtualMachineNetworkInterfaceConfiguration. 
+func (vmnic VirtualMachineNetworkInterfaceConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vmnic.Name != nil { + objectMap["name"] = vmnic.Name + } + if vmnic.VirtualMachineNetworkInterfaceConfigurationProperties != nil { + objectMap["properties"] = vmnic.VirtualMachineNetworkInterfaceConfigurationProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VirtualMachineNetworkInterfaceConfiguration struct. +func (vmnic *VirtualMachineNetworkInterfaceConfiguration) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + vmnic.Name = &name + } + case "properties": + if v != nil { + var virtualMachineNetworkInterfaceConfigurationProperties VirtualMachineNetworkInterfaceConfigurationProperties + err = json.Unmarshal(*v, &virtualMachineNetworkInterfaceConfigurationProperties) + if err != nil { + return err + } + vmnic.VirtualMachineNetworkInterfaceConfigurationProperties = &virtualMachineNetworkInterfaceConfigurationProperties + } + } + } + + return nil +} + +// VirtualMachineNetworkInterfaceConfigurationProperties describes a virtual machine network profile's IP +// configuration. +type VirtualMachineNetworkInterfaceConfigurationProperties struct { + // Primary - Specifies the primary network interface in case the virtual machine has more than 1 network interface. + Primary *bool `json:"primary,omitempty"` + // DeleteOption - Specify what happens to the network interface when the VM is deleted. Possible values include: 'DeleteOptionsDelete', 'DeleteOptionsDetach' + DeleteOption DeleteOptions `json:"deleteOption,omitempty"` + // EnableAcceleratedNetworking - Specifies whether the network interface is accelerated networking-enabled. + EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty"` + // EnableFpga - Specifies whether the network interface is FPGA networking-enabled. + EnableFpga *bool `json:"enableFpga,omitempty"` + // EnableIPForwarding - Whether IP forwarding enabled on this NIC. + EnableIPForwarding *bool `json:"enableIPForwarding,omitempty"` + // NetworkSecurityGroup - The network security group. + NetworkSecurityGroup *SubResource `json:"networkSecurityGroup,omitempty"` + // DNSSettings - The dns settings to be applied on the network interfaces. + DNSSettings *VirtualMachineNetworkInterfaceDNSSettingsConfiguration `json:"dnsSettings,omitempty"` + // IPConfigurations - Specifies the IP configurations of the network interface. + IPConfigurations *[]VirtualMachineNetworkInterfaceIPConfiguration `json:"ipConfigurations,omitempty"` + DscpConfiguration *SubResource `json:"dscpConfiguration,omitempty"` +} + +// VirtualMachineNetworkInterfaceDNSSettingsConfiguration describes a virtual machines network +// configuration's DNS settings. +type VirtualMachineNetworkInterfaceDNSSettingsConfiguration struct { + // DNSServers - List of DNS servers IP addresses + DNSServers *[]string `json:"dnsServers,omitempty"` +} + +// VirtualMachineNetworkInterfaceIPConfiguration describes a virtual machine network profile's IP +// configuration. +type VirtualMachineNetworkInterfaceIPConfiguration struct { + // Name - The IP configuration name. 
+ Name *string `json:"name,omitempty"` + *VirtualMachineNetworkInterfaceIPConfigurationProperties `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for VirtualMachineNetworkInterfaceIPConfiguration. +func (vmniic VirtualMachineNetworkInterfaceIPConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vmniic.Name != nil { + objectMap["name"] = vmniic.Name + } + if vmniic.VirtualMachineNetworkInterfaceIPConfigurationProperties != nil { + objectMap["properties"] = vmniic.VirtualMachineNetworkInterfaceIPConfigurationProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VirtualMachineNetworkInterfaceIPConfiguration struct. +func (vmniic *VirtualMachineNetworkInterfaceIPConfiguration) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + vmniic.Name = &name + } + case "properties": + if v != nil { + var virtualMachineNetworkInterfaceIPConfigurationProperties VirtualMachineNetworkInterfaceIPConfigurationProperties + err = json.Unmarshal(*v, &virtualMachineNetworkInterfaceIPConfigurationProperties) + if err != nil { + return err + } + vmniic.VirtualMachineNetworkInterfaceIPConfigurationProperties = &virtualMachineNetworkInterfaceIPConfigurationProperties + } + } + } + + return nil +} + +// VirtualMachineNetworkInterfaceIPConfigurationProperties describes a virtual machine network interface IP +// configuration properties. +type VirtualMachineNetworkInterfaceIPConfigurationProperties struct { + // Subnet - Specifies the identifier of the subnet. + Subnet *SubResource `json:"subnet,omitempty"` + // Primary - Specifies the primary network interface in case the virtual machine has more than 1 network interface. + Primary *bool `json:"primary,omitempty"` + // PublicIPAddressConfiguration - The publicIPAddressConfiguration. + PublicIPAddressConfiguration *VirtualMachinePublicIPAddressConfiguration `json:"publicIPAddressConfiguration,omitempty"` + // PrivateIPAddressVersion - Available from Api-Version 2017-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. Possible values include: 'IPVersionsIPv4', 'IPVersionsIPv6' + PrivateIPAddressVersion IPVersions `json:"privateIPAddressVersion,omitempty"` + // ApplicationSecurityGroups - Specifies an array of references to application security group. + ApplicationSecurityGroups *[]SubResource `json:"applicationSecurityGroups,omitempty"` + // ApplicationGatewayBackendAddressPools - Specifies an array of references to backend address pools of application gateways. A virtual machine can reference backend address pools of multiple application gateways. Multiple virtual machines cannot use the same application gateway. + ApplicationGatewayBackendAddressPools *[]SubResource `json:"applicationGatewayBackendAddressPools,omitempty"` + // LoadBalancerBackendAddressPools - Specifies an array of references to backend address pools of load balancers. A virtual machine can reference backend address pools of one public and one internal load balancer. [Multiple virtual machines cannot use the same basic sku load balancer]. 
+ LoadBalancerBackendAddressPools *[]SubResource `json:"loadBalancerBackendAddressPools,omitempty"` +} + // VirtualMachinePatchStatus the status of virtual machine patch operations. type VirtualMachinePatchStatus struct { // AvailablePatchSummary - The available patch summary of the latest assessment operation for the virtual machine. @@ -12305,7 +17510,7 @@ type VirtualMachineProperties struct { SecurityProfile *SecurityProfile `json:"securityProfile,omitempty"` // DiagnosticsProfile - Specifies the boot diagnostic settings state.

    Minimum api-version: 2015-06-15. DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` - // AvailabilitySet - Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).

    For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    Currently, a VM can only be added to availability set at creation time. The availability set to which the VM is being added should be under the same resource group as the availability set resource. An existing VM cannot be added to an availability set.

    This property cannot exist along with a non-null properties.virtualMachineScaleSet reference. + // AvailabilitySet - Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Availability sets overview](https://docs.microsoft.com/azure/virtual-machines/availability-set-overview).

    For more information on Azure planned maintenance, see [Maintenance and updates for Virtual Machines in Azure](https://docs.microsoft.com/azure/virtual-machines/maintenance-and-updates)

    Currently, a VM can only be added to availability set at creation time. The availability set to which the VM is being added should be under the same resource group as the availability set resource. An existing VM cannot be added to an availability set.

    This property cannot exist along with a non-null properties.virtualMachineScaleSet reference. AvailabilitySet *SubResource `json:"availabilitySet,omitempty"` // VirtualMachineScaleSet - Specifies information about the virtual machine scale set that the virtual machine should be assigned to. Virtual machines specified in the same virtual machine scale set are allocated to different nodes to maximize availability. Currently, a VM can only be added to virtual machine scale set at creation time. An existing VM cannot be added to a virtual machine scale set.

    This property cannot exist along with a non-null properties.availabilitySet reference.

    Minimum api-version: 2019-03-01 VirtualMachineScaleSet *SubResource `json:"virtualMachineScaleSet,omitempty"` @@ -12333,6 +17538,14 @@ type VirtualMachineProperties struct { ExtensionsTimeBudget *string `json:"extensionsTimeBudget,omitempty"` // PlatformFaultDomain - Specifies the scale set logical fault domain into which the Virtual Machine will be created. By default, the Virtual Machine will by automatically assigned to a fault domain that best maintains balance across available fault domains.
  • This is applicable only if the 'virtualMachineScaleSet' property of this Virtual Machine is set.
  • The Virtual Machine Scale Set that is referenced, must have 'platformFaultDomainCount' > 1.
  • This property cannot be updated once the Virtual Machine is created.
  • Fault domain assignment can be viewed in the Virtual Machine Instance View.

    Minimum api-version: 2020-12-01 PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty"` + // ScheduledEventsProfile - Specifies Scheduled Event related configurations. + ScheduledEventsProfile *ScheduledEventsProfile `json:"scheduledEventsProfile,omitempty"` + // UserData - UserData for the VM, which must be base-64 encoded. Customer should not pass any secrets in here.

    Minimum api-version: 2021-03-01 + UserData *string `json:"userData,omitempty"` + // CapacityReservation - Specifies information about the capacity reservation that is used to allocate virtual machine.

    Minimum api-version: 2021-04-01. + CapacityReservation *CapacityReservationProfile `json:"capacityReservation,omitempty"` + // ApplicationProfile - Specifies the gallery applications that should be made available to the VM/VMSS + ApplicationProfile *ApplicationProfile `json:"applicationProfile,omitempty"` } // MarshalJSON is the custom marshaler for VirtualMachineProperties. @@ -12392,9 +17605,113 @@ func (vmp VirtualMachineProperties) MarshalJSON() ([]byte, error) { if vmp.PlatformFaultDomain != nil { objectMap["platformFaultDomain"] = vmp.PlatformFaultDomain } + if vmp.ScheduledEventsProfile != nil { + objectMap["scheduledEventsProfile"] = vmp.ScheduledEventsProfile + } + if vmp.UserData != nil { + objectMap["userData"] = vmp.UserData + } + if vmp.CapacityReservation != nil { + objectMap["capacityReservation"] = vmp.CapacityReservation + } + if vmp.ApplicationProfile != nil { + objectMap["applicationProfile"] = vmp.ApplicationProfile + } + return json.Marshal(objectMap) +} + +// VirtualMachinePublicIPAddressConfiguration describes a virtual machines IP Configuration's +// PublicIPAddress configuration +type VirtualMachinePublicIPAddressConfiguration struct { + // Name - The publicIP address configuration name. + Name *string `json:"name,omitempty"` + *VirtualMachinePublicIPAddressConfigurationProperties `json:"properties,omitempty"` + Sku *PublicIPAddressSku `json:"sku,omitempty"` +} + +// MarshalJSON is the custom marshaler for VirtualMachinePublicIPAddressConfiguration. +func (vmpiac VirtualMachinePublicIPAddressConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vmpiac.Name != nil { + objectMap["name"] = vmpiac.Name + } + if vmpiac.VirtualMachinePublicIPAddressConfigurationProperties != nil { + objectMap["properties"] = vmpiac.VirtualMachinePublicIPAddressConfigurationProperties + } + if vmpiac.Sku != nil { + objectMap["sku"] = vmpiac.Sku + } return json.Marshal(objectMap) } +// UnmarshalJSON is the custom unmarshaler for VirtualMachinePublicIPAddressConfiguration struct. +func (vmpiac *VirtualMachinePublicIPAddressConfiguration) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + vmpiac.Name = &name + } + case "properties": + if v != nil { + var virtualMachinePublicIPAddressConfigurationProperties VirtualMachinePublicIPAddressConfigurationProperties + err = json.Unmarshal(*v, &virtualMachinePublicIPAddressConfigurationProperties) + if err != nil { + return err + } + vmpiac.VirtualMachinePublicIPAddressConfigurationProperties = &virtualMachinePublicIPAddressConfigurationProperties + } + case "sku": + if v != nil { + var sku PublicIPAddressSku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + vmpiac.Sku = &sku + } + } + } + + return nil +} + +// VirtualMachinePublicIPAddressConfigurationProperties describes a virtual machines IP Configuration's +// PublicIPAddress configuration +type VirtualMachinePublicIPAddressConfigurationProperties struct { + // IdleTimeoutInMinutes - The idle timeout of the public IP address. + IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"` + // DeleteOption - Specify what happens to the public IP address when the VM is deleted. 
Possible values include: 'DeleteOptionsDelete', 'DeleteOptionsDetach' + DeleteOption DeleteOptions `json:"deleteOption,omitempty"` + // DNSSettings - The dns settings to be applied on the publicIP addresses . + DNSSettings *VirtualMachinePublicIPAddressDNSSettingsConfiguration `json:"dnsSettings,omitempty"` + // IPTags - The list of IP tags associated with the public IP address. + IPTags *[]VirtualMachineIPTag `json:"ipTags,omitempty"` + // PublicIPPrefix - The PublicIPPrefix from which to allocate publicIP addresses. + PublicIPPrefix *SubResource `json:"publicIPPrefix,omitempty"` + // PublicIPAddressVersion - Available from Api-Version 2019-07-01 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. Possible values include: 'IPVersionsIPv4', 'IPVersionsIPv6' + PublicIPAddressVersion IPVersions `json:"publicIPAddressVersion,omitempty"` + // PublicIPAllocationMethod - Specify the public IP allocation type. Possible values include: 'PublicIPAllocationMethodDynamic', 'PublicIPAllocationMethodStatic' + PublicIPAllocationMethod PublicIPAllocationMethod `json:"publicIPAllocationMethod,omitempty"` +} + +// VirtualMachinePublicIPAddressDNSSettingsConfiguration describes a virtual machines network +// configuration's DNS settings. +type VirtualMachinePublicIPAddressDNSSettingsConfiguration struct { + // DomainNameLabel - The Domain name label prefix of the PublicIPAddress resources that will be created. The generated name label is the concatenation of the domain name label and vm network profile unique ID. + DomainNameLabel *string `json:"domainNameLabel,omitempty"` +} + // VirtualMachineReimageParameters parameters for Reimaging Virtual Machine. NOTE: Virtual Machine OS disk // will always be reimaged type VirtualMachineReimageParameters struct { @@ -13427,6 +18744,8 @@ type VirtualMachineScaleSetExtensionProperties struct { ProvisioningState *string `json:"provisioningState,omitempty"` // ProvisionAfterExtensions - Collection of extension names after which this extension needs to be provisioned. ProvisionAfterExtensions *[]string `json:"provisionAfterExtensions,omitempty"` + // SuppressFailures - Indicates whether failures stemming from the extension will be suppressed (Operational failures such as not connecting to the VM will not be suppressed regardless of this value). The default is false. + SuppressFailures *bool `json:"suppressFailures,omitempty"` } // MarshalJSON is the custom marshaler for VirtualMachineScaleSetExtensionProperties. @@ -13459,6 +18778,9 @@ func (vmssep VirtualMachineScaleSetExtensionProperties) MarshalJSON() ([]byte, e if vmssep.ProvisionAfterExtensions != nil { objectMap["provisionAfterExtensions"] = vmssep.ProvisionAfterExtensions } + if vmssep.SuppressFailures != nil { + objectMap["suppressFailures"] = vmssep.SuppressFailures + } return json.Marshal(objectMap) } @@ -14566,6 +19888,8 @@ type VirtualMachineScaleSetNetworkConfigurationProperties struct { IPConfigurations *[]VirtualMachineScaleSetIPConfiguration `json:"ipConfigurations,omitempty"` // EnableIPForwarding - Whether IP forwarding enabled on this NIC. EnableIPForwarding *bool `json:"enableIPForwarding,omitempty"` + // DeleteOption - Specify what happens to the network interface when the VM is deleted. Possible values include: 'DeleteOptionsDelete', 'DeleteOptionsDetach' + DeleteOption DeleteOptions `json:"deleteOption,omitempty"` } // VirtualMachineScaleSetNetworkProfile describes a virtual machine scale set network profile. 
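Editorial note, not part of the vendored diff: the VirtualMachineNetworkInterfaceConfiguration and VirtualMachinePublicIPAddressConfiguration types added above let a VM-level network profile carry its own public IP and delete-option behaviour. A minimal sketch of populating them follows; the import path, resource names, and values are assumptions, and the to helpers come from go-autorest.

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" // assumed vendored path
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	nic := compute.VirtualMachineNetworkInterfaceConfiguration{
		Name: to.StringPtr("example-nic"), // hypothetical NIC name
		VirtualMachineNetworkInterfaceConfigurationProperties: &compute.VirtualMachineNetworkInterfaceConfigurationProperties{
			Primary:      to.BoolPtr(true),
			DeleteOption: compute.DeleteOptionsDelete, // NIC is removed together with the VM
			IPConfigurations: &[]compute.VirtualMachineNetworkInterfaceIPConfiguration{{
				Name: to.StringPtr("internal"),
				VirtualMachineNetworkInterfaceIPConfigurationProperties: &compute.VirtualMachineNetworkInterfaceIPConfigurationProperties{
					Primary: to.BoolPtr(true),
					PublicIPAddressConfiguration: &compute.VirtualMachinePublicIPAddressConfiguration{
						Name: to.StringPtr("public"),
						VirtualMachinePublicIPAddressConfigurationProperties: &compute.VirtualMachinePublicIPAddressConfigurationProperties{
							IdleTimeoutInMinutes:     to.Int32Ptr(4),
							DeleteOption:             compute.DeleteOptionsDelete, // public IP follows the VM lifecycle
							PublicIPAllocationMethod: compute.PublicIPAllocationMethodStatic,
						},
					},
				},
			}},
		},
	}
	fmt.Printf("%+v\n", nic)
}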
@@ -14574,6 +19898,8 @@ type VirtualMachineScaleSetNetworkProfile struct { HealthProbe *APIEntityReference `json:"healthProbe,omitempty"` // NetworkInterfaceConfigurations - The list of network configurations. NetworkInterfaceConfigurations *[]VirtualMachineScaleSetNetworkConfiguration `json:"networkInterfaceConfigurations,omitempty"` + // NetworkAPIVersion - specifies the Microsoft.Network API version used when creating networking resources in the Network Interface Configurations for Virtual Machine Scale Set with orchestration mode 'Flexible'. Possible values include: 'NetworkAPIVersionTwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne' + NetworkAPIVersion NetworkAPIVersion `json:"networkApiVersion,omitempty"` } // VirtualMachineScaleSetOSDisk describes a virtual machine scale set operating system disk. @@ -14604,17 +19930,17 @@ type VirtualMachineScaleSetOSDisk struct { type VirtualMachineScaleSetOSProfile struct { // ComputerNamePrefix - Specifies the computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long. ComputerNamePrefix *string `json:"computerNamePrefix,omitempty"` - // AdminUsername - Specifies the name of the administrator account.

    **Windows-only restriction:** Cannot end in "."

    **Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5".

    **Minimum-length (Linux):** 1 character

    **Max-length (Linux):** 64 characters

    **Max-length (Windows):** 20 characters

  • For root access to the Linux VM, see [Using root privileges on Linux virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-use-root-privileges?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
  • For a list of built-in system users on Linux that should not be used in this field, see [Selecting User Names for Linux on Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-usernames?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) + // AdminUsername - Specifies the name of the administrator account.

    **Windows-only restriction:** Cannot end in "."

    **Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5".

    **Minimum-length (Linux):** 1 character

    **Max-length (Linux):** 64 characters

    **Max-length (Windows):** 20 characters AdminUsername *string `json:"adminUsername,omitempty"` - // AdminPassword - Specifies the password of the administrator account.

    **Minimum-length (Windows):** 8 characters

    **Minimum-length (Linux):** 6 characters

    **Max-length (Windows):** 123 characters

    **Max-length (Linux):** 72 characters

    **Complexity requirements:** 3 out of 4 conditions below need to be fulfilled
    Has lower characters
    Has upper characters
    Has a digit
    Has a special character (Regex match [\W_])

    **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22", "iloveyou!"

    For resetting the password, see [How to reset the Remote Desktop service or its login password in a Windows VM](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-reset-rdp?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    For resetting root password, see [Manage users, SSH, and check or repair disks on Azure Linux VMs using the VMAccess Extension](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-vmaccess-extension?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json#reset-root-password) + // AdminPassword - Specifies the password of the administrator account.

    **Minimum-length (Windows):** 8 characters

    **Minimum-length (Linux):** 6 characters

    **Max-length (Windows):** 123 characters

    **Max-length (Linux):** 72 characters

    **Complexity requirements:** 3 out of 4 conditions below need to be fulfilled
    Has lower characters
    Has upper characters
    Has a digit
    Has a special character (Regex match [\W_])

    **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22", "iloveyou!"

    For resetting the password, see [How to reset the Remote Desktop service or its login password in a Windows VM](https://docs.microsoft.com/troubleshoot/azure/virtual-machines/reset-rdp)

    For resetting root password, see [Manage users, SSH, and check or repair disks on Azure Linux VMs using the VMAccess Extension](https://docs.microsoft.com/troubleshoot/azure/virtual-machines/troubleshoot-ssh-connection) AdminPassword *string `json:"adminPassword,omitempty"` - // CustomData - Specifies a base-64 encoded string of custom data. The base-64 encoded string is decoded to a binary array that is saved as a file on the Virtual Machine. The maximum length of the binary array is 65535 bytes.

    For using cloud-init for your VM, see [Using cloud-init to customize a Linux VM during creation](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-init?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) + // CustomData - Specifies a base-64 encoded string of custom data. The base-64 encoded string is decoded to a binary array that is saved as a file on the Virtual Machine. The maximum length of the binary array is 65535 bytes.

    For using cloud-init for your VM, see [Using cloud-init to customize a Linux VM during creation](https://docs.microsoft.com/azure/virtual-machines/linux/using-cloud-init) CustomData *string `json:"customData,omitempty"` // WindowsConfiguration - Specifies Windows operating system settings on the virtual machine. WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"` - // LinuxConfiguration - Specifies the Linux operating system settings on the virtual machine.

    For a list of supported Linux distributions, see [Linux on Azure-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-endorsed-distros?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)

    For running non-endorsed distributions, see [Information for Non-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-create-upload-generic?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json). + // LinuxConfiguration - Specifies the Linux operating system settings on the virtual machine.

    For a list of supported Linux distributions, see [Linux on Azure-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/linux/endorsed-distros). LinuxConfiguration *LinuxConfiguration `json:"linuxConfiguration,omitempty"` - // Secrets - Specifies set of certificates that should be installed onto the virtual machines in the scale set. + // Secrets - Specifies set of certificates that should be installed onto the virtual machines in the scale set. To install certificates on a virtual machine it is recommended to use the [Azure Key Vault virtual machine extension for Linux](https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-linux) or the [Azure Key Vault virtual machine extension for Windows](https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-windows). Secrets *[]VaultSecretGroup `json:"secrets,omitempty"` } @@ -14636,7 +19962,7 @@ type VirtualMachineScaleSetProperties struct { UniqueID *string `json:"uniqueId,omitempty"` // SinglePlacementGroup - When true this limits the scale set to a single placement group, of max size 100 virtual machines. NOTE: If singlePlacementGroup is true, it may be modified to false. However, if singlePlacementGroup is false, it may not be modified to true. SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty"` - // ZoneBalance - Whether to force strictly even Virtual Machine distribution cross x-zones in case there is zone outage. + // ZoneBalance - Whether to force strictly even Virtual Machine distribution cross x-zones in case there is zone outage. zoneBalance property can only be set if the zones property of the scale set contains more than one zone. If there are no zones or only one zone specified, then zoneBalance property should not be set. ZoneBalance *bool `json:"zoneBalance,omitempty"` // PlatformFaultDomainCount - Fault Domain count for each placement group. PlatformFaultDomainCount *int32 `json:"platformFaultDomainCount,omitempty"` @@ -14646,10 +19972,12 @@ type VirtualMachineScaleSetProperties struct { HostGroup *SubResource `json:"hostGroup,omitempty"` // AdditionalCapabilities - Specifies additional capabilities enabled or disabled on the Virtual Machines in the Virtual Machine Scale Set. For instance: whether the Virtual Machines have the capability to support attaching managed data disks with UltraSSD_LRS storage account type. AdditionalCapabilities *AdditionalCapabilities `json:"additionalCapabilities,omitempty"` - // ScaleInPolicy - Specifies the scale-in policy that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in. + // ScaleInPolicy - Specifies the policies applied when scaling in Virtual Machines in the Virtual Machine Scale Set. ScaleInPolicy *ScaleInPolicy `json:"scaleInPolicy,omitempty"` // OrchestrationMode - Specifies the orchestration mode for the virtual machine scale set. Possible values include: 'OrchestrationModeUniform', 'OrchestrationModeFlexible' OrchestrationMode OrchestrationMode `json:"orchestrationMode,omitempty"` + // SpotRestorePolicy - Specifies the Spot Restore properties for the virtual machine scale set. + SpotRestorePolicy *SpotRestorePolicy `json:"spotRestorePolicy,omitempty"` } // MarshalJSON is the custom marshaler for VirtualMachineScaleSetProperties. 
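Editorial note, not part of the vendored diff: the SpotRestorePolicy field added to VirtualMachineScaleSetProperties in the hunk above opts a scale set into restoring evicted Spot instances. A minimal sketch under the same assumed import path follows; the one-hour timeout is an example value, expressed as an ISO 8601 duration per the field's description.

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" // assumed vendored path
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	props := compute.VirtualMachineScaleSetProperties{
		// Try to restore evicted Spot instances opportunistically for up to one hour.
		SpotRestorePolicy: &compute.SpotRestorePolicy{
			Enabled:        to.BoolPtr(true),
			RestoreTimeout: to.StringPtr("PT1H"), // ISO 8601 duration, example value
		},
	}
	fmt.Printf("%+v\n", props)
}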
@@ -14694,6 +20022,9 @@ func (vmssp VirtualMachineScaleSetProperties) MarshalJSON() ([]byte, error) { if vmssp.OrchestrationMode != "" { objectMap["orchestrationMode"] = vmssp.OrchestrationMode } + if vmssp.SpotRestorePolicy != nil { + objectMap["spotRestorePolicy"] = vmssp.SpotRestorePolicy + } return json.Marshal(objectMap) } @@ -14703,6 +20034,7 @@ type VirtualMachineScaleSetPublicIPAddressConfiguration struct { // Name - The publicIP address configuration name. Name *string `json:"name,omitempty"` *VirtualMachineScaleSetPublicIPAddressConfigurationProperties `json:"properties,omitempty"` + Sku *PublicIPAddressSku `json:"sku,omitempty"` } // MarshalJSON is the custom marshaler for VirtualMachineScaleSetPublicIPAddressConfiguration. @@ -14714,6 +20046,9 @@ func (vmsspiac VirtualMachineScaleSetPublicIPAddressConfiguration) MarshalJSON() if vmsspiac.VirtualMachineScaleSetPublicIPAddressConfigurationProperties != nil { objectMap["properties"] = vmsspiac.VirtualMachineScaleSetPublicIPAddressConfigurationProperties } + if vmsspiac.Sku != nil { + objectMap["sku"] = vmsspiac.Sku + } return json.Marshal(objectMap) } @@ -14744,6 +20079,15 @@ func (vmsspiac *VirtualMachineScaleSetPublicIPAddressConfiguration) UnmarshalJSO } vmsspiac.VirtualMachineScaleSetPublicIPAddressConfigurationProperties = &virtualMachineScaleSetPublicIPAddressConfigurationProperties } + case "sku": + if v != nil { + var sku PublicIPAddressSku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + vmsspiac.Sku = &sku + } } } @@ -14770,6 +20114,8 @@ type VirtualMachineScaleSetPublicIPAddressConfigurationProperties struct { PublicIPPrefix *SubResource `json:"publicIPPrefix,omitempty"` // PublicIPAddressVersion - Available from Api-Version 2019-07-01 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. Possible values include: 'IPVersionIPv4', 'IPVersionIPv6' PublicIPAddressVersion IPVersion `json:"publicIPAddressVersion,omitempty"` + // DeleteOption - Specify what happens to the public IP when the VM is deleted. Possible values include: 'DeleteOptionsDelete', 'DeleteOptionsDetach' + DeleteOption DeleteOptions `json:"deleteOption,omitempty"` } // VirtualMachineScaleSetReimageParameters describes a Virtual Machine Scale Set VM Reimage Parameters. @@ -15379,9 +20725,9 @@ func (future *VirtualMachineScaleSetsStartFuture) result(client VirtualMachineSc type VirtualMachineScaleSetStorageProfile struct { // ImageReference - Specifies information about the image to use. You can specify information about platform images, marketplace images, or virtual machine images. This element is required when you want to use a platform image, marketplace image, or virtual machine image, but is not used in other creation operations. ImageReference *ImageReference `json:"imageReference,omitempty"` - // OsDisk - Specifies information about the operating system disk used by the virtual machines in the scale set.

    For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). + // OsDisk - Specifies information about the operating system disk used by the virtual machines in the scale set.

    For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview). OsDisk *VirtualMachineScaleSetOSDisk `json:"osDisk,omitempty"` - // DataDisks - Specifies the parameters that are used to add data disks to the virtual machines in the scale set.

    For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). + // DataDisks - Specifies the parameters that are used to add data disks to the virtual machines in the scale set.

    For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview). DataDisks *[]VirtualMachineScaleSetDataDisk `json:"dataDisks,omitempty"` } @@ -15733,6 +21079,8 @@ type VirtualMachineScaleSetUpdateNetworkConfigurationProperties struct { IPConfigurations *[]VirtualMachineScaleSetUpdateIPConfiguration `json:"ipConfigurations,omitempty"` // EnableIPForwarding - Whether IP forwarding enabled on this NIC. EnableIPForwarding *bool `json:"enableIPForwarding,omitempty"` + // DeleteOption - Specify what happens to the network interface when the VM is deleted. Possible values include: 'DeleteOptionsDelete', 'DeleteOptionsDetach' + DeleteOption DeleteOptions `json:"deleteOption,omitempty"` } // VirtualMachineScaleSetUpdateNetworkProfile describes a virtual machine scale set network profile. @@ -15741,6 +21089,8 @@ type VirtualMachineScaleSetUpdateNetworkProfile struct { HealthProbe *APIEntityReference `json:"healthProbe,omitempty"` // NetworkInterfaceConfigurations - The list of network configurations. NetworkInterfaceConfigurations *[]VirtualMachineScaleSetUpdateNetworkConfiguration `json:"networkInterfaceConfigurations,omitempty"` + // NetworkAPIVersion - specifies the Microsoft.Network API version used when creating networking resources in the Network Interface Configurations for Virtual Machine Scale Set with orchestration mode 'Flexible'. Possible values include: 'NetworkAPIVersionTwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne' + NetworkAPIVersion NetworkAPIVersion `json:"networkApiVersion,omitempty"` } // VirtualMachineScaleSetUpdateOSDisk describes virtual machine scale set operating system disk Update @@ -15788,7 +21138,7 @@ type VirtualMachineScaleSetUpdateProperties struct { SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty"` // AdditionalCapabilities - Specifies additional capabilities enabled or disabled on the Virtual Machines in the Virtual Machine Scale Set. For instance: whether the Virtual Machines have the capability to support attaching managed data disks with UltraSSD_LRS storage account type. AdditionalCapabilities *AdditionalCapabilities `json:"additionalCapabilities,omitempty"` - // ScaleInPolicy - Specifies the scale-in policy that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in. + // ScaleInPolicy - Specifies the policies applied when scaling in Virtual Machines in the Virtual Machine Scale Set. ScaleInPolicy *ScaleInPolicy `json:"scaleInPolicy,omitempty"` // ProximityPlacementGroup - Specifies information about the proximity placement group that the virtual machine scale set should be assigned to.
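The new `deleteOption` and `networkApiVersion` knobs introduced above can be exercised as follows; only identifiers named in this diff are used, and the configuration values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Delete the public IP together with the VM instead of detaching it.
	ipCfg := compute.VirtualMachineScaleSetPublicIPAddressConfiguration{
		Name: to.StringPtr("example-pip-config"),
		VirtualMachineScaleSetPublicIPAddressConfigurationProperties: &compute.VirtualMachineScaleSetPublicIPAddressConfigurationProperties{
			PublicIPAddressVersion: compute.IPVersionIPv4,
			DeleteOption:           compute.DeleteOptionsDelete,
		},
	}

	// Flexible-mode scale sets can pin the Microsoft.Network API version used for NIC creation.
	netProfile := compute.VirtualMachineScaleSetUpdateNetworkProfile{
		NetworkAPIVersion: compute.NetworkAPIVersionTwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne,
	}

	fmt.Println(*ipCfg.Name, netProfile.NetworkAPIVersion)
}
```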

    Minimum api-version: 2018-04-01. ProximityPlacementGroup *SubResource `json:"proximityPlacementGroup,omitempty"` @@ -15854,6 +21204,8 @@ type VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties struct { IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"` // DNSSettings - The dns settings to be applied on the publicIP addresses . DNSSettings *VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings `json:"dnsSettings,omitempty"` + // DeleteOption - Specify what happens to the public IP when the VM is deleted. Possible values include: 'DeleteOptionsDelete', 'DeleteOptionsDetach' + DeleteOption DeleteOptions `json:"deleteOption,omitempty"` } // VirtualMachineScaleSetUpdateStorageProfile describes a virtual machine scale set storage profile. @@ -15886,6 +21238,8 @@ type VirtualMachineScaleSetUpdateVMProfile struct { BillingProfile *BillingProfile `json:"billingProfile,omitempty"` // ScheduledEventsProfile - Specifies Scheduled Event related configurations. ScheduledEventsProfile *ScheduledEventsProfile `json:"scheduledEventsProfile,omitempty"` + // UserData - UserData for the VM, which must be base-64 encoded. Customer should not pass any secrets in here.

    Minimum api-version: 2021-03-01 + UserData *string `json:"userData,omitempty"` } // VirtualMachineScaleSetVM describes a virtual machine scale set virtual machine. @@ -16604,6 +21958,12 @@ type VirtualMachineScaleSetVMProfile struct { BillingProfile *BillingProfile `json:"billingProfile,omitempty"` // ScheduledEventsProfile - Specifies Scheduled Event related configurations. ScheduledEventsProfile *ScheduledEventsProfile `json:"scheduledEventsProfile,omitempty"` + // UserData - UserData for the virtual machines in the scale set, which must be base-64 encoded. Customer should not pass any secrets in here.

    Minimum api-version: 2021-03-01 + UserData *string `json:"userData,omitempty"` + // CapacityReservation - Specifies the capacity reservation related details of a scale set.

    Minimum api-version: 2021-04-01. + CapacityReservation *CapacityReservationProfile `json:"capacityReservation,omitempty"` + // ApplicationProfile - Specifies the gallery applications that should be made available to the VM/VMSS + ApplicationProfile *ApplicationProfile `json:"applicationProfile,omitempty"` } // VirtualMachineScaleSetVMProperties describes the properties of a virtual machine scale set virtual @@ -16631,7 +21991,7 @@ type VirtualMachineScaleSetVMProperties struct { NetworkProfileConfiguration *VirtualMachineScaleSetVMNetworkProfileConfiguration `json:"networkProfileConfiguration,omitempty"` // DiagnosticsProfile - Specifies the boot diagnostic settings state.
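A hedged sketch of the three VM profile additions above (`userData`, `capacityReservation`, `applicationProfile`); the `CapacityReservationProfile` and `ApplicationProfile` field names are assumptions, and the resource IDs are placeholders:

```go
package main

import (
	"encoding/base64"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	cloudInit := "#cloud-config\nruncmd:\n  - echo hello\n"

	profile := compute.VirtualMachineScaleSetVMProfile{
		// UserData must be base64 encoded and must not contain secrets.
		UserData: to.StringPtr(base64.StdEncoding.EncodeToString([]byte(cloudInit))),
		CapacityReservation: &compute.CapacityReservationProfile{
			// Field name assumed; ID is a placeholder.
			CapacityReservationGroup: &compute.SubResource{
				ID: to.StringPtr("/subscriptions/<subscription-id>/resourceGroups/<rg>/providers/Microsoft.Compute/capacityReservationGroups/example-crg"),
			},
		},
		ApplicationProfile: &compute.ApplicationProfile{
			// Field name assumed; package reference ID is a placeholder.
			GalleryApplications: &[]compute.VMGalleryApplication{{
				PackageReferenceID: to.StringPtr("/subscriptions/<subscription-id>/resourceGroups/<rg>/providers/Microsoft.Compute/galleries/g/applications/app/versions/1.0.0"),
				Order:              to.Int32Ptr(1),
			}},
		},
	}

	fmt.Println(*profile.UserData)
}
```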

    Minimum api-version: 2015-06-15. DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` - // AvailabilitySet - Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).

    For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set. + // AvailabilitySet - Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Availability sets overview](https://docs.microsoft.com/azure/virtual-machines/availability-set-overview).

    For more information on Azure planned maintenance, see [Maintenance and updates for Virtual Machines in Azure](https://docs.microsoft.com/azure/virtual-machines/maintenance-and-updates)

    Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set. AvailabilitySet *SubResource `json:"availabilitySet,omitempty"` // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. ProvisioningState *string `json:"provisioningState,omitempty"` @@ -16641,6 +22001,8 @@ type VirtualMachineScaleSetVMProperties struct { ModelDefinitionApplied *string `json:"modelDefinitionApplied,omitempty"` // ProtectionPolicy - Specifies the protection policy of the virtual machine. ProtectionPolicy *VirtualMachineScaleSetVMProtectionPolicy `json:"protectionPolicy,omitempty"` + // UserData - UserData for the VM, which must be base-64 encoded. Customer should not pass any secrets in here.

    Minimum api-version: 2021-03-01 + UserData *string `json:"userData,omitempty"` } // MarshalJSON is the custom marshaler for VirtualMachineScaleSetVMProperties. @@ -16679,6 +22041,9 @@ func (vmssvp VirtualMachineScaleSetVMProperties) MarshalJSON() ([]byte, error) { if vmssvp.ProtectionPolicy != nil { objectMap["protectionPolicy"] = vmssvp.ProtectionPolicy } + if vmssvp.UserData != nil { + objectMap["userData"] = vmssvp.UserData + } return json.Marshal(objectMap) } @@ -17482,7 +22847,7 @@ func (future *VirtualMachinesInstallPatchesFuture) result(client VirtualMachines type VirtualMachineSize struct { // Name - The name of the virtual machine size. Name *string `json:"name,omitempty"` - // NumberOfCores - The number of cores supported by the virtual machine size. + // NumberOfCores - The number of cores supported by the virtual machine size. For Constrained vCPU capable VM sizes, this number represents the total vCPUs of quota that the VM uses. For accurate vCPU count, please refer to https://docs.microsoft.com/azure/virtual-machines/constrained-vcpu or https://docs.microsoft.com/rest/api/compute/resourceskus/list NumberOfCores *int32 `json:"numberOfCores,omitempty"` // OsDiskSizeInMB - The OS disk size, in MB, allowed by the virtual machine size. OsDiskSizeInMB *int32 `json:"osDiskSizeInMB,omitempty"` @@ -17985,19 +23350,40 @@ func (vmu *VirtualMachineUpdate) UnmarshalJSON(body []byte) error { return nil } +// VMGalleryApplication specifies the required information to reference a compute gallery application +// version +type VMGalleryApplication struct { + // Tags - Optional, Specifies a passthrough value for more generic context. + Tags *string `json:"tags,omitempty"` + // Order - Optional, Specifies the order in which the packages have to be installed + Order *int32 `json:"order,omitempty"` + // PackageReferenceID - Specifies the GalleryApplicationVersion resource id on the form of /subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{application}/versions/{version} + PackageReferenceID *string `json:"packageReferenceId,omitempty"` + // ConfigurationReference - Optional, Specifies the uri to an azure blob that will replace the default configuration for the package if provided + ConfigurationReference *string `json:"configurationReference,omitempty"` +} + // VMScaleSetConvertToSinglePlacementGroupInput ... type VMScaleSetConvertToSinglePlacementGroupInput struct { // ActivePlacementGroupID - Id of the placement group in which you want future virtual machine instances to be placed. To query placement group Id, please use Virtual Machine Scale Set VMs - Get API. If not provided, the platform will choose one with maximum number of virtual machine instances. ActivePlacementGroupID *string `json:"activePlacementGroupId,omitempty"` } +// VMSizeProperties specifies VM Size Property settings on the virtual machine. +type VMSizeProperties struct { + // VCPUsAvailable - Specifies the number of vCPUs available for the VM.

    When this property is not specified in the request body the default behavior is to set it to the value of vCPUs available for that VM size exposed in api response of [List all available virtual machine sizes in a region](https://docs.microsoft.com/en-us/rest/api/compute/resource-skus/list) . + VCPUsAvailable *int32 `json:"vCPUsAvailable,omitempty"` + // VCPUsPerCore - Specifies the vCPU to physical core ratio.

    When this property is not specified in the request body the default behavior is set to the value of vCPUsPerCore for the VM Size exposed in api response of [List all available virtual machine sizes in a region](https://docs.microsoft.com/en-us/rest/api/compute/resource-skus/list)

    Setting this property to 1 also means that hyper-threading is disabled. + VCPUsPerCore *int32 `json:"vCPUsPerCore,omitempty"` +} + // WindowsConfiguration specifies Windows operating system settings on the virtual machine. type WindowsConfiguration struct { // ProvisionVMAgent - Indicates whether virtual machine agent should be provisioned on the virtual machine.
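A small sketch of the new `VMSizeProperties` struct; per the comments above, omitting either value falls back to the defaults reported by the resource SKUs API, and `vCPUsPerCore = 1` turns hyper-threading off:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	sizeProps := compute.VMSizeProperties{
		VCPUsAvailable: to.Int32Ptr(2),
		VCPUsPerCore:   to.Int32Ptr(1), // 1 vCPU per physical core, i.e. hyper-threading disabled
	}
	fmt.Println(*sizeProps.VCPUsAvailable, *sizeProps.VCPUsPerCore)
}
```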

    When this property is not specified in the request body, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later. ProvisionVMAgent *bool `json:"provisionVMAgent,omitempty"` // EnableAutomaticUpdates - Indicates whether Automatic Updates is enabled for the Windows virtual machine. Default value is true.

    For virtual machine scale sets, this property can be updated and updates will take effect on OS reprovisioning. EnableAutomaticUpdates *bool `json:"enableAutomaticUpdates,omitempty"` - // TimeZone - Specifies the time zone of the virtual machine. e.g. "Pacific Standard Time".

    Possible values can be [TimeZoneInfo.Id](https://docs.microsoft.com/en-us/dotnet/api/system.timezoneinfo.id?#System_TimeZoneInfo_Id) value from time zones returned by [TimeZoneInfo.GetSystemTimeZones](https://docs.microsoft.com/en-us/dotnet/api/system.timezoneinfo.getsystemtimezones). + // TimeZone - Specifies the time zone of the virtual machine. e.g. "Pacific Standard Time".

    Possible values can be [TimeZoneInfo.Id](https://docs.microsoft.com/dotnet/api/system.timezoneinfo.id?#System_TimeZoneInfo_Id) value from time zones returned by [TimeZoneInfo.GetSystemTimeZones](https://docs.microsoft.com/dotnet/api/system.timezoneinfo.getsystemtimezones). TimeZone *string `json:"timeZone,omitempty"` // AdditionalUnattendContent - Specifies additional base-64 encoded XML formatted information that can be included in the Unattend.xml file, which is used by Windows Setup. AdditionalUnattendContent *[]AdditionalUnattendContent `json:"additionalUnattendContent,omitempty"` @@ -18031,6 +23417,6 @@ type WinRMConfiguration struct { type WinRMListener struct { // Protocol - Specifies the protocol of WinRM listener.

    Possible values are:
    **http**

    **https**. Possible values include: 'ProtocolTypesHTTP', 'ProtocolTypesHTTPS' Protocol ProtocolTypes `json:"protocol,omitempty"` - // CertificateURL - This is the URL of a certificate that has been uploaded to Key Vault as a secret. For adding a secret to the Key Vault, see [Add a key or secret to the key vault](https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add). In this case, your certificate needs to be It is the Base64 encoding of the following JSON Object which is encoded in UTF-8:

    {
    "data":"",
    "dataType":"pfx",
    "password":""
    } + // CertificateURL - This is the URL of a certificate that has been uploaded to Key Vault as a secret. For adding a secret to the Key Vault, see [Add a key or secret to the key vault](https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add). In this case, your certificate needs to be It is the Base64 encoding of the following JSON Object which is encoded in UTF-8:

    {
    "data":"",
    "dataType":"pfx",
    "password":""
    }
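The Key Vault secret referenced by `certificateUrl` is the base64 encoding of the UTF-8 JSON object above. A minimal sketch of producing that value (the helper name and inputs are illustrative):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// buildKeyVaultCertSecret returns the base64-encoded JSON payload expected by
// certificateUrl: the PFX bytes (themselves base64 encoded), the data type and
// the PFX password.
func buildKeyVaultCertSecret(pfxBytes []byte, password string) (string, error) {
	payload := map[string]string{
		"data":     base64.StdEncoding.EncodeToString(pfxBytes),
		"dataType": "pfx",
		"password": password,
	}
	raw, err := json.Marshal(payload)
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(raw), nil
}

func main() {
	secret, err := buildKeyVaultCertSecret([]byte("...pfx bytes..."), "pfx-password")
	if err != nil {
		panic(err)
	}
	fmt.Println(secret)
}
```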
    To install certificates on a virtual machine it is recommended to use the [Azure Key Vault virtual machine extension for Linux](https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-linux) or the [Azure Key Vault virtual machine extension for Windows](https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-windows). CertificateURL *string `json:"certificateUrl,omitempty"` } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/operations.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/operations.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/operations.go index 2fbe19560a79..b76ee91bfdf0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/operations.go @@ -66,7 +66,7 @@ func (client OperationsClient) List(ctx context.Context) (result OperationListRe // ListPreparer prepares the List request. func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) { - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/proximityplacementgroups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/proximityplacementgroups.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/proximityplacementgroups.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/proximityplacementgroups.go index efc93754a8cb..8144933b91db 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/proximityplacementgroups.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/proximityplacementgroups.go @@ -77,7 +77,7 @@ func (client ProximityPlacementGroupsClient) CreateOrUpdatePreparer(ctx context. 
"subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -155,7 +155,7 @@ func (client ProximityPlacementGroupsClient) DeletePreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -232,7 +232,7 @@ func (client ProximityPlacementGroupsClient) GetPreparer(ctx context.Context, re "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -314,7 +314,7 @@ func (client ProximityPlacementGroupsClient) ListByResourceGroupPreparer(ctx con "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -427,7 +427,7 @@ func (client ProximityPlacementGroupsClient) ListBySubscriptionPreparer(ctx cont "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -541,7 +541,7 @@ func (client ProximityPlacementGroupsClient) UpdatePreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/resourceskus.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/resourceskus.go similarity index 88% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/resourceskus.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/resourceskus.go index 9c967d93aa8d..35266973811b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/resourceskus.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/resourceskus.go @@ -33,7 +33,8 @@ func NewResourceSkusClientWithBaseURI(baseURI string, subscriptionID string) Res // List gets the list of Microsoft.Compute SKUs available for your Subscription. // Parameters: // filter - the filter to apply on the operation. Only **location** filter is supported currently. -func (client ResourceSkusClient) List(ctx context.Context, filter string) (result ResourceSkusResultPage, err error) { +// includeExtendedLocations - to Include Extended Locations information or not in the response. 
+func (client ResourceSkusClient) List(ctx context.Context, filter string, includeExtendedLocations string) (result ResourceSkusResultPage, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ResourceSkusClient.List") defer func() { @@ -45,7 +46,7 @@ func (client ResourceSkusClient) List(ctx context.Context, filter string) (resul }() } result.fn = client.listNextResults - req, err := client.ListPreparer(ctx, filter) + req, err := client.ListPreparer(ctx, filter, includeExtendedLocations) if err != nil { err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", nil, "Failure preparing request") return @@ -72,18 +73,21 @@ func (client ResourceSkusClient) List(ctx context.Context, filter string) (resul } // ListPreparer prepares the List request. -func (client ResourceSkusClient) ListPreparer(ctx context.Context, filter string) (*http.Request, error) { +func (client ResourceSkusClient) ListPreparer(ctx context.Context, filter string, includeExtendedLocations string) (*http.Request, error) { pathParameters := map[string]interface{}{ "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2019-04-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } if len(filter) > 0 { queryParameters["$filter"] = autorest.Encode("query", filter) } + if len(includeExtendedLocations) > 0 { + queryParameters["includeExtendedLocations"] = autorest.Encode("query", includeExtendedLocations) + } preparer := autorest.CreatePreparer( autorest.AsGet(), @@ -133,7 +137,7 @@ func (client ResourceSkusClient) listNextResults(ctx context.Context, lastResult } // ListComplete enumerates all values, automatically crossing page boundaries as required. -func (client ResourceSkusClient) ListComplete(ctx context.Context, filter string) (result ResourceSkusResultIterator, err error) { +func (client ResourceSkusClient) ListComplete(ctx context.Context, filter string, includeExtendedLocations string) (result ResourceSkusResultIterator, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ResourceSkusClient.List") defer func() { @@ -144,6 +148,6 @@ func (client ResourceSkusClient) ListComplete(ctx context.Context, filter string tracing.EndSpan(ctx, sc, err) }() } - result.page, err = client.List(ctx, filter) + result.page, err = client.List(ctx, filter, includeExtendedLocations) return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/restorepointcollections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/restorepointcollections.go new file mode 100644 index 000000000000..ea9d27484cef --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/restorepointcollections.go @@ -0,0 +1,582 @@ +package compute + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// RestorePointCollectionsClient is the compute Client +type RestorePointCollectionsClient struct { + BaseClient +} + +// NewRestorePointCollectionsClient creates an instance of the RestorePointCollectionsClient client. +func NewRestorePointCollectionsClient(subscriptionID string) RestorePointCollectionsClient { + return NewRestorePointCollectionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewRestorePointCollectionsClientWithBaseURI creates an instance of the RestorePointCollectionsClient client using a +// custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, +// Azure stack). +func NewRestorePointCollectionsClientWithBaseURI(baseURI string, subscriptionID string) RestorePointCollectionsClient { + return RestorePointCollectionsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the operation to create or update the restore point collection. Please refer to +// https://aka.ms/RestorePoints for more details. When updating a restore point collection, only tags may be modified. +// Parameters: +// resourceGroupName - the name of the resource group. +// restorePointCollectionName - the name of the restore point collection. +// parameters - parameters supplied to the Create or Update restore point collection operation. +func (client RestorePointCollectionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, restorePointCollectionName string, parameters RestorePointCollection) (result RestorePointCollection, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointCollectionsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, restorePointCollectionName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "CreateOrUpdate", resp, "Failure responding to request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client RestorePointCollectionsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, restorePointCollectionName string, parameters RestorePointCollection) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "restorePointCollectionName": autorest.Encode("path", restorePointCollectionName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client RestorePointCollectionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client RestorePointCollectionsClient) CreateOrUpdateResponder(resp *http.Response) (result RestorePointCollection, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the operation to delete the restore point collection. This operation will also delete all the contained +// restore points. +// Parameters: +// resourceGroupName - the name of the resource group. +// restorePointCollectionName - the name of the Restore Point Collection. +func (client RestorePointCollectionsClient) Delete(ctx context.Context, resourceGroupName string, restorePointCollectionName string) (result RestorePointCollectionsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointCollectionsClient.Delete") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, restorePointCollectionName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client RestorePointCollectionsClient) DeletePreparer(ctx context.Context, resourceGroupName string, restorePointCollectionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "restorePointCollectionName": autorest.Encode("path", restorePointCollectionName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client RestorePointCollectionsClient) DeleteSender(req *http.Request) (future RestorePointCollectionsDeleteFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client RestorePointCollectionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the operation to get the restore point collection. +// Parameters: +// resourceGroupName - the name of the resource group. +// restorePointCollectionName - the name of the restore point collection. +// expand - the expand expression to apply on the operation. If expand=restorePoints, server will return all +// contained restore points in the restorePointCollection. 
+func (client RestorePointCollectionsClient) Get(ctx context.Context, resourceGroupName string, restorePointCollectionName string, expand RestorePointCollectionExpandOptions) (result RestorePointCollection, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointCollectionsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, restorePointCollectionName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. +func (client RestorePointCollectionsClient) GetPreparer(ctx context.Context, resourceGroupName string, restorePointCollectionName string, expand RestorePointCollectionExpandOptions) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "restorePointCollectionName": autorest.Encode("path", restorePointCollectionName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(string(expand)) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client RestorePointCollectionsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client RestorePointCollectionsClient) GetResponder(resp *http.Response) (result RestorePointCollection, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets the list of restore point collections in a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. 
+func (client RestorePointCollectionsClient) List(ctx context.Context, resourceGroupName string) (result RestorePointCollectionListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointCollectionsClient.List") + defer func() { + sc := -1 + if result.rpclr.Response.Response != nil { + sc = result.rpclr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.rpclr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "List", resp, "Failure sending request") + return + } + + result.rpclr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "List", resp, "Failure responding to request") + return + } + if result.rpclr.hasNextLink() && result.rpclr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListPreparer prepares the List request. +func (client RestorePointCollectionsClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client RestorePointCollectionsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client RestorePointCollectionsClient) ListResponder(resp *http.Response) (result RestorePointCollectionListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client RestorePointCollectionsClient) listNextResults(ctx context.Context, lastResults RestorePointCollectionListResult) (result RestorePointCollectionListResult, err error) { + req, err := lastResults.restorePointCollectionListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client RestorePointCollectionsClient) ListComplete(ctx context.Context, resourceGroupName string) (result RestorePointCollectionListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointCollectionsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName) + return +} + +// ListAll gets the list of restore point collections in the subscription. Use nextLink property in the response to get +// the next page of restore point collections. Do this till nextLink is not null to fetch all the restore point +// collections. +func (client RestorePointCollectionsClient) ListAll(ctx context.Context) (result RestorePointCollectionListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointCollectionsClient.ListAll") + defer func() { + sc := -1 + if result.rpclr.Response.Response != nil { + sc = result.rpclr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listAllNextResults + req, err := client.ListAllPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "ListAll", nil, "Failure preparing request") + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.rpclr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "ListAll", resp, "Failure sending request") + return + } + + result.rpclr, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "ListAll", resp, "Failure responding to request") + return + } + if result.rpclr.hasNextLink() && result.rpclr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListAllPreparer prepares the ListAll request. 
+func (client RestorePointCollectionsClient) ListAllPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/restorePointCollections", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. +func (client RestorePointCollectionsClient) ListAllSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client RestorePointCollectionsClient) ListAllResponder(resp *http.Response) (result RestorePointCollectionListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listAllNextResults retrieves the next set of results, if any. +func (client RestorePointCollectionsClient) listAllNextResults(ctx context.Context, lastResults RestorePointCollectionListResult) (result RestorePointCollectionListResult, err error) { + req, err := lastResults.restorePointCollectionListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "listAllNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "listAllNextResults", resp, "Failure sending next results request") + } + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "listAllNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListAllComplete enumerates all values, automatically crossing page boundaries as required. +func (client RestorePointCollectionsClient) ListAllComplete(ctx context.Context) (result RestorePointCollectionListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointCollectionsClient.ListAll") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListAll(ctx) + return +} + +// Update the operation to update the restore point collection. +// Parameters: +// resourceGroupName - the name of the resource group. +// restorePointCollectionName - the name of the restore point collection. +// parameters - parameters supplied to the Update restore point collection operation. 
+func (client RestorePointCollectionsClient) Update(ctx context.Context, resourceGroupName string, restorePointCollectionName string, parameters RestorePointCollectionUpdate) (result RestorePointCollection, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointCollectionsClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, restorePointCollectionName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "Update", resp, "Failure responding to request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client RestorePointCollectionsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, restorePointCollectionName string, parameters RestorePointCollectionUpdate) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "restorePointCollectionName": autorest.Encode("path", restorePointCollectionName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client RestorePointCollectionsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
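A hedged end-to-end sketch of the new client defined in this file: list the restore point collections in a resource group, then delete one and wait for the long-running operation. The resource names and subscription ID are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	client := compute.NewRestorePointCollectionsClient("00000000-0000-0000-0000-000000000000") // placeholder subscription ID
	client.Authorizer = authorizer
	ctx := context.Background()

	// List every collection in the resource group, following pagination automatically.
	iter, err := client.ListComplete(ctx, "example-rg")
	if err != nil {
		log.Fatal(err)
	}
	for iter.NotDone() {
		if name := iter.Value().Name; name != nil {
			fmt.Println(*name)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			log.Fatal(err)
		}
	}

	// Delete is a long-running operation; block until it finishes.
	future, err := client.Delete(ctx, "example-rg", "example-rpc")
	if err != nil {
		log.Fatal(err)
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		log.Fatal(err)
	}
}
```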
+func (client RestorePointCollectionsClient) UpdateResponder(resp *http.Response) (result RestorePointCollection, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/restorepoints.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/restorepoints.go new file mode 100644 index 000000000000..ad79c4b0eec5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/restorepoints.go @@ -0,0 +1,297 @@ +package compute + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// RestorePointsClient is the compute Client +type RestorePointsClient struct { + BaseClient +} + +// NewRestorePointsClient creates an instance of the RestorePointsClient client. +func NewRestorePointsClient(subscriptionID string) RestorePointsClient { + return NewRestorePointsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewRestorePointsClientWithBaseURI creates an instance of the RestorePointsClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewRestorePointsClientWithBaseURI(baseURI string, subscriptionID string) RestorePointsClient { + return RestorePointsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create the operation to create the restore point. Updating properties of an existing restore point is not allowed +// Parameters: +// resourceGroupName - the name of the resource group. +// restorePointCollectionName - the name of the restore point collection. +// restorePointName - the name of the restore point. +// parameters - parameters supplied to the Create restore point operation. 
+func (client RestorePointsClient) Create(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string, parameters RestorePoint) (result RestorePointsCreateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointsClient.Create") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.RestorePointProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.RestorePointProperties.SourceMetadata", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.RestorePointProperties.SourceMetadata.StorageProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.RestorePointProperties.SourceMetadata.StorageProfile.OsDisk", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.RestorePointProperties.SourceMetadata.StorageProfile.OsDisk.EncryptionSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.RestorePointProperties.SourceMetadata.StorageProfile.OsDisk.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.RestorePointProperties.SourceMetadata.StorageProfile.OsDisk.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.RestorePointProperties.SourceMetadata.StorageProfile.OsDisk.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + }}, + {Target: "parameters.RestorePointProperties.SourceMetadata.StorageProfile.OsDisk.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.RestorePointProperties.SourceMetadata.StorageProfile.OsDisk.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.RestorePointProperties.SourceMetadata.StorageProfile.OsDisk.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}, + }}, + }}, + }}}}}); err != nil { + return result, validation.NewError("compute.RestorePointsClient", "Create", err.Error()) + } + + req, err := client.CreatePreparer(ctx, resourceGroupName, restorePointCollectionName, restorePointName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.RestorePointsClient", "Create", nil, "Failure preparing request") + return + } + + result, err = client.CreateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.RestorePointsClient", "Create", result.Response(), "Failure sending request") + return + } + + return +} + +// CreatePreparer prepares the Create request. 
+func (client RestorePointsClient) CreatePreparer(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string, parameters RestorePoint) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "restorePointCollectionName": autorest.Encode("path", restorePointCollectionName), + "restorePointName": autorest.Encode("path", restorePointName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{restorePointName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client RestorePointsClient) CreateSender(req *http.Request) (future RestorePointsCreateFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client RestorePointsClient) CreateResponder(resp *http.Response) (result RestorePoint, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the operation to delete the restore point. +// Parameters: +// resourceGroupName - the name of the resource group. +// restorePointCollectionName - the name of the Restore Point Collection. +// restorePointName - the name of the restore point. +func (client RestorePointsClient) Delete(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string) (result RestorePointsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointsClient.Delete") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, restorePointCollectionName, restorePointName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.RestorePointsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.RestorePointsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client RestorePointsClient) DeletePreparer(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "restorePointCollectionName": autorest.Encode("path", restorePointCollectionName), + "restorePointName": autorest.Encode("path", restorePointName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{restorePointName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client RestorePointsClient) DeleteSender(req *http.Request) (future RestorePointsDeleteFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client RestorePointsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the operation to get the restore point. +// Parameters: +// resourceGroupName - the name of the resource group. +// restorePointCollectionName - the name of the restore point collection. +// restorePointName - the name of the restore point. +func (client RestorePointsClient) Get(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string) (result RestorePoint, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, restorePointCollectionName, restorePointName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.RestorePointsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.RestorePointsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.RestorePointsClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. 
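For orientation, this is roughly how calling code would drive the generated Create operation above; it is a minimal sketch, not part of the vendored diff, and the subscription ID, authorizer and resource names are placeholders. An empty RestorePoint body satisfies the validation chain above, because the nested SourceMetadata/EncryptionSettings constraints only apply when those optional fields are populated.

package example // illustrative sketch, not part of this diff

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest"
)

// createRestorePoint shows the usual long-running-operation pattern for the
// generated Create call: send the request, wait on the returned future, then
// read the result. All resource names and the authorizer are placeholders.
func createRestorePoint(ctx context.Context, subscriptionID string, authorizer autorest.Authorizer) (compute.RestorePoint, error) {
	client := compute.NewRestorePointsClient(subscriptionID)
	client.Authorizer = authorizer

	// An empty body passes the validation above; the SourceMetadata and
	// EncryptionSettings constraints only kick in when those fields are set.
	future, err := client.Create(ctx, "example-resources", "example-rpc", "example-rp", compute.RestorePoint{})
	if err != nil {
		return compute.RestorePoint{}, err
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return compute.RestorePoint{}, err
	}
	return future.Result(client)
}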
+func (client RestorePointsClient) GetPreparer(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "restorePointCollectionName": autorest.Encode("path", restorePointCollectionName), + "restorePointName": autorest.Encode("path", restorePointName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{restorePointName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client RestorePointsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client RestorePointsClient) GetResponder(resp *http.Response) (result RestorePoint, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sharedgalleries.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sharedgalleries.go new file mode 100644 index 000000000000..261ed0770ac0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sharedgalleries.go @@ -0,0 +1,227 @@ +package compute + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SharedGalleriesClient is the compute Client +type SharedGalleriesClient struct { + BaseClient +} + +// NewSharedGalleriesClient creates an instance of the SharedGalleriesClient client. +func NewSharedGalleriesClient(subscriptionID string) SharedGalleriesClient { + return NewSharedGalleriesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSharedGalleriesClientWithBaseURI creates an instance of the SharedGalleriesClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewSharedGalleriesClientWithBaseURI(baseURI string, subscriptionID string) SharedGalleriesClient { + return SharedGalleriesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get get a shared gallery by subscription id or tenant id. 
+// Parameters: +// location - resource location. +// galleryUniqueName - the unique name of the Shared Gallery. +func (client SharedGalleriesClient) Get(ctx context.Context, location string, galleryUniqueName string) (result SharedGallery, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleriesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, location, galleryUniqueName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SharedGalleriesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.SharedGalleriesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SharedGalleriesClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. +func (client SharedGalleriesClient) GetPreparer(ctx context.Context, location string, galleryUniqueName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryUniqueName": autorest.Encode("path", galleryUniqueName), + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries/{galleryUniqueName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client SharedGalleriesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client SharedGalleriesClient) GetResponder(resp *http.Response) (result SharedGallery, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List list shared galleries by subscription id or tenant id. +// Parameters: +// location - resource location. +// sharedTo - the query parameter to decide what shared galleries to fetch when doing listing operations. 
+func (client SharedGalleriesClient) List(ctx context.Context, location string, sharedTo SharedToValues) (result SharedGalleryListPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleriesClient.List") + defer func() { + sc := -1 + if result.sgl.Response.Response != nil { + sc = result.sgl.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, location, sharedTo) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SharedGalleriesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.sgl.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.SharedGalleriesClient", "List", resp, "Failure sending request") + return + } + + result.sgl, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SharedGalleriesClient", "List", resp, "Failure responding to request") + return + } + if result.sgl.hasNextLink() && result.sgl.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListPreparer prepares the List request. +func (client SharedGalleriesClient) ListPreparer(ctx context.Context, location string, sharedTo SharedToValues) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(string(sharedTo)) > 0 { + queryParameters["sharedTo"] = autorest.Encode("query", sharedTo) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client SharedGalleriesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client SharedGalleriesClient) ListResponder(resp *http.Response) (result SharedGalleryList, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client SharedGalleriesClient) listNextResults(ctx context.Context, lastResults SharedGalleryList) (result SharedGalleryList, err error) { + req, err := lastResults.sharedGalleryListPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.SharedGalleriesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.SharedGalleriesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SharedGalleriesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client SharedGalleriesClient) ListComplete(ctx context.Context, location string, sharedTo SharedToValues) (result SharedGalleryListIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleriesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, location, sharedTo) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sharedgalleryimages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sharedgalleryimages.go new file mode 100644 index 000000000000..5378b4f89811 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sharedgalleryimages.go @@ -0,0 +1,233 @@ +package compute + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SharedGalleryImagesClient is the compute Client +type SharedGalleryImagesClient struct { + BaseClient +} + +// NewSharedGalleryImagesClient creates an instance of the SharedGalleryImagesClient client. +func NewSharedGalleryImagesClient(subscriptionID string) SharedGalleryImagesClient { + return NewSharedGalleryImagesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSharedGalleryImagesClientWithBaseURI creates an instance of the SharedGalleryImagesClient client using a custom +// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). +func NewSharedGalleryImagesClientWithBaseURI(baseURI string, subscriptionID string) SharedGalleryImagesClient { + return SharedGalleryImagesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get get a shared gallery image by subscription id or tenant id. +// Parameters: +// location - resource location. +// galleryUniqueName - the unique name of the Shared Gallery. +// galleryImageName - the name of the Shared Gallery Image Definition from which the Image Versions are to be +// listed. 
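A minimal paging sketch for the SharedGalleriesClient above, with placeholder subscription, location and authorizer; it is not part of the vendored diff. Passing an empty SharedToValues omits the optional sharedTo query parameter, which ListPreparer only appends when the value is non-empty.

package example // illustrative sketch, not part of this diff

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest"
)

// listSharedGalleries walks every result page via the generated ListComplete
// iterator, which crosses page boundaries automatically.
func listSharedGalleries(ctx context.Context, subscriptionID, location string, authorizer autorest.Authorizer) error {
	client := compute.NewSharedGalleriesClient(subscriptionID)
	client.Authorizer = authorizer

	it, err := client.ListComplete(ctx, location, "")
	if err != nil {
		return err
	}
	for it.NotDone() {
		gallery := it.Value()
		if gallery.Name != nil {
			fmt.Println(*gallery.Name)
		}
		if err := it.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}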
+func (client SharedGalleryImagesClient) Get(ctx context.Context, location string, galleryUniqueName string, galleryImageName string) (result SharedGalleryImage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleryImagesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, location, galleryUniqueName, galleryImageName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SharedGalleryImagesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.SharedGalleryImagesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SharedGalleryImagesClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. +func (client SharedGalleryImagesClient) GetPreparer(ctx context.Context, location string, galleryUniqueName string, galleryImageName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryImageName": autorest.Encode("path", galleryImageName), + "galleryUniqueName": autorest.Encode("path", galleryUniqueName), + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries/{galleryUniqueName}/images/{galleryImageName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client SharedGalleryImagesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client SharedGalleryImagesClient) GetResponder(resp *http.Response) (result SharedGalleryImage, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List list shared gallery images by subscription id or tenant id. +// Parameters: +// location - resource location. +// galleryUniqueName - the unique name of the Shared Gallery. +// sharedTo - the query parameter to decide what shared galleries to fetch when doing listing operations. 
+func (client SharedGalleryImagesClient) List(ctx context.Context, location string, galleryUniqueName string, sharedTo SharedToValues) (result SharedGalleryImageListPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleryImagesClient.List") + defer func() { + sc := -1 + if result.sgil.Response.Response != nil { + sc = result.sgil.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, location, galleryUniqueName, sharedTo) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SharedGalleryImagesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.sgil.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.SharedGalleryImagesClient", "List", resp, "Failure sending request") + return + } + + result.sgil, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SharedGalleryImagesClient", "List", resp, "Failure responding to request") + return + } + if result.sgil.hasNextLink() && result.sgil.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListPreparer prepares the List request. +func (client SharedGalleryImagesClient) ListPreparer(ctx context.Context, location string, galleryUniqueName string, sharedTo SharedToValues) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryUniqueName": autorest.Encode("path", galleryUniqueName), + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(string(sharedTo)) > 0 { + queryParameters["sharedTo"] = autorest.Encode("query", sharedTo) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries/{galleryUniqueName}/images", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client SharedGalleryImagesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client SharedGalleryImagesClient) ListResponder(resp *http.Response) (result SharedGalleryImageList, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client SharedGalleryImagesClient) listNextResults(ctx context.Context, lastResults SharedGalleryImageList) (result SharedGalleryImageList, err error) { + req, err := lastResults.sharedGalleryImageListPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.SharedGalleryImagesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.SharedGalleryImagesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SharedGalleryImagesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client SharedGalleryImagesClient) ListComplete(ctx context.Context, location string, galleryUniqueName string, sharedTo SharedToValues) (result SharedGalleryImageListIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleryImagesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, location, galleryUniqueName, sharedTo) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sharedgalleryimageversions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sharedgalleryimageversions.go new file mode 100644 index 000000000000..325d3f48b3e9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sharedgalleryimageversions.go @@ -0,0 +1,240 @@ +package compute + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SharedGalleryImageVersionsClient is the compute Client +type SharedGalleryImageVersionsClient struct { + BaseClient +} + +// NewSharedGalleryImageVersionsClient creates an instance of the SharedGalleryImageVersionsClient client. +func NewSharedGalleryImageVersionsClient(subscriptionID string) SharedGalleryImageVersionsClient { + return NewSharedGalleryImageVersionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSharedGalleryImageVersionsClientWithBaseURI creates an instance of the SharedGalleryImageVersionsClient client +// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign +// clouds, Azure stack). +func NewSharedGalleryImageVersionsClientWithBaseURI(baseURI string, subscriptionID string) SharedGalleryImageVersionsClient { + return SharedGalleryImageVersionsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get get a shared gallery image version by subscription id or tenant id. +// Parameters: +// location - resource location. 
+// galleryUniqueName - the unique name of the Shared Gallery. +// galleryImageName - the name of the Shared Gallery Image Definition from which the Image Versions are to be +// listed. +// galleryImageVersionName - the name of the gallery image version to be created. Needs to follow semantic +// version name pattern: The allowed characters are digit and period. Digits must be within the range of a +// 32-bit integer. Format: .. +func (client SharedGalleryImageVersionsClient) Get(ctx context.Context, location string, galleryUniqueName string, galleryImageName string, galleryImageVersionName string) (result SharedGalleryImageVersion, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleryImageVersionsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, location, galleryUniqueName, galleryImageName, galleryImageVersionName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SharedGalleryImageVersionsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.SharedGalleryImageVersionsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SharedGalleryImageVersionsClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. +func (client SharedGalleryImageVersionsClient) GetPreparer(ctx context.Context, location string, galleryUniqueName string, galleryImageName string, galleryImageVersionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryImageName": autorest.Encode("path", galleryImageName), + "galleryImageVersionName": autorest.Encode("path", galleryImageVersionName), + "galleryUniqueName": autorest.Encode("path", galleryUniqueName), + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries/{galleryUniqueName}/images/{galleryImageName}/versions/{galleryImageVersionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client SharedGalleryImageVersionsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client SharedGalleryImageVersionsClient) GetResponder(resp *http.Response) (result SharedGalleryImageVersion, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List list shared gallery image versions by subscription id or tenant id. +// Parameters: +// location - resource location. +// galleryUniqueName - the unique name of the Shared Gallery. +// galleryImageName - the name of the Shared Gallery Image Definition from which the Image Versions are to be +// listed. +// sharedTo - the query parameter to decide what shared galleries to fetch when doing listing operations. +func (client SharedGalleryImageVersionsClient) List(ctx context.Context, location string, galleryUniqueName string, galleryImageName string, sharedTo SharedToValues) (result SharedGalleryImageVersionListPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleryImageVersionsClient.List") + defer func() { + sc := -1 + if result.sgivl.Response.Response != nil { + sc = result.sgivl.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, location, galleryUniqueName, galleryImageName, sharedTo) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SharedGalleryImageVersionsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.sgivl.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.SharedGalleryImageVersionsClient", "List", resp, "Failure sending request") + return + } + + result.sgivl, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SharedGalleryImageVersionsClient", "List", resp, "Failure responding to request") + return + } + if result.sgivl.hasNextLink() && result.sgivl.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListPreparer prepares the List request. +func (client SharedGalleryImageVersionsClient) ListPreparer(ctx context.Context, location string, galleryUniqueName string, galleryImageName string, sharedTo SharedToValues) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryImageName": autorest.Encode("path", galleryImageName), + "galleryUniqueName": autorest.Encode("path", galleryUniqueName), + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(string(sharedTo)) > 0 { + queryParameters["sharedTo"] = autorest.Encode("query", sharedTo) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries/{galleryUniqueName}/images/{galleryImageName}/versions", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. 
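A minimal sketch of resolving a single shared gallery image version with the client above; the gallery unique name, image definition name and version name are placeholders, and the snippet is not part of the vendored diff.

package example // illustrative sketch, not part of this diff

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest"
)

// getSharedImageVersion fetches one version of a shared gallery image. The
// version name follows the semantic-version style naming described above.
func getSharedImageVersion(ctx context.Context, subscriptionID, location string, authorizer autorest.Authorizer) (compute.SharedGalleryImageVersion, error) {
	client := compute.NewSharedGalleryImageVersionsClient(subscriptionID)
	client.Authorizer = authorizer
	return client.Get(ctx, location, "exampleGalleryUniqueName", "example-image", "1.0.0")
}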
+func (client SharedGalleryImageVersionsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client SharedGalleryImageVersionsClient) ListResponder(resp *http.Response) (result SharedGalleryImageVersionList, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client SharedGalleryImageVersionsClient) listNextResults(ctx context.Context, lastResults SharedGalleryImageVersionList) (result SharedGalleryImageVersionList, err error) { + req, err := lastResults.sharedGalleryImageVersionListPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.SharedGalleryImageVersionsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.SharedGalleryImageVersionsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SharedGalleryImageVersionsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client SharedGalleryImageVersionsClient) ListComplete(ctx context.Context, location string, galleryUniqueName string, galleryImageName string, sharedTo SharedToValues) (result SharedGalleryImageVersionListIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleryImageVersionsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, location, galleryUniqueName, galleryImageName, sharedTo) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/snapshots.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/snapshots.go similarity index 98% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/snapshots.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/snapshots.go index a33f86841054..3590bc688f6b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/snapshots.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/snapshots.go @@ -35,7 +35,8 @@ func NewSnapshotsClientWithBaseURI(baseURI string, subscriptionID string) Snapsh // Parameters: // resourceGroupName - the name of the resource group. // snapshotName - the name of the snapshot that is being created. The name can't be changed after the snapshot -// is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The max name length is 80 characters. +// is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. 
The max name length is 80 +// characters. // snapshot - snapshot object supplied in the body of the Put disk operation. func (client SnapshotsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, snapshotName string, snapshot Snapshot) (result SnapshotsCreateOrUpdateFuture, err error) { if tracing.IsEnabled() { @@ -91,7 +92,7 @@ func (client SnapshotsClient) CreateOrUpdatePreparer(ctx context.Context, resour "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -139,7 +140,8 @@ func (client SnapshotsClient) CreateOrUpdateResponder(resp *http.Response) (resu // Parameters: // resourceGroupName - the name of the resource group. // snapshotName - the name of the snapshot that is being created. The name can't be changed after the snapshot -// is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The max name length is 80 characters. +// is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 +// characters. func (client SnapshotsClient) Delete(ctx context.Context, resourceGroupName string, snapshotName string) (result SnapshotsDeleteFuture, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.Delete") @@ -174,7 +176,7 @@ func (client SnapshotsClient) DeletePreparer(ctx context.Context, resourceGroupN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -218,7 +220,8 @@ func (client SnapshotsClient) DeleteResponder(resp *http.Response) (result autor // Parameters: // resourceGroupName - the name of the resource group. // snapshotName - the name of the snapshot that is being created. The name can't be changed after the snapshot -// is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The max name length is 80 characters. +// is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 +// characters. func (client SnapshotsClient) Get(ctx context.Context, resourceGroupName string, snapshotName string) (result Snapshot, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.Get") @@ -260,7 +263,7 @@ func (client SnapshotsClient) GetPreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -295,7 +298,8 @@ func (client SnapshotsClient) GetResponder(resp *http.Response) (result Snapshot // Parameters: // resourceGroupName - the name of the resource group. // snapshotName - the name of the snapshot that is being created. The name can't be changed after the snapshot -// is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The max name length is 80 characters. +// is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 +// characters. // grantAccessData - access data object supplied in the body of the get snapshot access operation. 
func (client SnapshotsClient) GrantAccess(ctx context.Context, resourceGroupName string, snapshotName string, grantAccessData GrantAccessData) (result SnapshotsGrantAccessFuture, err error) { if tracing.IsEnabled() { @@ -337,7 +341,7 @@ func (client SnapshotsClient) GrantAccessPreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -425,7 +429,7 @@ func (client SnapshotsClient) ListPreparer(ctx context.Context) (*http.Request, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -541,7 +545,7 @@ func (client SnapshotsClient) ListByResourceGroupPreparer(ctx context.Context, r "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -613,7 +617,8 @@ func (client SnapshotsClient) ListByResourceGroupComplete(ctx context.Context, r // Parameters: // resourceGroupName - the name of the resource group. // snapshotName - the name of the snapshot that is being created. The name can't be changed after the snapshot -// is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The max name length is 80 characters. +// is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 +// characters. func (client SnapshotsClient) RevokeAccess(ctx context.Context, resourceGroupName string, snapshotName string) (result SnapshotsRevokeAccessFuture, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.RevokeAccess") @@ -648,7 +653,7 @@ func (client SnapshotsClient) RevokeAccessPreparer(ctx context.Context, resource "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -692,7 +697,8 @@ func (client SnapshotsClient) RevokeAccessResponder(resp *http.Response) (result // Parameters: // resourceGroupName - the name of the resource group. // snapshotName - the name of the snapshot that is being created. The name can't be changed after the snapshot -// is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The max name length is 80 characters. +// is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 +// characters. // snapshot - snapshot object supplied in the body of the Patch snapshot operation. 
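Worth noting from the hunks above: within this 2021-07-01 compute package the snapshot operations pin api-version 2021-04-01, while the other clients in this part of the diff use 2021-07-01. Callers never set the version themselves, since the constant is baked into each generated preparer. A minimal sketch with placeholder names, not part of the vendored diff:

package example // illustrative sketch, not part of this diff

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest"
)

// getSnapshot reads a managed snapshot; the api-version ("2021-04-01") is
// supplied by the generated GetPreparer, not by the caller.
func getSnapshot(ctx context.Context, subscriptionID string, authorizer autorest.Authorizer) (compute.Snapshot, error) {
	client := compute.NewSnapshotsClient(subscriptionID)
	client.Authorizer = authorizer
	return client.Get(ctx, "example-resources", "example-snapshot")
}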
func (client SnapshotsClient) Update(ctx context.Context, resourceGroupName string, snapshotName string, snapshot SnapshotUpdate) (result SnapshotsUpdateFuture, err error) { if tracing.IsEnabled() { @@ -728,7 +734,7 @@ func (client SnapshotsClient) UpdatePreparer(ctx context.Context, resourceGroupN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/sshpublickeys.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sshpublickeys.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/sshpublickeys.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sshpublickeys.go index 58a8c9e4ff87..896ec1f1edbf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/sshpublickeys.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sshpublickeys.go @@ -76,7 +76,7 @@ func (client SSHPublicKeysClient) CreatePreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -154,7 +154,7 @@ func (client SSHPublicKeysClient) DeletePreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -231,7 +231,7 @@ func (client SSHPublicKeysClient) GenerateKeyPairPreparer(ctx context.Context, r "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -307,7 +307,7 @@ func (client SSHPublicKeysClient) GetPreparer(ctx context.Context, resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -387,7 +387,7 @@ func (client SSHPublicKeysClient) ListByResourceGroupPreparer(ctx context.Contex "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -501,7 +501,7 @@ func (client SSHPublicKeysClient) ListBySubscriptionPreparer(ctx context.Context "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -615,7 +615,7 @@ func (client SSHPublicKeysClient) UpdatePreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/usage.go 
b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/usage.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/usage.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/usage.go index 65d7cfcdf4ed..ab20765aebf9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/usage.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/usage.go @@ -86,7 +86,7 @@ func (client UsageClient) ListPreparer(ctx context.Context, location string) (*h "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/version.go similarity index 90% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/version.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/version.go index fa1cb49d3173..56cb9d0c28d7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/version.go @@ -10,7 +10,7 @@ import "github.com/Azure/azure-sdk-for-go/version" // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return "Azure-SDK-For-Go/" + Version() + " compute/2020-12-01" + return "Azure-SDK-For-Go/" + Version() + " compute/2021-07-01" } // Version returns the semantic version (see http://semver.org) of the client. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachineextensionimages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineextensionimages.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachineextensionimages.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineextensionimages.go index 01aa7197a1ab..e359238e21cb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachineextensionimages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineextensionimages.go @@ -77,7 +77,7 @@ func (client VirtualMachineExtensionImagesClient) GetPreparer(ctx context.Contex "version": autorest.Encode("path", version), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -152,7 +152,7 @@ func (client VirtualMachineExtensionImagesClient) ListTypesPreparer(ctx context. 
"subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -229,7 +229,7 @@ func (client VirtualMachineExtensionImagesClient) ListVersionsPreparer(ctx conte "type": autorest.Encode("path", typeParameter), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachineextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineextensions.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachineextensions.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineextensions.go index 2a8c6aa5a99d..a73c28d02725 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachineextensions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineextensions.go @@ -72,7 +72,7 @@ func (client VirtualMachineExtensionsClient) CreateOrUpdatePreparer(ctx context. "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -155,7 +155,7 @@ func (client VirtualMachineExtensionsClient) DeletePreparer(ctx context.Context, "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -243,7 +243,7 @@ func (client VirtualMachineExtensionsClient) GetPreparer(ctx context.Context, re "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -323,7 +323,7 @@ func (client VirtualMachineExtensionsClient) ListPreparer(ctx context.Context, r "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -398,7 +398,7 @@ func (client VirtualMachineExtensionsClient) UpdatePreparer(ctx context.Context, "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachineimages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineimages.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachineimages.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineimages.go index 5cc60e1dc730..95b6c52e34af 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachineimages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineimages.go @@ -82,7 +82,7 @@ func (client VirtualMachineImagesClient) GetPreparer(ctx context.Context, locati "version": 
autorest.Encode("path", version), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -163,7 +163,7 @@ func (client VirtualMachineImagesClient) ListPreparer(ctx context.Context, locat "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -248,7 +248,7 @@ func (client VirtualMachineImagesClient) ListOffersPreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -322,7 +322,7 @@ func (client VirtualMachineImagesClient) ListPublishersPreparer(ctx context.Cont "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -400,7 +400,7 @@ func (client VirtualMachineImagesClient) ListSkusPreparer(ctx context.Context, l "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachineimagesedgezone.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineimagesedgezone.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachineimagesedgezone.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineimagesedgezone.go index c7150075b454..8fb48bf680d2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachineimagesedgezone.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineimagesedgezone.go @@ -84,7 +84,7 @@ func (client VirtualMachineImagesEdgeZoneClient) GetPreparer(ctx context.Context "version": autorest.Encode("path", version), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -170,7 +170,7 @@ func (client VirtualMachineImagesEdgeZoneClient) ListPreparer(ctx context.Contex "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -257,7 +257,7 @@ func (client VirtualMachineImagesEdgeZoneClient) ListOffersPreparer(ctx context. 
"subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -333,7 +333,7 @@ func (client VirtualMachineImagesEdgeZoneClient) ListPublishersPreparer(ctx cont "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -413,7 +413,7 @@ func (client VirtualMachineImagesEdgeZoneClient) ListSkusPreparer(ctx context.Co "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachineruncommands.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineruncommands.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachineruncommands.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineruncommands.go index 07aedda44bda..5a2f07fafb75 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachineruncommands.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineruncommands.go @@ -73,7 +73,7 @@ func (client VirtualMachineRunCommandsClient) CreateOrUpdatePreparer(ctx context "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -156,7 +156,7 @@ func (client VirtualMachineRunCommandsClient) DeletePreparer(ctx context.Context "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -247,7 +247,7 @@ func (client VirtualMachineRunCommandsClient) GetPreparer(ctx context.Context, l "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -326,7 +326,7 @@ func (client VirtualMachineRunCommandsClient) GetByVirtualMachinePreparer(ctx co "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -414,7 +414,7 @@ func (client VirtualMachineRunCommandsClient) ListPreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -533,7 +533,7 @@ func (client VirtualMachineRunCommandsClient) ListByVirtualMachinePreparer(ctx c "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -645,7 +645,7 @@ func (client VirtualMachineRunCommandsClient) UpdatePreparer(ctx context.Context "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const 
APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachines.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachines.go similarity index 97% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachines.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachines.go index 73ab326f9310..d1aff5e7573b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachines.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachines.go @@ -69,7 +69,7 @@ func (client VirtualMachinesClient) AssessPatchesPreparer(ctx context.Context, r "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -158,7 +158,7 @@ func (client VirtualMachinesClient) CapturePreparer(ctx context.Context, resourc "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -240,7 +240,7 @@ func (client VirtualMachinesClient) ConvertToManagedDisksPreparer(ctx context.Co "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -341,7 +341,7 @@ func (client VirtualMachinesClient) CreateOrUpdatePreparer(ctx context.Context, "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -390,7 +390,8 @@ func (client VirtualMachinesClient) CreateOrUpdateResponder(resp *http.Response) // Parameters: // resourceGroupName - the name of the resource group. // VMName - the name of the virtual machine. -func (client VirtualMachinesClient) Deallocate(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesDeallocateFuture, err error) { +// hibernate - optional parameter to hibernate a virtual machine. (Feature in Preview) +func (client VirtualMachinesClient) Deallocate(ctx context.Context, resourceGroupName string, VMName string, hibernate *bool) (result VirtualMachinesDeallocateFuture, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.Deallocate") defer func() { @@ -401,7 +402,7 @@ func (client VirtualMachinesClient) Deallocate(ctx context.Context, resourceGrou tracing.EndSpan(ctx, sc, err) }() } - req, err := client.DeallocatePreparer(ctx, resourceGroupName, VMName) + req, err := client.DeallocatePreparer(ctx, resourceGroupName, VMName, hibernate) if err != nil { err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Deallocate", nil, "Failure preparing request") return @@ -417,17 +418,20 @@ func (client VirtualMachinesClient) Deallocate(ctx context.Context, resourceGrou } // DeallocatePreparer prepares the Deallocate request. 
-func (client VirtualMachinesClient) DeallocatePreparer(ctx context.Context, resourceGroupName string, VMName string) (*http.Request, error) { +func (client VirtualMachinesClient) DeallocatePreparer(ctx context.Context, resourceGroupName string, VMName string, hibernate *bool) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } + if hibernate != nil { + queryParameters["hibernate"] = autorest.Encode("query", *hibernate) + } preparer := autorest.CreatePreparer( autorest.AsPost(), @@ -503,7 +507,7 @@ func (client VirtualMachinesClient) DeletePreparer(ctx context.Context, resource "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -548,9 +552,9 @@ func (client VirtualMachinesClient) DeleteResponder(resp *http.Response) (result // Generalize sets the OS state of the virtual machine to generalized. It is recommended to sysprep the virtual machine // before performing this operation.
For Windows, please refer to [Create a managed image of a generalized VM in -// Azure](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/capture-image-resource). For Linux, please -// refer to [How to create an image of a virtual machine or -// VHD](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/capture-image). +// Azure](https://docs.microsoft.com/azure/virtual-machines/windows/capture-image-resource).
    For Linux, please refer +// to [How to create an image of a virtual machine or +// VHD](https://docs.microsoft.com/azure/virtual-machines/linux/capture-image). // Parameters: // resourceGroupName - the name of the resource group. // VMName - the name of the virtual machine. @@ -595,7 +599,7 @@ func (client VirtualMachinesClient) GeneralizePreparer(ctx context.Context, reso "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -629,7 +633,10 @@ func (client VirtualMachinesClient) GeneralizeResponder(resp *http.Response) (re // Parameters: // resourceGroupName - the name of the resource group. // VMName - the name of the virtual machine. -// expand - the expand expression to apply on the operation. +// expand - the expand expression to apply on the operation. 'InstanceView' retrieves a snapshot of the runtime +// properties of the virtual machine that is managed by the platform and can change outside of control plane +// operations. 'UserData' retrieves the UserData property as part of the VM model view that was provided by the +// user during the VM Create/Update operation. func (client VirtualMachinesClient) Get(ctx context.Context, resourceGroupName string, VMName string, expand InstanceViewTypes) (result VirtualMachine, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.Get") @@ -671,7 +678,7 @@ func (client VirtualMachinesClient) GetPreparer(ctx context.Context, resourceGro "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -721,12 +728,6 @@ func (client VirtualMachinesClient) InstallPatches(ctx context.Context, resource tracing.EndSpan(ctx, sc, err) }() } - if err := validation.Validate([]validation.Validation{ - {TargetValue: installPatchesInput, - Constraints: []validation.Constraint{{Target: "installPatchesInput.MaximumDuration", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("compute.VirtualMachinesClient", "InstallPatches", err.Error()) - } - req, err := client.InstallPatchesPreparer(ctx, resourceGroupName, VMName, installPatchesInput) if err != nil { err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "InstallPatches", nil, "Failure preparing request") @@ -750,7 +751,7 @@ func (client VirtualMachinesClient) InstallPatchesPreparer(ctx context.Context, "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -838,7 +839,7 @@ func (client VirtualMachinesClient) InstanceViewPreparer(ctx context.Context, re "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -918,7 +919,7 @@ func (client VirtualMachinesClient) ListPreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1034,7 +1035,7 @@ func (client VirtualMachinesClient) ListAllPreparer(ctx context.Context, statusO "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const 
APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1150,7 +1151,7 @@ func (client VirtualMachinesClient) ListAvailableSizesPreparer(ctx context.Conte "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1235,7 +1236,7 @@ func (client VirtualMachinesClient) ListByLocationPreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1341,7 +1342,7 @@ func (client VirtualMachinesClient) PerformMaintenancePreparer(ctx context.Conte "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1423,7 +1424,7 @@ func (client VirtualMachinesClient) PowerOffPreparer(ctx context.Context, resour "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1506,7 +1507,7 @@ func (client VirtualMachinesClient) ReapplyPreparer(ctx context.Context, resourc "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1584,7 +1585,7 @@ func (client VirtualMachinesClient) RedeployPreparer(ctx context.Context, resour "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1663,7 +1664,7 @@ func (client VirtualMachinesClient) ReimagePreparer(ctx context.Context, resourc "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1746,7 +1747,7 @@ func (client VirtualMachinesClient) RestartPreparer(ctx context.Context, resourc "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1834,7 +1835,7 @@ func (client VirtualMachinesClient) RetrieveBootDiagnosticsDataPreparer(ctx cont "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1913,7 +1914,7 @@ func (client VirtualMachinesClient) RunCommandPreparer(ctx context.Context, reso "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -2001,7 +2002,7 @@ func (client VirtualMachinesClient) SimulateEvictionPreparer(ctx context.Context "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -2069,7 +2070,7 @@ func (client VirtualMachinesClient) StartPreparer(ctx context.Context, resourceG "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters 
:= map[string]interface{}{ "api-version": APIVersion, } @@ -2148,7 +2149,7 @@ func (client VirtualMachinesClient) UpdatePreparer(ctx context.Context, resource "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachinescalesetextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetextensions.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachinescalesetextensions.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetextensions.go index 7f49b888897c..9e71f3ba9100 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachinescalesetextensions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetextensions.go @@ -72,7 +72,7 @@ func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdatePreparer(ctx "vmssExtensionName": autorest.Encode("path", vmssExtensionName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -156,7 +156,7 @@ func (client VirtualMachineScaleSetExtensionsClient) DeletePreparer(ctx context. "vmssExtensionName": autorest.Encode("path", vmssExtensionName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -244,7 +244,7 @@ func (client VirtualMachineScaleSetExtensionsClient) GetPreparer(ctx context.Con "vmssExtensionName": autorest.Encode("path", vmssExtensionName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -328,7 +328,7 @@ func (client VirtualMachineScaleSetExtensionsClient) ListPreparer(ctx context.Co "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -437,7 +437,7 @@ func (client VirtualMachineScaleSetExtensionsClient) UpdatePreparer(ctx context. 
"vmssExtensionName": autorest.Encode("path", vmssExtensionName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachinescalesetrollingupgrades.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetrollingupgrades.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachinescalesetrollingupgrades.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetrollingupgrades.go index c834b8bdf9be..be9de5194bb5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachinescalesetrollingupgrades.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetrollingupgrades.go @@ -70,7 +70,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) CancelPreparer(ctx con "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -155,7 +155,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestPreparer(ctx "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -226,7 +226,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartExtensionUpgradeP "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -305,7 +305,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradePreparer "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachinescalesets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesets.go similarity index 98% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachinescalesets.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesets.go index 52282335a259..28523a860773 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachinescalesets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesets.go @@ -79,7 +79,7 @@ func (client VirtualMachineScaleSetsClient) ConvertToSinglePlacementGroupPrepare "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -173,7 +173,7 @@ func (client VirtualMachineScaleSetsClient) CreateOrUpdatePreparer(ctx context.C "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" 
queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -256,7 +256,7 @@ func (client VirtualMachineScaleSetsClient) DeallocatePreparer(ctx context.Conte "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -340,7 +340,7 @@ func (client VirtualMachineScaleSetsClient) DeletePreparer(ctx context.Context, "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -430,7 +430,7 @@ func (client VirtualMachineScaleSetsClient) DeleteInstancesPreparer(ctx context. "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -522,7 +522,7 @@ func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUp "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, "platformUpdateDomain": autorest.Encode("query", platformUpdateDomain), @@ -558,7 +558,9 @@ func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUp // Parameters: // resourceGroupName - the name of the resource group. // VMScaleSetName - the name of the VM scale set. -func (client VirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSet, err error) { +// expand - the expand expression to apply on the operation. 'UserData' retrieves the UserData property of the +// VM scale set that was provided by the user during the VM scale set Create/Update operation +func (client VirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, expand ExpandTypesForGetVMScaleSets) (result VirtualMachineScaleSet, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.Get") defer func() { @@ -569,7 +571,7 @@ func (client VirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGro tracing.EndSpan(ctx, sc, err) }() } - req, err := client.GetPreparer(ctx, resourceGroupName, VMScaleSetName) + req, err := client.GetPreparer(ctx, resourceGroupName, VMScaleSetName, expand) if err != nil { err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Get", nil, "Failure preparing request") return @@ -592,17 +594,20 @@ func (client VirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGro } // GetPreparer prepares the Get request. 
-func (client VirtualMachineScaleSetsClient) GetPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string) (*http.Request, error) { +func (client VirtualMachineScaleSetsClient) GetPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, expand ExpandTypesForGetVMScaleSets) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } + if len(string(expand)) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } preparer := autorest.CreatePreparer( autorest.AsGet(), @@ -675,7 +680,7 @@ func (client VirtualMachineScaleSetsClient) GetInstanceViewPreparer(ctx context. "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -756,7 +761,7 @@ func (client VirtualMachineScaleSetsClient) GetOSUpgradeHistoryPreparer(ctx cont "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -872,7 +877,7 @@ func (client VirtualMachineScaleSetsClient) ListPreparer(ctx context.Context, re "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -987,7 +992,7 @@ func (client VirtualMachineScaleSetsClient) ListAllPreparer(ctx context.Context) "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1109,7 +1114,7 @@ func (client VirtualMachineScaleSetsClient) ListByLocationPreparer(ctx context.C "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1228,7 +1233,7 @@ func (client VirtualMachineScaleSetsClient) ListSkusPreparer(ctx context.Context "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1298,7 +1303,7 @@ func (client VirtualMachineScaleSetsClient) ListSkusComplete(ctx context.Context // PerformMaintenance perform maintenance on one or more virtual machines in a VM scale set. Operation on instances // which are not eligible for perform maintenance will be failed. Please refer to best practices for more details: -// https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-maintenance-notifications +// https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-maintenance-notifications // Parameters: // resourceGroupName - the name of the resource group. // VMScaleSetName - the name of the VM scale set. 
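Note on the two signature changes shown above: `VirtualMachineScaleSetsClient.Get` now takes an `expand` argument (`ExpandTypesForGetVMScaleSets`), and `VirtualMachinesClient.Deallocate` (earlier in this diff) now takes an optional `hibernate *bool`, so code consuming the upgraded 2021-07-01 package has to pass the extra parameters. The following is a minimal, illustrative sketch and not part of the diff; the resource names (example-resources, example-vmss, example-vm), the ARM_SUBSCRIPTION_ID environment variable, and the environment-based authorizer are assumptions made only for this example, and the `ExpandTypesForGetVMScaleSetsUserData` constant is the enum value the 2021-07-01 package is expected to expose for the new expand option.

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Placeholder subscription and resource names; real callers supply their own.
	subscriptionID := os.Getenv("ARM_SUBSCRIPTION_ID")

	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		fmt.Println(err)
		return
	}
	ctx := context.Background()

	// Get now takes an expand argument: pass ExpandTypesForGetVMScaleSetsUserData
	// to retrieve UserData, or the zero value to keep the previous behaviour.
	vmssClient := compute.NewVirtualMachineScaleSetsClient(subscriptionID)
	vmssClient.Authorizer = authorizer
	vmss, err := vmssClient.Get(ctx, "example-resources", "example-vmss", compute.ExpandTypesForGetVMScaleSetsUserData)
	if err != nil {
		fmt.Println(err)
		return
	}
	if vmss.Name != nil {
		fmt.Println(*vmss.Name)
	}

	// Deallocate now takes an optional hibernate flag; nil keeps the previous
	// plain-deallocation behaviour.
	vmClient := compute.NewVirtualMachinesClient(subscriptionID)
	vmClient.Authorizer = authorizer
	future, err := vmClient.Deallocate(ctx, "example-resources", "example-vm", nil)
	if err != nil {
		fmt.Println(err)
		return
	}
	if err := future.WaitForCompletionRef(ctx, vmClient.Client); err != nil {
		fmt.Println(err)
	}
}

Passing nil for hibernate and the zero value for expand preserves the pre-upgrade behaviour of both calls, since the generated preparers only add the `hibernate` and `$expand` query parameters when those arguments are set (see the `if hibernate != nil` and `if len(string(expand)) > 0` guards in the hunks above).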
@@ -1337,7 +1342,7 @@ func (client VirtualMachineScaleSetsClient) PerformMaintenancePreparer(ctx conte "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1425,7 +1430,7 @@ func (client VirtualMachineScaleSetsClient) PowerOffPreparer(ctx context.Context "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1515,7 +1520,7 @@ func (client VirtualMachineScaleSetsClient) RedeployPreparer(ctx context.Context "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1600,7 +1605,7 @@ func (client VirtualMachineScaleSetsClient) ReimagePreparer(ctx context.Context, "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1685,7 +1690,7 @@ func (client VirtualMachineScaleSetsClient) ReimageAllPreparer(ctx context.Conte "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1769,7 +1774,7 @@ func (client VirtualMachineScaleSetsClient) RestartPreparer(ctx context.Context, "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1853,7 +1858,7 @@ func (client VirtualMachineScaleSetsClient) SetOrchestrationServiceStatePreparer "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1934,7 +1939,7 @@ func (client VirtualMachineScaleSetsClient) StartPreparer(ctx context.Context, r "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -2018,7 +2023,7 @@ func (client VirtualMachineScaleSetsClient) UpdatePreparer(ctx context.Context, "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -2106,7 +2111,7 @@ func (client VirtualMachineScaleSetsClient) UpdateInstancesPreparer(ctx context. 
"vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachinescalesetvmextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvmextensions.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachinescalesetvmextensions.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvmextensions.go index 4c44738c6f5a..9713046e25d8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachinescalesetvmextensions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvmextensions.go @@ -75,7 +75,7 @@ func (client VirtualMachineScaleSetVMExtensionsClient) CreateOrUpdatePreparer(ct "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -162,7 +162,7 @@ func (client VirtualMachineScaleSetVMExtensionsClient) DeletePreparer(ctx contex "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -252,7 +252,7 @@ func (client VirtualMachineScaleSetVMExtensionsClient) GetPreparer(ctx context.C "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -334,7 +334,7 @@ func (client VirtualMachineScaleSetVMExtensionsClient) ListPreparer(ctx context. 
"vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -411,7 +411,7 @@ func (client VirtualMachineScaleSetVMExtensionsClient) UpdatePreparer(ctx contex "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachinescalesetvmruncommands.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvmruncommands.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachinescalesetvmruncommands.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvmruncommands.go index f4a8843d6d7e..4fbb0c104507 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachinescalesetvmruncommands.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvmruncommands.go @@ -75,7 +75,7 @@ func (client VirtualMachineScaleSetVMRunCommandsClient) CreateOrUpdatePreparer(c "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -160,7 +160,7 @@ func (client VirtualMachineScaleSetVMRunCommandsClient) DeletePreparer(ctx conte "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -250,7 +250,7 @@ func (client VirtualMachineScaleSetVMRunCommandsClient) GetPreparer(ctx context. 
"vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -337,7 +337,7 @@ func (client VirtualMachineScaleSetVMRunCommandsClient) ListPreparer(ctx context "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -451,7 +451,7 @@ func (client VirtualMachineScaleSetVMRunCommandsClient) UpdatePreparer(ctx conte "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachinescalesetvms.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvms.go similarity index 98% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachinescalesetvms.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvms.go index 249df3139c55..f46f6c4f52f1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachinescalesetvms.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvms.go @@ -74,7 +74,7 @@ func (client VirtualMachineScaleSetVMsClient) DeallocatePreparer(ctx context.Con "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -156,7 +156,7 @@ func (client VirtualMachineScaleSetVMsClient) DeletePreparer(ctx context.Context "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -204,7 +204,8 @@ func (client VirtualMachineScaleSetVMsClient) DeleteResponder(resp *http.Respons // resourceGroupName - the name of the resource group. // VMScaleSetName - the name of the VM scale set. // instanceID - the instance ID of the virtual machine. -// expand - the expand expression to apply on the operation. +// expand - the expand expression to apply on the operation. 'InstanceView' will retrieve the instance view of +// the virtual machine. 'UserData' will retrieve the UserData of the virtual machine. 
func (client VirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand InstanceViewTypes) (result VirtualMachineScaleSetVM, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.Get") @@ -247,7 +248,7 @@ func (client VirtualMachineScaleSetVMsClient) GetPreparer(ctx context.Context, r "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -328,7 +329,7 @@ func (client VirtualMachineScaleSetVMsClient) GetInstanceViewPreparer(ctx contex "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -413,7 +414,7 @@ func (client VirtualMachineScaleSetVMsClient) ListPreparer(ctx context.Context, "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -530,7 +531,7 @@ func (client VirtualMachineScaleSetVMsClient) PerformMaintenancePreparer(ctx con "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -614,7 +615,7 @@ func (client VirtualMachineScaleSetVMsClient) PowerOffPreparer(ctx context.Conte "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -700,7 +701,7 @@ func (client VirtualMachineScaleSetVMsClient) RedeployPreparer(ctx context.Conte "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -781,7 +782,7 @@ func (client VirtualMachineScaleSetVMsClient) ReimagePreparer(ctx context.Contex "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -867,7 +868,7 @@ func (client VirtualMachineScaleSetVMsClient) ReimageAllPreparer(ctx context.Con "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -947,7 +948,7 @@ func (client VirtualMachineScaleSetVMsClient) RestartPreparer(ctx context.Contex "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1038,7 +1039,7 @@ func (client VirtualMachineScaleSetVMsClient) RetrieveBootDiagnosticsDataPrepare "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1119,7 +1120,7 @@ func (client VirtualMachineScaleSetVMsClient) RunCommandPreparer(ctx context.Con "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" 
+ const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1209,7 +1210,7 @@ func (client VirtualMachineScaleSetVMsClient) SimulateEvictionPreparer(ctx conte "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1279,7 +1280,7 @@ func (client VirtualMachineScaleSetVMsClient) StartPreparer(ctx context.Context, "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1381,7 +1382,7 @@ func (client VirtualMachineScaleSetVMsClient) UpdatePreparer(ctx context.Context "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachinesizes.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinesizes.go similarity index 96% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachinesizes.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinesizes.go index 4eca9cbe2ccf..394d659c218c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute/virtualmachinesizes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinesizes.go @@ -32,8 +32,7 @@ func NewVirtualMachineSizesClientWithBaseURI(baseURI string, subscriptionID stri return VirtualMachineSizesClient{NewWithBaseURI(baseURI, subscriptionID)} } -// List this API is deprecated. Use [Resources -// Skus](https://docs.microsoft.com/en-us/rest/api/compute/resourceskus/list) +// List this API is deprecated. Use [Resources Skus](https://docs.microsoft.com/rest/api/compute/resourceskus/list) // Parameters: // location - the location upon which virtual-machine-sizes is queried. 
func (client VirtualMachineSizesClient) List(ctx context.Context, location string) (result VirtualMachineSizeListResult, err error) { @@ -82,7 +81,7 @@ func (client VirtualMachineSizesClient) ListPreparer(ctx context.Context, locati "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-12-01" + const APIVersion = "2021-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/_meta.json deleted file mode 100644 index abaa58ce9171..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/_meta.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", - "readme": "/_/azure-rest-api-specs/specification/datalake-analytics/resource-manager/readme.md", - "tag": "package-2016-11", - "use": "@microsoft.azure/autorest.go@2.1.187", - "repository_url": "https://github.com/Azure/azure-rest-api-specs.git", - "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-2016-11 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/datalake-analytics/resource-manager/readme.md", - "additional_properties": { - "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION" - } -} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/accounts.go b/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/accounts.go deleted file mode 100644 index bcb262e7a950..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/accounts.go +++ /dev/null @@ -1,769 +0,0 @@ -package account - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// AccountsClient is the creates an Azure Data Lake Analytics account management client. -type AccountsClient struct { - BaseClient -} - -// NewAccountsClient creates an instance of the AccountsClient client. -func NewAccountsClient(subscriptionID string) AccountsClient { - return NewAccountsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewAccountsClientWithBaseURI creates an instance of the AccountsClient client using a custom endpoint. Use this -// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewAccountsClientWithBaseURI(baseURI string, subscriptionID string) AccountsClient { - return AccountsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CheckNameAvailability checks whether the specified account name is available or taken. -// Parameters: -// location - the resource location without whitespace. 
-// parameters - parameters supplied to check the Data Lake Analytics account name availability. -func (client AccountsClient) CheckNameAvailability(ctx context.Context, location string, parameters CheckNameAvailabilityParameters) (result NameAvailabilityInformation, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.CheckNameAvailability") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Name", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.Type", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("account.AccountsClient", "CheckNameAvailability", err.Error()) - } - - req, err := client.CheckNameAvailabilityPreparer(ctx, location, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "CheckNameAvailability", nil, "Failure preparing request") - return - } - - resp, err := client.CheckNameAvailabilitySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.AccountsClient", "CheckNameAvailability", resp, "Failure sending request") - return - } - - result, err = client.CheckNameAvailabilityResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "CheckNameAvailability", resp, "Failure responding to request") - return - } - - return -} - -// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request. -func (client AccountsClient) CheckNameAvailabilityPreparer(ctx context.Context, location string, parameters CheckNameAvailabilityParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "location": autorest.Encode("path", location), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.DataLakeAnalytics/locations/{location}/checkNameAvailability", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always -// closes the http.Response Body. 
-func (client AccountsClient) CheckNameAvailabilityResponder(resp *http.Response) (result NameAvailabilityInformation, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Create creates the specified Data Lake Analytics account. This supplies the user with computation services for Data -// Lake Analytics workloads. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. -// parameters - parameters supplied to create a new Data Lake Analytics account. -func (client AccountsClient) Create(ctx context.Context, resourceGroupName string, accountName string, parameters CreateDataLakeAnalyticsAccountParameters) (result AccountsCreateFutureType, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Create") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Location", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.CreateDataLakeAnalyticsAccountProperties", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.CreateDataLakeAnalyticsAccountProperties.DefaultDataLakeStoreAccount", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.CreateDataLakeAnalyticsAccountProperties.DataLakeStoreAccounts", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.CreateDataLakeAnalyticsAccountProperties.MaxJobCount", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CreateDataLakeAnalyticsAccountProperties.MaxJobCount", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}}}, - {Target: "parameters.CreateDataLakeAnalyticsAccountProperties.MaxDegreeOfParallelism", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CreateDataLakeAnalyticsAccountProperties.MaxDegreeOfParallelism", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}}}, - {Target: "parameters.CreateDataLakeAnalyticsAccountProperties.MaxDegreeOfParallelismPerJob", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CreateDataLakeAnalyticsAccountProperties.MaxDegreeOfParallelismPerJob", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}}}, - {Target: "parameters.CreateDataLakeAnalyticsAccountProperties.MinPriorityPerJob", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CreateDataLakeAnalyticsAccountProperties.MinPriorityPerJob", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}}}, - {Target: "parameters.CreateDataLakeAnalyticsAccountProperties.QueryStoreRetention", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CreateDataLakeAnalyticsAccountProperties.QueryStoreRetention", Name: validation.InclusiveMaximum, Rule: int64(180), Chain: nil}, - {Target: "parameters.CreateDataLakeAnalyticsAccountProperties.QueryStoreRetention", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}, - }}}}}); err != nil { - return result, 
validation.NewError("account.AccountsClient", "Create", err.Error()) - } - - req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "Create", nil, "Failure preparing request") - return - } - - result, err = client.CreateSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "Create", result.Response(), "Failure sending request") - return - } - - return -} - -// CreatePreparer prepares the Create request. -func (client AccountsClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, parameters CreateDataLakeAnalyticsAccountParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateSender sends the Create request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) CreateSender(req *http.Request) (future AccountsCreateFutureType, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// CreateResponder handles the response to the Create request. The method always -// closes the http.Response Body. -func (client AccountsClient) CreateResponder(resp *http.Response) (result DataLakeAnalyticsAccount, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete begins the delete process for the Data Lake Analytics account object specified by the account name. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. 
-func (client AccountsClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result AccountsDeleteFutureType, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Delete") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeletePreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "Delete", nil, "Failure preparing request") - return - } - - result, err = client.DeleteSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "Delete", result.Response(), "Failure sending request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client AccountsClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) DeleteSender(req *http.Request) (future AccountsDeleteFutureType, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client AccountsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets details of the specified Data Lake Analytics account. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. 
-func (client AccountsClient) Get(ctx context.Context, resourceGroupName string, accountName string) (result DataLakeAnalyticsAccount, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.AccountsClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. -func (client AccountsClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client AccountsClient) GetResponder(resp *http.Response) (result DataLakeAnalyticsAccount, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List gets the first page of Data Lake Analytics accounts, if any, within the current subscription. This includes a -// link to the next page, if any. -// Parameters: -// filter - oData filter. Optional. -// top - the number of items to return. Optional. -// skip - the number of items to skip over before returning elements. Optional. -// selectParameter - oData Select statement. Limits the properties on each entry to just those requested, e.g. -// Categories?$select=CategoryName,Description. Optional. -// orderby - orderBy clause. One or more comma-separated expressions with an optional "asc" (the default) or -// "desc" depending on the order you'd like the values sorted, e.g. Categories?$orderby=CategoryName desc. -// Optional. -// count - the Boolean value of true or false to request a count of the matching resources included with the -// resources in the response, e.g. 
Categories?$count=true. Optional. -func (client AccountsClient) List(ctx context.Context, filter string, top *int32, skip *int32, selectParameter string, orderby string, count *bool) (result DataLakeAnalyticsAccountListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.List") - defer func() { - sc := -1 - if result.dlaalr.Response.Response != nil { - sc = result.dlaalr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: top, - Constraints: []validation.Constraint{{Target: "top", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "top", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}}}}}, - {TargetValue: skip, - Constraints: []validation.Constraint{{Target: "skip", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "skip", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}}}}}}); err != nil { - return result, validation.NewError("account.AccountsClient", "List", err.Error()) - } - - result.fn = client.listNextResults - req, err := client.ListPreparer(ctx, filter, top, skip, selectParameter, orderby, count) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.dlaalr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.AccountsClient", "List", resp, "Failure sending request") - return - } - - result.dlaalr, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "List", resp, "Failure responding to request") - return - } - if result.dlaalr.hasNextLink() && result.dlaalr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListPreparer prepares the List request. -func (client AccountsClient) ListPreparer(ctx context.Context, filter string, top *int32, skip *int32, selectParameter string, orderby string, count *bool) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(filter) > 0 { - queryParameters["$filter"] = autorest.Encode("query", filter) - } - if top != nil { - queryParameters["$top"] = autorest.Encode("query", *top) - } - if skip != nil { - queryParameters["$skip"] = autorest.Encode("query", *skip) - } - if len(selectParameter) > 0 { - queryParameters["$select"] = autorest.Encode("query", selectParameter) - } - if len(orderby) > 0 { - queryParameters["$orderby"] = autorest.Encode("query", orderby) - } - if count != nil { - queryParameters["$count"] = autorest.Encode("query", *count) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.DataLakeAnalytics/accounts", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. 
-func (client AccountsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client AccountsClient) ListResponder(resp *http.Response) (result DataLakeAnalyticsAccountListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listNextResults retrieves the next set of results, if any. -func (client AccountsClient) listNextResults(ctx context.Context, lastResults DataLakeAnalyticsAccountListResult) (result DataLakeAnalyticsAccountListResult, err error) { - req, err := lastResults.dataLakeAnalyticsAccountListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "account.AccountsClient", "listNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "account.AccountsClient", "listNextResults", resp, "Failure sending next results request") - } - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "listNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListComplete enumerates all values, automatically crossing page boundaries as required. -func (client AccountsClient) ListComplete(ctx context.Context, filter string, top *int32, skip *int32, selectParameter string, orderby string, count *bool) (result DataLakeAnalyticsAccountListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.List") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.List(ctx, filter, top, skip, selectParameter, orderby, count) - return -} - -// ListByResourceGroup gets the first page of Data Lake Analytics accounts, if any, within a specific resource group. -// This includes a link to the next page, if any. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// filter - oData filter. Optional. -// top - the number of items to return. Optional. -// skip - the number of items to skip over before returning elements. Optional. -// selectParameter - oData Select statement. Limits the properties on each entry to just those requested, e.g. -// Categories?$select=CategoryName,Description. Optional. -// orderby - orderBy clause. One or more comma-separated expressions with an optional "asc" (the default) or -// "desc" depending on the order you'd like the values sorted, e.g. Categories?$orderby=CategoryName desc. -// Optional. -// count - the Boolean value of true or false to request a count of the matching resources included with the -// resources in the response, e.g. Categories?$count=true. Optional. 
-func (client AccountsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string, filter string, top *int32, skip *int32, selectParameter string, orderby string, count *bool) (result DataLakeAnalyticsAccountListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListByResourceGroup") - defer func() { - sc := -1 - if result.dlaalr.Response.Response != nil { - sc = result.dlaalr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: top, - Constraints: []validation.Constraint{{Target: "top", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "top", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}}}}}, - {TargetValue: skip, - Constraints: []validation.Constraint{{Target: "skip", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "skip", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}}}}}}); err != nil { - return result, validation.NewError("account.AccountsClient", "ListByResourceGroup", err.Error()) - } - - result.fn = client.listByResourceGroupNextResults - req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName, filter, top, skip, selectParameter, orderby, count) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "ListByResourceGroup", nil, "Failure preparing request") - return - } - - resp, err := client.ListByResourceGroupSender(req) - if err != nil { - result.dlaalr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.AccountsClient", "ListByResourceGroup", resp, "Failure sending request") - return - } - - result.dlaalr, err = client.ListByResourceGroupResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "ListByResourceGroup", resp, "Failure responding to request") - return - } - if result.dlaalr.hasNextLink() && result.dlaalr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
-func (client AccountsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string, filter string, top *int32, skip *int32, selectParameter string, orderby string, count *bool) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(filter) > 0 { - queryParameters["$filter"] = autorest.Encode("query", filter) - } - if top != nil { - queryParameters["$top"] = autorest.Encode("query", *top) - } - if skip != nil { - queryParameters["$skip"] = autorest.Encode("query", *skip) - } - if len(selectParameter) > 0 { - queryParameters["$select"] = autorest.Encode("query", selectParameter) - } - if len(orderby) > 0 { - queryParameters["$orderby"] = autorest.Encode("query", orderby) - } - if count != nil { - queryParameters["$count"] = autorest.Encode("query", *count) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always -// closes the http.Response Body. -func (client AccountsClient) ListByResourceGroupResponder(resp *http.Response) (result DataLakeAnalyticsAccountListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listByResourceGroupNextResults retrieves the next set of results, if any. -func (client AccountsClient) listByResourceGroupNextResults(ctx context.Context, lastResults DataLakeAnalyticsAccountListResult) (result DataLakeAnalyticsAccountListResult, err error) { - req, err := lastResults.dataLakeAnalyticsAccountListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "account.AccountsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListByResourceGroupSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "account.AccountsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") - } - result, err = client.ListByResourceGroupResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. 
-func (client AccountsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string, filter string, top *int32, skip *int32, selectParameter string, orderby string, count *bool) (result DataLakeAnalyticsAccountListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListByResourceGroup") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListByResourceGroup(ctx, resourceGroupName, filter, top, skip, selectParameter, orderby, count) - return -} - -// Update updates the Data Lake Analytics account object specified by the accountName with the contents of the account -// object. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. -// parameters - parameters supplied to the update Data Lake Analytics account operation. -func (client AccountsClient) Update(ctx context.Context, resourceGroupName string, accountName string, parameters *UpdateDataLakeAnalyticsAccountParameters) (result AccountsUpdateFutureType, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Update") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "Update", nil, "Failure preparing request") - return - } - - result, err = client.UpdateSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "Update", result.Response(), "Failure sending request") - return - } - - return -} - -// UpdatePreparer prepares the Update request. -func (client AccountsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, parameters *UpdateDataLakeAnalyticsAccountParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - if parameters != nil { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithJSON(parameters)) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSender sends the Update request. The method will close the -// http.Response Body if it receives an error. 
-func (client AccountsClient) UpdateSender(req *http.Request) (future AccountsUpdateFutureType, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// UpdateResponder handles the response to the Update request. The method always -// closes the http.Response Body. -func (client AccountsClient) UpdateResponder(resp *http.Response) (result DataLakeAnalyticsAccount, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/client.go deleted file mode 100644 index 6070fbed59f6..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/client.go +++ /dev/null @@ -1,41 +0,0 @@ -// Package account implements the Azure ARM Account service API version 2016-11-01. -// -// Creates an Azure Data Lake Analytics account management client. -package account - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "github.com/Azure/go-autorest/autorest" -) - -const ( - // DefaultBaseURI is the default URI used for the service Account - DefaultBaseURI = "https://management.azure.com" -) - -// BaseClient is the base client for Account. -type BaseClient struct { - autorest.Client - BaseURI string - SubscriptionID string -} - -// New creates an instance of the BaseClient client. -func New(subscriptionID string) BaseClient { - return NewWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewWithBaseURI creates an instance of the BaseClient client using a custom endpoint. Use this when interacting with -// an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { - return BaseClient{ - Client: autorest.NewClientWithUserAgent(UserAgent()), - BaseURI: baseURI, - SubscriptionID: subscriptionID, - } -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/computepolicies.go b/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/computepolicies.go deleted file mode 100644 index 31bd51601eaa..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/computepolicies.go +++ /dev/null @@ -1,485 +0,0 @@ -package account - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
- -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// ComputePoliciesClient is the creates an Azure Data Lake Analytics account management client. -type ComputePoliciesClient struct { - BaseClient -} - -// NewComputePoliciesClient creates an instance of the ComputePoliciesClient client. -func NewComputePoliciesClient(subscriptionID string) ComputePoliciesClient { - return NewComputePoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewComputePoliciesClientWithBaseURI creates an instance of the ComputePoliciesClient client using a custom endpoint. -// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewComputePoliciesClientWithBaseURI(baseURI string, subscriptionID string) ComputePoliciesClient { - return ComputePoliciesClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate creates or updates the specified compute policy. During update, the compute policy with the specified -// name will be replaced with this new compute policy. An account supports, at most, 50 policies -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. -// computePolicyName - the name of the compute policy to create or update. -// parameters - parameters supplied to create or update the compute policy. The max degree of parallelism per -// job property, min priority per job property, or both must be present. -func (client ComputePoliciesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, computePolicyName string, parameters CreateOrUpdateComputePolicyParameters) (result ComputePolicy, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ComputePoliciesClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.CreateOrUpdateComputePolicyProperties", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.CreateOrUpdateComputePolicyProperties.ObjectID", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.CreateOrUpdateComputePolicyProperties.MaxDegreeOfParallelismPerJob", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CreateOrUpdateComputePolicyProperties.MaxDegreeOfParallelismPerJob", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}}}, - {Target: "parameters.CreateOrUpdateComputePolicyProperties.MinPriorityPerJob", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CreateOrUpdateComputePolicyProperties.MinPriorityPerJob", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}}}, - }}}}}); err != nil { - return result, validation.NewError("account.ComputePoliciesClient", "CreateOrUpdate", err.Error()) - } - - req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, computePolicyName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "account.ComputePoliciesClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - resp, err := 
client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.ComputePoliciesClient", "CreateOrUpdate", resp, "Failure sending request") - return - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.ComputePoliciesClient", "CreateOrUpdate", resp, "Failure responding to request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client ComputePoliciesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, computePolicyName string, parameters CreateOrUpdateComputePolicyParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "computePolicyName": autorest.Encode("path", computePolicyName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/computePolicies/{computePolicyName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client ComputePoliciesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client ComputePoliciesClient) CreateOrUpdateResponder(resp *http.Response) (result ComputePolicy, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes the specified compute policy from the specified Data Lake Analytics account -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. -// computePolicyName - the name of the compute policy to delete. 
-func (client ComputePoliciesClient) Delete(ctx context.Context, resourceGroupName string, accountName string, computePolicyName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ComputePoliciesClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, computePolicyName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.ComputePoliciesClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "account.ComputePoliciesClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.ComputePoliciesClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client ComputePoliciesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, computePolicyName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "computePolicyName": autorest.Encode("path", computePolicyName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/computePolicies/{computePolicyName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client ComputePoliciesClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client ComputePoliciesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets the specified Data Lake Analytics compute policy. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. -// computePolicyName - the name of the compute policy to retrieve. 
-func (client ComputePoliciesClient) Get(ctx context.Context, resourceGroupName string, accountName string, computePolicyName string) (result ComputePolicy, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ComputePoliciesClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, resourceGroupName, accountName, computePolicyName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.ComputePoliciesClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.ComputePoliciesClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.ComputePoliciesClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. -func (client ComputePoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, computePolicyName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "computePolicyName": autorest.Encode("path", computePolicyName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/computePolicies/{computePolicyName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client ComputePoliciesClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client ComputePoliciesClient) GetResponder(resp *http.Response) (result ComputePolicy, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListByAccount lists the Data Lake Analytics compute policies within the specified Data Lake Analytics account. An -// account supports, at most, 50 policies -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. 
-func (client ComputePoliciesClient) ListByAccount(ctx context.Context, resourceGroupName string, accountName string) (result ComputePolicyListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ComputePoliciesClient.ListByAccount") - defer func() { - sc := -1 - if result.cplr.Response.Response != nil { - sc = result.cplr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.fn = client.listByAccountNextResults - req, err := client.ListByAccountPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.ComputePoliciesClient", "ListByAccount", nil, "Failure preparing request") - return - } - - resp, err := client.ListByAccountSender(req) - if err != nil { - result.cplr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.ComputePoliciesClient", "ListByAccount", resp, "Failure sending request") - return - } - - result.cplr, err = client.ListByAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.ComputePoliciesClient", "ListByAccount", resp, "Failure responding to request") - return - } - if result.cplr.hasNextLink() && result.cplr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListByAccountPreparer prepares the ListByAccount request. -func (client ComputePoliciesClient) ListByAccountPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/computePolicies", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByAccountSender sends the ListByAccount request. The method will close the -// http.Response Body if it receives an error. -func (client ComputePoliciesClient) ListByAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByAccountResponder handles the response to the ListByAccount request. The method always -// closes the http.Response Body. -func (client ComputePoliciesClient) ListByAccountResponder(resp *http.Response) (result ComputePolicyListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listByAccountNextResults retrieves the next set of results, if any. 
-func (client ComputePoliciesClient) listByAccountNextResults(ctx context.Context, lastResults ComputePolicyListResult) (result ComputePolicyListResult, err error) { - req, err := lastResults.computePolicyListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "account.ComputePoliciesClient", "listByAccountNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListByAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "account.ComputePoliciesClient", "listByAccountNextResults", resp, "Failure sending next results request") - } - result, err = client.ListByAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.ComputePoliciesClient", "listByAccountNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListByAccountComplete enumerates all values, automatically crossing page boundaries as required. -func (client ComputePoliciesClient) ListByAccountComplete(ctx context.Context, resourceGroupName string, accountName string) (result ComputePolicyListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ComputePoliciesClient.ListByAccount") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListByAccount(ctx, resourceGroupName, accountName) - return -} - -// Update updates the specified compute policy. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. -// computePolicyName - the name of the compute policy to update. -// parameters - parameters supplied to update the compute policy. -func (client ComputePoliciesClient) Update(ctx context.Context, resourceGroupName string, accountName string, computePolicyName string, parameters *UpdateComputePolicyParameters) (result ComputePolicy, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ComputePoliciesClient.Update") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, computePolicyName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "account.ComputePoliciesClient", "Update", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.ComputePoliciesClient", "Update", resp, "Failure sending request") - return - } - - result, err = client.UpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.ComputePoliciesClient", "Update", resp, "Failure responding to request") - return - } - - return -} - -// UpdatePreparer prepares the Update request. 
-func (client ComputePoliciesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, computePolicyName string, parameters *UpdateComputePolicyParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "computePolicyName": autorest.Encode("path", computePolicyName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/computePolicies/{computePolicyName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - if parameters != nil { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithJSON(parameters)) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSender sends the Update request. The method will close the -// http.Response Body if it receives an error. -func (client ComputePoliciesClient) UpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// UpdateResponder handles the response to the Update request. The method always -// closes the http.Response Body. -func (client ComputePoliciesClient) UpdateResponder(resp *http.Response) (result ComputePolicy, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/datalakestoreaccounts.go b/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/datalakestoreaccounts.go deleted file mode 100644 index 8de9783a1496..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/datalakestoreaccounts.go +++ /dev/null @@ -1,428 +0,0 @@ -package account - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// DataLakeStoreAccountsClient is the creates an Azure Data Lake Analytics account management client. -type DataLakeStoreAccountsClient struct { - BaseClient -} - -// NewDataLakeStoreAccountsClient creates an instance of the DataLakeStoreAccountsClient client. 
-func NewDataLakeStoreAccountsClient(subscriptionID string) DataLakeStoreAccountsClient { - return NewDataLakeStoreAccountsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewDataLakeStoreAccountsClientWithBaseURI creates an instance of the DataLakeStoreAccountsClient client using a -// custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, -// Azure stack). -func NewDataLakeStoreAccountsClientWithBaseURI(baseURI string, subscriptionID string) DataLakeStoreAccountsClient { - return DataLakeStoreAccountsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// Add updates the specified Data Lake Analytics account to include the additional Data Lake Store account. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. -// dataLakeStoreAccountName - the name of the Data Lake Store account to add. -// parameters - the details of the Data Lake Store account. -func (client DataLakeStoreAccountsClient) Add(ctx context.Context, resourceGroupName string, accountName string, dataLakeStoreAccountName string, parameters *AddDataLakeStoreParameters) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DataLakeStoreAccountsClient.Add") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.AddPreparer(ctx, resourceGroupName, accountName, dataLakeStoreAccountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "account.DataLakeStoreAccountsClient", "Add", nil, "Failure preparing request") - return - } - - resp, err := client.AddSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "account.DataLakeStoreAccountsClient", "Add", resp, "Failure sending request") - return - } - - result, err = client.AddResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.DataLakeStoreAccountsClient", "Add", resp, "Failure responding to request") - return - } - - return -} - -// AddPreparer prepares the Add request. -func (client DataLakeStoreAccountsClient) AddPreparer(ctx context.Context, resourceGroupName string, accountName string, dataLakeStoreAccountName string, parameters *AddDataLakeStoreParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "dataLakeStoreAccountName": autorest.Encode("path", dataLakeStoreAccountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/dataLakeStoreAccounts/{dataLakeStoreAccountName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - if parameters != nil { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithJSON(parameters)) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// AddSender sends the Add request. 
The method will close the -// http.Response Body if it receives an error. -func (client DataLakeStoreAccountsClient) AddSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// AddResponder handles the response to the Add request. The method always -// closes the http.Response Body. -func (client DataLakeStoreAccountsClient) AddResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - result.Response = resp - return -} - -// Delete updates the Data Lake Analytics account specified to remove the specified Data Lake Store account. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. -// dataLakeStoreAccountName - the name of the Data Lake Store account to remove -func (client DataLakeStoreAccountsClient) Delete(ctx context.Context, resourceGroupName string, accountName string, dataLakeStoreAccountName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DataLakeStoreAccountsClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, dataLakeStoreAccountName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.DataLakeStoreAccountsClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "account.DataLakeStoreAccountsClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.DataLakeStoreAccountsClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client DataLakeStoreAccountsClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, dataLakeStoreAccountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "dataLakeStoreAccountName": autorest.Encode("path", dataLakeStoreAccountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/dataLakeStoreAccounts/{dataLakeStoreAccountName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client DataLakeStoreAccountsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. 
The method always -// closes the http.Response Body. -func (client DataLakeStoreAccountsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets the specified Data Lake Store account details in the specified Data Lake Analytics account. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. -// dataLakeStoreAccountName - the name of the Data Lake Store account to retrieve -func (client DataLakeStoreAccountsClient) Get(ctx context.Context, resourceGroupName string, accountName string, dataLakeStoreAccountName string) (result DataLakeStoreAccountInformation, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DataLakeStoreAccountsClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, resourceGroupName, accountName, dataLakeStoreAccountName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.DataLakeStoreAccountsClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.DataLakeStoreAccountsClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.DataLakeStoreAccountsClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. -func (client DataLakeStoreAccountsClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, dataLakeStoreAccountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "dataLakeStoreAccountName": autorest.Encode("path", dataLakeStoreAccountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/dataLakeStoreAccounts/{dataLakeStoreAccountName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client DataLakeStoreAccountsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. 
-func (client DataLakeStoreAccountsClient) GetResponder(resp *http.Response) (result DataLakeStoreAccountInformation, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListByAccount gets the first page of Data Lake Store accounts linked to the specified Data Lake Analytics account. -// The response includes a link to the next page, if any. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. -// filter - oData filter. Optional. -// top - the number of items to return. Optional. -// skip - the number of items to skip over before returning elements. Optional. -// selectParameter - oData Select statement. Limits the properties on each entry to just those requested, e.g. -// Categories?$select=CategoryName,Description. Optional. -// orderby - orderBy clause. One or more comma-separated expressions with an optional "asc" (the default) or -// "desc" depending on the order you'd like the values sorted, e.g. Categories?$orderby=CategoryName desc. -// Optional. -// count - the Boolean value of true or false to request a count of the matching resources included with the -// resources in the response, e.g. Categories?$count=true. Optional. -func (client DataLakeStoreAccountsClient) ListByAccount(ctx context.Context, resourceGroupName string, accountName string, filter string, top *int32, skip *int32, selectParameter string, orderby string, count *bool) (result DataLakeStoreAccountInformationListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DataLakeStoreAccountsClient.ListByAccount") - defer func() { - sc := -1 - if result.dlsailr.Response.Response != nil { - sc = result.dlsailr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: top, - Constraints: []validation.Constraint{{Target: "top", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "top", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}}}}}, - {TargetValue: skip, - Constraints: []validation.Constraint{{Target: "skip", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "skip", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}}}}}}); err != nil { - return result, validation.NewError("account.DataLakeStoreAccountsClient", "ListByAccount", err.Error()) - } - - result.fn = client.listByAccountNextResults - req, err := client.ListByAccountPreparer(ctx, resourceGroupName, accountName, filter, top, skip, selectParameter, orderby, count) - if err != nil { - err = autorest.NewErrorWithError(err, "account.DataLakeStoreAccountsClient", "ListByAccount", nil, "Failure preparing request") - return - } - - resp, err := client.ListByAccountSender(req) - if err != nil { - result.dlsailr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.DataLakeStoreAccountsClient", "ListByAccount", resp, "Failure sending request") - return - } - - result.dlsailr, err = client.ListByAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.DataLakeStoreAccountsClient", "ListByAccount", resp, "Failure responding to request") - return - } - if result.dlsailr.hasNextLink() && result.dlsailr.IsEmpty() { - err = 
result.NextWithContext(ctx) - return - } - - return -} - -// ListByAccountPreparer prepares the ListByAccount request. -func (client DataLakeStoreAccountsClient) ListByAccountPreparer(ctx context.Context, resourceGroupName string, accountName string, filter string, top *int32, skip *int32, selectParameter string, orderby string, count *bool) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(filter) > 0 { - queryParameters["$filter"] = autorest.Encode("query", filter) - } - if top != nil { - queryParameters["$top"] = autorest.Encode("query", *top) - } - if skip != nil { - queryParameters["$skip"] = autorest.Encode("query", *skip) - } - if len(selectParameter) > 0 { - queryParameters["$select"] = autorest.Encode("query", selectParameter) - } - if len(orderby) > 0 { - queryParameters["$orderby"] = autorest.Encode("query", orderby) - } - if count != nil { - queryParameters["$count"] = autorest.Encode("query", *count) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/dataLakeStoreAccounts", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByAccountSender sends the ListByAccount request. The method will close the -// http.Response Body if it receives an error. -func (client DataLakeStoreAccountsClient) ListByAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByAccountResponder handles the response to the ListByAccount request. The method always -// closes the http.Response Body. -func (client DataLakeStoreAccountsClient) ListByAccountResponder(resp *http.Response) (result DataLakeStoreAccountInformationListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listByAccountNextResults retrieves the next set of results, if any. 
-func (client DataLakeStoreAccountsClient) listByAccountNextResults(ctx context.Context, lastResults DataLakeStoreAccountInformationListResult) (result DataLakeStoreAccountInformationListResult, err error) { - req, err := lastResults.dataLakeStoreAccountInformationListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "account.DataLakeStoreAccountsClient", "listByAccountNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListByAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "account.DataLakeStoreAccountsClient", "listByAccountNextResults", resp, "Failure sending next results request") - } - result, err = client.ListByAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.DataLakeStoreAccountsClient", "listByAccountNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListByAccountComplete enumerates all values, automatically crossing page boundaries as required. -func (client DataLakeStoreAccountsClient) ListByAccountComplete(ctx context.Context, resourceGroupName string, accountName string, filter string, top *int32, skip *int32, selectParameter string, orderby string, count *bool) (result DataLakeStoreAccountInformationListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DataLakeStoreAccountsClient.ListByAccount") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListByAccount(ctx, resourceGroupName, accountName, filter, top, skip, selectParameter, orderby, count) - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/enums.go deleted file mode 100644 index a3b84b0253a4..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/enums.go +++ /dev/null @@ -1,169 +0,0 @@ -package account - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// AADObjectType enumerates the values for aad object type. -type AADObjectType string - -const ( - // Group ... - Group AADObjectType = "Group" - // ServicePrincipal ... - ServicePrincipal AADObjectType = "ServicePrincipal" - // User ... - User AADObjectType = "User" -) - -// PossibleAADObjectTypeValues returns an array of possible values for the AADObjectType const type. -func PossibleAADObjectTypeValues() []AADObjectType { - return []AADObjectType{Group, ServicePrincipal, User} -} - -// DataLakeAnalyticsAccountState enumerates the values for data lake analytics account state. -type DataLakeAnalyticsAccountState string - -const ( - // Active ... - Active DataLakeAnalyticsAccountState = "Active" - // Suspended ... - Suspended DataLakeAnalyticsAccountState = "Suspended" -) - -// PossibleDataLakeAnalyticsAccountStateValues returns an array of possible values for the DataLakeAnalyticsAccountState const type. 
-func PossibleDataLakeAnalyticsAccountStateValues() []DataLakeAnalyticsAccountState { - return []DataLakeAnalyticsAccountState{Active, Suspended} -} - -// DataLakeAnalyticsAccountStatus enumerates the values for data lake analytics account status. -type DataLakeAnalyticsAccountStatus string - -const ( - // Canceled ... - Canceled DataLakeAnalyticsAccountStatus = "Canceled" - // Creating ... - Creating DataLakeAnalyticsAccountStatus = "Creating" - // Deleted ... - Deleted DataLakeAnalyticsAccountStatus = "Deleted" - // Deleting ... - Deleting DataLakeAnalyticsAccountStatus = "Deleting" - // Failed ... - Failed DataLakeAnalyticsAccountStatus = "Failed" - // Patching ... - Patching DataLakeAnalyticsAccountStatus = "Patching" - // Resuming ... - Resuming DataLakeAnalyticsAccountStatus = "Resuming" - // Running ... - Running DataLakeAnalyticsAccountStatus = "Running" - // Succeeded ... - Succeeded DataLakeAnalyticsAccountStatus = "Succeeded" - // Suspending ... - Suspending DataLakeAnalyticsAccountStatus = "Suspending" - // Undeleting ... - Undeleting DataLakeAnalyticsAccountStatus = "Undeleting" -) - -// PossibleDataLakeAnalyticsAccountStatusValues returns an array of possible values for the DataLakeAnalyticsAccountStatus const type. -func PossibleDataLakeAnalyticsAccountStatusValues() []DataLakeAnalyticsAccountStatus { - return []DataLakeAnalyticsAccountStatus{Canceled, Creating, Deleted, Deleting, Failed, Patching, Resuming, Running, Succeeded, Suspending, Undeleting} -} - -// FirewallAllowAzureIpsState enumerates the values for firewall allow azure ips state. -type FirewallAllowAzureIpsState string - -const ( - // Disabled ... - Disabled FirewallAllowAzureIpsState = "Disabled" - // Enabled ... - Enabled FirewallAllowAzureIpsState = "Enabled" -) - -// PossibleFirewallAllowAzureIpsStateValues returns an array of possible values for the FirewallAllowAzureIpsState const type. -func PossibleFirewallAllowAzureIpsStateValues() []FirewallAllowAzureIpsState { - return []FirewallAllowAzureIpsState{Disabled, Enabled} -} - -// FirewallState enumerates the values for firewall state. -type FirewallState string - -const ( - // FirewallStateDisabled ... - FirewallStateDisabled FirewallState = "Disabled" - // FirewallStateEnabled ... - FirewallStateEnabled FirewallState = "Enabled" -) - -// PossibleFirewallStateValues returns an array of possible values for the FirewallState const type. -func PossibleFirewallStateValues() []FirewallState { - return []FirewallState{FirewallStateDisabled, FirewallStateEnabled} -} - -// OperationOrigin enumerates the values for operation origin. -type OperationOrigin string - -const ( - // OperationOriginSystem ... - OperationOriginSystem OperationOrigin = "system" - // OperationOriginUser ... - OperationOriginUser OperationOrigin = "user" - // OperationOriginUsersystem ... - OperationOriginUsersystem OperationOrigin = "user,system" -) - -// PossibleOperationOriginValues returns an array of possible values for the OperationOrigin const type. -func PossibleOperationOriginValues() []OperationOrigin { - return []OperationOrigin{OperationOriginSystem, OperationOriginUser, OperationOriginUsersystem} -} - -// SubscriptionState enumerates the values for subscription state. -type SubscriptionState string - -const ( - // SubscriptionStateDeleted ... - SubscriptionStateDeleted SubscriptionState = "Deleted" - // SubscriptionStateRegistered ... - SubscriptionStateRegistered SubscriptionState = "Registered" - // SubscriptionStateSuspended ... 
- SubscriptionStateSuspended SubscriptionState = "Suspended" - // SubscriptionStateUnregistered ... - SubscriptionStateUnregistered SubscriptionState = "Unregistered" - // SubscriptionStateWarned ... - SubscriptionStateWarned SubscriptionState = "Warned" -) - -// PossibleSubscriptionStateValues returns an array of possible values for the SubscriptionState const type. -func PossibleSubscriptionStateValues() []SubscriptionState { - return []SubscriptionState{SubscriptionStateDeleted, SubscriptionStateRegistered, SubscriptionStateSuspended, SubscriptionStateUnregistered, SubscriptionStateWarned} -} - -// TierType enumerates the values for tier type. -type TierType string - -const ( - // Commitment100000AUHours ... - Commitment100000AUHours TierType = "Commitment_100000AUHours" - // Commitment10000AUHours ... - Commitment10000AUHours TierType = "Commitment_10000AUHours" - // Commitment1000AUHours ... - Commitment1000AUHours TierType = "Commitment_1000AUHours" - // Commitment100AUHours ... - Commitment100AUHours TierType = "Commitment_100AUHours" - // Commitment500000AUHours ... - Commitment500000AUHours TierType = "Commitment_500000AUHours" - // Commitment50000AUHours ... - Commitment50000AUHours TierType = "Commitment_50000AUHours" - // Commitment5000AUHours ... - Commitment5000AUHours TierType = "Commitment_5000AUHours" - // Commitment500AUHours ... - Commitment500AUHours TierType = "Commitment_500AUHours" - // Consumption ... - Consumption TierType = "Consumption" -) - -// PossibleTierTypeValues returns an array of possible values for the TierType const type. -func PossibleTierTypeValues() []TierType { - return []TierType{Commitment100000AUHours, Commitment10000AUHours, Commitment1000AUHours, Commitment100AUHours, Commitment500000AUHours, Commitment50000AUHours, Commitment5000AUHours, Commitment500AUHours, Consumption} -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/firewallrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/firewallrules.go deleted file mode 100644 index 9b00275c6fd8..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/firewallrules.go +++ /dev/null @@ -1,480 +0,0 @@ -package account - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// FirewallRulesClient is the creates an Azure Data Lake Analytics account management client. -type FirewallRulesClient struct { - BaseClient -} - -// NewFirewallRulesClient creates an instance of the FirewallRulesClient client. -func NewFirewallRulesClient(subscriptionID string) FirewallRulesClient { - return NewFirewallRulesClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewFirewallRulesClientWithBaseURI creates an instance of the FirewallRulesClient client using a custom endpoint. -// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). 
-func NewFirewallRulesClientWithBaseURI(baseURI string, subscriptionID string) FirewallRulesClient { - return FirewallRulesClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate creates or updates the specified firewall rule. During update, the firewall rule with the specified -// name will be replaced with this new firewall rule. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. -// firewallRuleName - the name of the firewall rule to create or update. -// parameters - parameters supplied to create or update the firewall rule. -func (client FirewallRulesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, firewallRuleName string, parameters CreateOrUpdateFirewallRuleParameters) (result FirewallRule, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FirewallRulesClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.CreateOrUpdateFirewallRuleProperties", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.CreateOrUpdateFirewallRuleProperties.StartIPAddress", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.CreateOrUpdateFirewallRuleProperties.EndIPAddress", Name: validation.Null, Rule: true, Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("account.FirewallRulesClient", "CreateOrUpdate", err.Error()) - } - - req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, firewallRuleName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "CreateOrUpdate", resp, "Failure sending request") - return - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "CreateOrUpdate", resp, "Failure responding to request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client FirewallRulesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, firewallRuleName string, parameters CreateOrUpdateFirewallRuleParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "firewallRuleName": autorest.Encode("path", firewallRuleName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/firewallRules/{firewallRuleName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client FirewallRulesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client FirewallRulesClient) CreateOrUpdateResponder(resp *http.Response) (result FirewallRule, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes the specified firewall rule from the specified Data Lake Analytics account -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. -// firewallRuleName - the name of the firewall rule to delete. -func (client FirewallRulesClient) Delete(ctx context.Context, resourceGroupName string, accountName string, firewallRuleName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FirewallRulesClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, firewallRuleName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. 
-func (client FirewallRulesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, firewallRuleName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "firewallRuleName": autorest.Encode("path", firewallRuleName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/firewallRules/{firewallRuleName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client FirewallRulesClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client FirewallRulesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets the specified Data Lake Analytics firewall rule. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. -// firewallRuleName - the name of the firewall rule to retrieve. -func (client FirewallRulesClient) Get(ctx context.Context, resourceGroupName string, accountName string, firewallRuleName string) (result FirewallRule, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FirewallRulesClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, resourceGroupName, accountName, firewallRuleName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client FirewallRulesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, firewallRuleName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "firewallRuleName": autorest.Encode("path", firewallRuleName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/firewallRules/{firewallRuleName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client FirewallRulesClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client FirewallRulesClient) GetResponder(resp *http.Response) (result FirewallRule, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListByAccount lists the Data Lake Analytics firewall rules within the specified Data Lake Analytics account. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. -func (client FirewallRulesClient) ListByAccount(ctx context.Context, resourceGroupName string, accountName string) (result FirewallRuleListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FirewallRulesClient.ListByAccount") - defer func() { - sc := -1 - if result.frlr.Response.Response != nil { - sc = result.frlr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.fn = client.listByAccountNextResults - req, err := client.ListByAccountPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "ListByAccount", nil, "Failure preparing request") - return - } - - resp, err := client.ListByAccountSender(req) - if err != nil { - result.frlr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "ListByAccount", resp, "Failure sending request") - return - } - - result.frlr, err = client.ListByAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "ListByAccount", resp, "Failure responding to request") - return - } - if result.frlr.hasNextLink() && result.frlr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListByAccountPreparer prepares the ListByAccount request. 
-func (client FirewallRulesClient) ListByAccountPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/firewallRules", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByAccountSender sends the ListByAccount request. The method will close the -// http.Response Body if it receives an error. -func (client FirewallRulesClient) ListByAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByAccountResponder handles the response to the ListByAccount request. The method always -// closes the http.Response Body. -func (client FirewallRulesClient) ListByAccountResponder(resp *http.Response) (result FirewallRuleListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listByAccountNextResults retrieves the next set of results, if any. -func (client FirewallRulesClient) listByAccountNextResults(ctx context.Context, lastResults FirewallRuleListResult) (result FirewallRuleListResult, err error) { - req, err := lastResults.firewallRuleListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "account.FirewallRulesClient", "listByAccountNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListByAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "account.FirewallRulesClient", "listByAccountNextResults", resp, "Failure sending next results request") - } - result, err = client.ListByAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "listByAccountNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListByAccountComplete enumerates all values, automatically crossing page boundaries as required. -func (client FirewallRulesClient) ListByAccountComplete(ctx context.Context, resourceGroupName string, accountName string) (result FirewallRuleListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FirewallRulesClient.ListByAccount") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListByAccount(ctx, resourceGroupName, accountName) - return -} - -// Update updates the specified firewall rule. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. 
-// firewallRuleName - the name of the firewall rule to update. -// parameters - parameters supplied to update the firewall rule. -func (client FirewallRulesClient) Update(ctx context.Context, resourceGroupName string, accountName string, firewallRuleName string, parameters *UpdateFirewallRuleParameters) (result FirewallRule, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FirewallRulesClient.Update") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, firewallRuleName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "Update", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "Update", resp, "Failure sending request") - return - } - - result, err = client.UpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "Update", resp, "Failure responding to request") - return - } - - return -} - -// UpdatePreparer prepares the Update request. -func (client FirewallRulesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, firewallRuleName string, parameters *UpdateFirewallRuleParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "firewallRuleName": autorest.Encode("path", firewallRuleName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/firewallRules/{firewallRuleName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - if parameters != nil { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithJSON(parameters)) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSender sends the Update request. The method will close the -// http.Response Body if it receives an error. -func (client FirewallRulesClient) UpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// UpdateResponder handles the response to the Update request. The method always -// closes the http.Response Body. 
-func (client FirewallRulesClient) UpdateResponder(resp *http.Response) (result FirewallRule, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/locations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/locations.go deleted file mode 100644 index f404237611b6..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/locations.go +++ /dev/null @@ -1,105 +0,0 @@ -package account - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// LocationsClient is the creates an Azure Data Lake Analytics account management client. -type LocationsClient struct { - BaseClient -} - -// NewLocationsClient creates an instance of the LocationsClient client. -func NewLocationsClient(subscriptionID string) LocationsClient { - return NewLocationsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewLocationsClientWithBaseURI creates an instance of the LocationsClient client using a custom endpoint. Use this -// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewLocationsClientWithBaseURI(baseURI string, subscriptionID string) LocationsClient { - return LocationsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// GetCapability gets subscription-level properties and limits for Data Lake Analytics specified by resource location. -// Parameters: -// location - the resource location without whitespace. -func (client LocationsClient) GetCapability(ctx context.Context, location string) (result CapabilityInformation, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/LocationsClient.GetCapability") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetCapabilityPreparer(ctx, location) - if err != nil { - err = autorest.NewErrorWithError(err, "account.LocationsClient", "GetCapability", nil, "Failure preparing request") - return - } - - resp, err := client.GetCapabilitySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.LocationsClient", "GetCapability", resp, "Failure sending request") - return - } - - result, err = client.GetCapabilityResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.LocationsClient", "GetCapability", resp, "Failure responding to request") - return - } - - return -} - -// GetCapabilityPreparer prepares the GetCapability request. 
-func (client LocationsClient) GetCapabilityPreparer(ctx context.Context, location string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "location": autorest.Encode("path", location), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.DataLakeAnalytics/locations/{location}/capability", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCapabilitySender sends the GetCapability request. The method will close the -// http.Response Body if it receives an error. -func (client LocationsClient) GetCapabilitySender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetCapabilityResponder handles the response to the GetCapability request. The method always -// closes the http.Response Body. -func (client LocationsClient) GetCapabilityResponder(resp *http.Response) (result CapabilityInformation, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/models.go deleted file mode 100644 index c29533732b91..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/models.go +++ /dev/null @@ -1,3090 +0,0 @@ -package account - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "encoding/json" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/go-autorest/autorest/to" - "github.com/Azure/go-autorest/tracing" - "github.com/gofrs/uuid" - "net/http" -) - -// The package's fully qualified name. -const fqdn = "github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account" - -// AccountsCreateFutureType an abstraction for monitoring and retrieving the results of a long-running -// operation. -type AccountsCreateFutureType struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(AccountsClient) (DataLakeAnalyticsAccount, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *AccountsCreateFutureType) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for AccountsCreateFutureType.Result. 
-func (future *AccountsCreateFutureType) result(client AccountsClient) (dlaa DataLakeAnalyticsAccount, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsCreateFutureType", "Result", future.Response(), "Polling failure") - return - } - if !done { - dlaa.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("account.AccountsCreateFutureType") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if dlaa.Response.Response, err = future.GetResult(sender); err == nil && dlaa.Response.Response.StatusCode != http.StatusNoContent { - dlaa, err = client.CreateResponder(dlaa.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsCreateFutureType", "Result", dlaa.Response.Response, "Failure responding to request") - } - } - return -} - -// AccountsDeleteFutureType an abstraction for monitoring and retrieving the results of a long-running -// operation. -type AccountsDeleteFutureType struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(AccountsClient) (autorest.Response, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *AccountsDeleteFutureType) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for AccountsDeleteFutureType.Result. -func (future *AccountsDeleteFutureType) result(client AccountsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsDeleteFutureType", "Result", future.Response(), "Polling failure") - return - } - if !done { - ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("account.AccountsDeleteFutureType") - return - } - ar.Response = future.Response() - return -} - -// AccountsUpdateFutureType an abstraction for monitoring and retrieving the results of a long-running -// operation. -type AccountsUpdateFutureType struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(AccountsClient) (DataLakeAnalyticsAccount, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *AccountsUpdateFutureType) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for AccountsUpdateFutureType.Result. 
-func (future *AccountsUpdateFutureType) result(client AccountsClient) (dlaa DataLakeAnalyticsAccount, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsUpdateFutureType", "Result", future.Response(), "Polling failure") - return - } - if !done { - dlaa.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("account.AccountsUpdateFutureType") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if dlaa.Response.Response, err = future.GetResult(sender); err == nil && dlaa.Response.Response.StatusCode != http.StatusNoContent { - dlaa, err = client.UpdateResponder(dlaa.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsUpdateFutureType", "Result", dlaa.Response.Response, "Failure responding to request") - } - } - return -} - -// AddDataLakeStoreParameters the parameters used to add a new Data Lake Store account. -type AddDataLakeStoreParameters struct { - // AddDataLakeStoreProperties - The Data Lake Store account properties to use when adding a new Data Lake Store account. - *AddDataLakeStoreProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for AddDataLakeStoreParameters. -func (adlsp AddDataLakeStoreParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if adlsp.AddDataLakeStoreProperties != nil { - objectMap["properties"] = adlsp.AddDataLakeStoreProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for AddDataLakeStoreParameters struct. -func (adlsp *AddDataLakeStoreParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var addDataLakeStoreProperties AddDataLakeStoreProperties - err = json.Unmarshal(*v, &addDataLakeStoreProperties) - if err != nil { - return err - } - adlsp.AddDataLakeStoreProperties = &addDataLakeStoreProperties - } - } - } - - return nil -} - -// AddDataLakeStoreProperties the Data Lake Store account properties to use when adding a new Data Lake -// Store account. -type AddDataLakeStoreProperties struct { - // Suffix - The optional suffix for the Data Lake Store account. - Suffix *string `json:"suffix,omitempty"` -} - -// AddDataLakeStoreWithAccountParameters the parameters used to add a new Data Lake Store account while -// creating a new Data Lake Analytics account. -type AddDataLakeStoreWithAccountParameters struct { - // Name - The unique name of the Data Lake Store account to add. - Name *string `json:"name,omitempty"` - // AddDataLakeStoreProperties - The Data Lake Store account properties to use when adding a new Data Lake Store account. - *AddDataLakeStoreProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for AddDataLakeStoreWithAccountParameters. 
-func (adlswap AddDataLakeStoreWithAccountParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if adlswap.Name != nil { - objectMap["name"] = adlswap.Name - } - if adlswap.AddDataLakeStoreProperties != nil { - objectMap["properties"] = adlswap.AddDataLakeStoreProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for AddDataLakeStoreWithAccountParameters struct. -func (adlswap *AddDataLakeStoreWithAccountParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - adlswap.Name = &name - } - case "properties": - if v != nil { - var addDataLakeStoreProperties AddDataLakeStoreProperties - err = json.Unmarshal(*v, &addDataLakeStoreProperties) - if err != nil { - return err - } - adlswap.AddDataLakeStoreProperties = &addDataLakeStoreProperties - } - } - } - - return nil -} - -// AddStorageAccountParameters the parameters used to add a new Azure Storage account. -type AddStorageAccountParameters struct { - // AddStorageAccountProperties - The Azure Storage account properties to use when adding a new Azure Storage account. - *AddStorageAccountProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for AddStorageAccountParameters. -func (asap AddStorageAccountParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if asap.AddStorageAccountProperties != nil { - objectMap["properties"] = asap.AddStorageAccountProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for AddStorageAccountParameters struct. -func (asap *AddStorageAccountParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var addStorageAccountProperties AddStorageAccountProperties - err = json.Unmarshal(*v, &addStorageAccountProperties) - if err != nil { - return err - } - asap.AddStorageAccountProperties = &addStorageAccountProperties - } - } - } - - return nil -} - -// AddStorageAccountProperties the Azure Storage account properties to use when adding a new Azure Storage -// account. -type AddStorageAccountProperties struct { - // AccessKey - The access key associated with this Azure Storage account that will be used to connect to it. - AccessKey *string `json:"accessKey,omitempty"` - // Suffix - The optional suffix for the storage account. - Suffix *string `json:"suffix,omitempty"` -} - -// AddStorageAccountWithAccountParameters the parameters used to add a new Azure Storage account while -// creating a new Data Lake Analytics account. -type AddStorageAccountWithAccountParameters struct { - // Name - The unique name of the Azure Storage account to add. - Name *string `json:"name,omitempty"` - // AddStorageAccountProperties - The Azure Storage account properties to use when adding a new Azure Storage account. - *AddStorageAccountProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for AddStorageAccountWithAccountParameters. 
-func (asawap AddStorageAccountWithAccountParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if asawap.Name != nil { - objectMap["name"] = asawap.Name - } - if asawap.AddStorageAccountProperties != nil { - objectMap["properties"] = asawap.AddStorageAccountProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for AddStorageAccountWithAccountParameters struct. -func (asawap *AddStorageAccountWithAccountParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - asawap.Name = &name - } - case "properties": - if v != nil { - var addStorageAccountProperties AddStorageAccountProperties - err = json.Unmarshal(*v, &addStorageAccountProperties) - if err != nil { - return err - } - asawap.AddStorageAccountProperties = &addStorageAccountProperties - } - } - } - - return nil -} - -// CapabilityInformation subscription-level properties and limits for Data Lake Analytics. -type CapabilityInformation struct { - autorest.Response `json:"-"` - // SubscriptionID - READ-ONLY; The subscription credentials that uniquely identifies the subscription. - SubscriptionID *uuid.UUID `json:"subscriptionId,omitempty"` - // State - READ-ONLY; The subscription state. Possible values include: 'SubscriptionStateRegistered', 'SubscriptionStateSuspended', 'SubscriptionStateDeleted', 'SubscriptionStateUnregistered', 'SubscriptionStateWarned' - State SubscriptionState `json:"state,omitempty"` - // MaxAccountCount - READ-ONLY; The maximum supported number of accounts under this subscription. - MaxAccountCount *int32 `json:"maxAccountCount,omitempty"` - // AccountCount - READ-ONLY; The current number of accounts under this subscription. - AccountCount *int32 `json:"accountCount,omitempty"` - // MigrationState - READ-ONLY; The Boolean value of true or false to indicate the maintenance state. - MigrationState *bool `json:"migrationState,omitempty"` -} - -// MarshalJSON is the custom marshaler for CapabilityInformation. -func (ci CapabilityInformation) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// CheckNameAvailabilityParameters data Lake Analytics account name availability check parameters. -type CheckNameAvailabilityParameters struct { - // Name - The Data Lake Analytics name to check availability for. - Name *string `json:"name,omitempty"` - // Type - The resource type. Note: This should not be set by the user, as the constant value is Microsoft.DataLakeAnalytics/accounts - Type *string `json:"type,omitempty"` -} - -// ComputePolicy data Lake Analytics compute policy information. -type ComputePolicy struct { - autorest.Response `json:"-"` - // ComputePolicyProperties - READ-ONLY; The compute policy properties. - *ComputePolicyProperties `json:"properties,omitempty"` - // ID - READ-ONLY; The resource identifier. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The resource name. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The resource type. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for ComputePolicy. 
-func (cp ComputePolicy) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for ComputePolicy struct. -func (cp *ComputePolicy) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var computePolicyProperties ComputePolicyProperties - err = json.Unmarshal(*v, &computePolicyProperties) - if err != nil { - return err - } - cp.ComputePolicyProperties = &computePolicyProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - cp.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - cp.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - cp.Type = &typeVar - } - } - } - - return nil -} - -// ComputePolicyListResult the list of compute policies in the account. -type ComputePolicyListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The results of the list operation. - Value *[]ComputePolicy `json:"value,omitempty"` - // NextLink - READ-ONLY; The link (url) to the next page of results. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for ComputePolicyListResult. -func (cplr ComputePolicyListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// ComputePolicyListResultIterator provides access to a complete listing of ComputePolicy values. -type ComputePolicyListResultIterator struct { - i int - page ComputePolicyListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *ComputePolicyListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ComputePolicyListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *ComputePolicyListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter ComputePolicyListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter ComputePolicyListResultIterator) Response() ComputePolicyListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. 
-func (iter ComputePolicyListResultIterator) Value() ComputePolicy { - if !iter.page.NotDone() { - return ComputePolicy{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the ComputePolicyListResultIterator type. -func NewComputePolicyListResultIterator(page ComputePolicyListResultPage) ComputePolicyListResultIterator { - return ComputePolicyListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (cplr ComputePolicyListResult) IsEmpty() bool { - return cplr.Value == nil || len(*cplr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (cplr ComputePolicyListResult) hasNextLink() bool { - return cplr.NextLink != nil && len(*cplr.NextLink) != 0 -} - -// computePolicyListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (cplr ComputePolicyListResult) computePolicyListResultPreparer(ctx context.Context) (*http.Request, error) { - if !cplr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(cplr.NextLink))) -} - -// ComputePolicyListResultPage contains a page of ComputePolicy values. -type ComputePolicyListResultPage struct { - fn func(context.Context, ComputePolicyListResult) (ComputePolicyListResult, error) - cplr ComputePolicyListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *ComputePolicyListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ComputePolicyListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.cplr) - if err != nil { - return err - } - page.cplr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *ComputePolicyListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page ComputePolicyListResultPage) NotDone() bool { - return !page.cplr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page ComputePolicyListResultPage) Response() ComputePolicyListResult { - return page.cplr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page ComputePolicyListResultPage) Values() []ComputePolicy { - if page.cplr.IsEmpty() { - return nil - } - return *page.cplr.Value -} - -// Creates a new instance of the ComputePolicyListResultPage type. -func NewComputePolicyListResultPage(cur ComputePolicyListResult, getNextPage func(context.Context, ComputePolicyListResult) (ComputePolicyListResult, error)) ComputePolicyListResultPage { - return ComputePolicyListResultPage{ - fn: getNextPage, - cplr: cur, - } -} - -// ComputePolicyProperties the compute policy properties. 
-type ComputePolicyProperties struct { - // ObjectID - READ-ONLY; The AAD object identifier for the entity to create a policy for. - ObjectID *uuid.UUID `json:"objectId,omitempty"` - // ObjectType - READ-ONLY; The type of AAD object the object identifier refers to. Possible values include: 'User', 'Group', 'ServicePrincipal' - ObjectType AADObjectType `json:"objectType,omitempty"` - // MaxDegreeOfParallelismPerJob - READ-ONLY; The maximum degree of parallelism per job this user can use to submit jobs. - MaxDegreeOfParallelismPerJob *int32 `json:"maxDegreeOfParallelismPerJob,omitempty"` - // MinPriorityPerJob - READ-ONLY; The minimum priority per job this user can use to submit jobs. - MinPriorityPerJob *int32 `json:"minPriorityPerJob,omitempty"` -} - -// MarshalJSON is the custom marshaler for ComputePolicyProperties. -func (cpp ComputePolicyProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// CreateComputePolicyWithAccountParameters the parameters used to create a new compute policy while -// creating a new Data Lake Analytics account. -type CreateComputePolicyWithAccountParameters struct { - // Name - The unique name of the compute policy to create. - Name *string `json:"name,omitempty"` - // CreateOrUpdateComputePolicyProperties - The compute policy properties to use when creating a new compute policy. - *CreateOrUpdateComputePolicyProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for CreateComputePolicyWithAccountParameters. -func (ccpwap CreateComputePolicyWithAccountParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ccpwap.Name != nil { - objectMap["name"] = ccpwap.Name - } - if ccpwap.CreateOrUpdateComputePolicyProperties != nil { - objectMap["properties"] = ccpwap.CreateOrUpdateComputePolicyProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for CreateComputePolicyWithAccountParameters struct. -func (ccpwap *CreateComputePolicyWithAccountParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - ccpwap.Name = &name - } - case "properties": - if v != nil { - var createOrUpdateComputePolicyProperties CreateOrUpdateComputePolicyProperties - err = json.Unmarshal(*v, &createOrUpdateComputePolicyProperties) - if err != nil { - return err - } - ccpwap.CreateOrUpdateComputePolicyProperties = &createOrUpdateComputePolicyProperties - } - } - } - - return nil -} - -// CreateDataLakeAnalyticsAccountParameters the parameters to use for creating a Data Lake Analytics -// account. -type CreateDataLakeAnalyticsAccountParameters struct { - // Location - The resource location. - Location *string `json:"location,omitempty"` - // Tags - The resource tags. - Tags map[string]*string `json:"tags"` - // CreateDataLakeAnalyticsAccountProperties - The Data Lake Analytics account properties to use for creating. - *CreateDataLakeAnalyticsAccountProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for CreateDataLakeAnalyticsAccountParameters. 
-func (cdlaap CreateDataLakeAnalyticsAccountParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cdlaap.Location != nil { - objectMap["location"] = cdlaap.Location - } - if cdlaap.Tags != nil { - objectMap["tags"] = cdlaap.Tags - } - if cdlaap.CreateDataLakeAnalyticsAccountProperties != nil { - objectMap["properties"] = cdlaap.CreateDataLakeAnalyticsAccountProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for CreateDataLakeAnalyticsAccountParameters struct. -func (cdlaap *CreateDataLakeAnalyticsAccountParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - cdlaap.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - cdlaap.Tags = tags - } - case "properties": - if v != nil { - var createDataLakeAnalyticsAccountProperties CreateDataLakeAnalyticsAccountProperties - err = json.Unmarshal(*v, &createDataLakeAnalyticsAccountProperties) - if err != nil { - return err - } - cdlaap.CreateDataLakeAnalyticsAccountProperties = &createDataLakeAnalyticsAccountProperties - } - } - } - - return nil -} - -// CreateDataLakeAnalyticsAccountProperties ... -type CreateDataLakeAnalyticsAccountProperties struct { - // DefaultDataLakeStoreAccount - The default Data Lake Store account associated with this account. - DefaultDataLakeStoreAccount *string `json:"defaultDataLakeStoreAccount,omitempty"` - // DataLakeStoreAccounts - The list of Data Lake Store accounts associated with this account. - DataLakeStoreAccounts *[]AddDataLakeStoreWithAccountParameters `json:"dataLakeStoreAccounts,omitempty"` - // StorageAccounts - The list of Azure Blob Storage accounts associated with this account. - StorageAccounts *[]AddStorageAccountWithAccountParameters `json:"storageAccounts,omitempty"` - // ComputePolicies - The list of compute policies associated with this account. - ComputePolicies *[]CreateComputePolicyWithAccountParameters `json:"computePolicies,omitempty"` - // FirewallRules - The list of firewall rules associated with this account. - FirewallRules *[]CreateFirewallRuleWithAccountParameters `json:"firewallRules,omitempty"` - // FirewallState - The current state of the IP address firewall for this account. Possible values include: 'FirewallStateEnabled', 'FirewallStateDisabled' - FirewallState FirewallState `json:"firewallState,omitempty"` - // FirewallAllowAzureIps - The current state of allowing or disallowing IPs originating within Azure through the firewall. If the firewall is disabled, this is not enforced. Possible values include: 'Enabled', 'Disabled' - FirewallAllowAzureIps FirewallAllowAzureIpsState `json:"firewallAllowAzureIps,omitempty"` - // NewTier - The commitment tier for the next month. Possible values include: 'Consumption', 'Commitment100AUHours', 'Commitment500AUHours', 'Commitment1000AUHours', 'Commitment5000AUHours', 'Commitment10000AUHours', 'Commitment50000AUHours', 'Commitment100000AUHours', 'Commitment500000AUHours' - NewTier TierType `json:"newTier,omitempty"` - // MaxJobCount - The maximum supported jobs running under the account at the same time. 
- MaxJobCount *int32 `json:"maxJobCount,omitempty"` - // MaxDegreeOfParallelism - The maximum supported degree of parallelism for this account. - MaxDegreeOfParallelism *int32 `json:"maxDegreeOfParallelism,omitempty"` - // MaxDegreeOfParallelismPerJob - The maximum supported degree of parallelism per job for this account. - MaxDegreeOfParallelismPerJob *int32 `json:"maxDegreeOfParallelismPerJob,omitempty"` - // MinPriorityPerJob - The minimum supported priority per job for this account. - MinPriorityPerJob *int32 `json:"minPriorityPerJob,omitempty"` - // QueryStoreRetention - The number of days that job metadata is retained. - QueryStoreRetention *int32 `json:"queryStoreRetention,omitempty"` -} - -// CreateFirewallRuleWithAccountParameters the parameters used to create a new firewall rule while creating -// a new Data Lake Analytics account. -type CreateFirewallRuleWithAccountParameters struct { - // Name - The unique name of the firewall rule to create. - Name *string `json:"name,omitempty"` - // CreateOrUpdateFirewallRuleProperties - The firewall rule properties to use when creating a new firewall rule. - *CreateOrUpdateFirewallRuleProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for CreateFirewallRuleWithAccountParameters. -func (cfrwap CreateFirewallRuleWithAccountParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cfrwap.Name != nil { - objectMap["name"] = cfrwap.Name - } - if cfrwap.CreateOrUpdateFirewallRuleProperties != nil { - objectMap["properties"] = cfrwap.CreateOrUpdateFirewallRuleProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for CreateFirewallRuleWithAccountParameters struct. -func (cfrwap *CreateFirewallRuleWithAccountParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - cfrwap.Name = &name - } - case "properties": - if v != nil { - var createOrUpdateFirewallRuleProperties CreateOrUpdateFirewallRuleProperties - err = json.Unmarshal(*v, &createOrUpdateFirewallRuleProperties) - if err != nil { - return err - } - cfrwap.CreateOrUpdateFirewallRuleProperties = &createOrUpdateFirewallRuleProperties - } - } - } - - return nil -} - -// CreateOrUpdateComputePolicyParameters the parameters used to create a new compute policy. -type CreateOrUpdateComputePolicyParameters struct { - // CreateOrUpdateComputePolicyProperties - The compute policy properties to use when creating a new compute policy. - *CreateOrUpdateComputePolicyProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for CreateOrUpdateComputePolicyParameters. -func (coucpp CreateOrUpdateComputePolicyParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if coucpp.CreateOrUpdateComputePolicyProperties != nil { - objectMap["properties"] = coucpp.CreateOrUpdateComputePolicyProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for CreateOrUpdateComputePolicyParameters struct. 
-func (coucpp *CreateOrUpdateComputePolicyParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var createOrUpdateComputePolicyProperties CreateOrUpdateComputePolicyProperties - err = json.Unmarshal(*v, &createOrUpdateComputePolicyProperties) - if err != nil { - return err - } - coucpp.CreateOrUpdateComputePolicyProperties = &createOrUpdateComputePolicyProperties - } - } - } - - return nil -} - -// CreateOrUpdateComputePolicyProperties the compute policy properties to use when creating a new compute -// policy. -type CreateOrUpdateComputePolicyProperties struct { - // ObjectID - The AAD object identifier for the entity to create a policy for. - ObjectID *uuid.UUID `json:"objectId,omitempty"` - // ObjectType - The type of AAD object the object identifier refers to. Possible values include: 'User', 'Group', 'ServicePrincipal' - ObjectType AADObjectType `json:"objectType,omitempty"` - // MaxDegreeOfParallelismPerJob - The maximum degree of parallelism per job this user can use to submit jobs. This property, the min priority per job property, or both must be passed. - MaxDegreeOfParallelismPerJob *int32 `json:"maxDegreeOfParallelismPerJob,omitempty"` - // MinPriorityPerJob - The minimum priority per job this user can use to submit jobs. This property, the max degree of parallelism per job property, or both must be passed. - MinPriorityPerJob *int32 `json:"minPriorityPerJob,omitempty"` -} - -// CreateOrUpdateFirewallRuleParameters the parameters used to create a new firewall rule. -type CreateOrUpdateFirewallRuleParameters struct { - // CreateOrUpdateFirewallRuleProperties - The firewall rule properties to use when creating a new firewall rule. - *CreateOrUpdateFirewallRuleProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for CreateOrUpdateFirewallRuleParameters. -func (coufrp CreateOrUpdateFirewallRuleParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if coufrp.CreateOrUpdateFirewallRuleProperties != nil { - objectMap["properties"] = coufrp.CreateOrUpdateFirewallRuleProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for CreateOrUpdateFirewallRuleParameters struct. -func (coufrp *CreateOrUpdateFirewallRuleParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var createOrUpdateFirewallRuleProperties CreateOrUpdateFirewallRuleProperties - err = json.Unmarshal(*v, &createOrUpdateFirewallRuleProperties) - if err != nil { - return err - } - coufrp.CreateOrUpdateFirewallRuleProperties = &createOrUpdateFirewallRuleProperties - } - } - } - - return nil -} - -// CreateOrUpdateFirewallRuleProperties the firewall rule properties to use when creating a new firewall -// rule. -type CreateOrUpdateFirewallRuleProperties struct { - // StartIPAddress - The start IP address for the firewall rule. This can be either ipv4 or ipv6. Start and End should be in the same protocol. - StartIPAddress *string `json:"startIpAddress,omitempty"` - // EndIPAddress - The end IP address for the firewall rule. This can be either ipv4 or ipv6. Start and End should be in the same protocol. 
- EndIPAddress *string `json:"endIpAddress,omitempty"` -} - -// DataLakeAnalyticsAccount a Data Lake Analytics account object, containing all information associated -// with the named Data Lake Analytics account. -type DataLakeAnalyticsAccount struct { - autorest.Response `json:"-"` - // DataLakeAnalyticsAccountProperties - READ-ONLY; The properties defined by Data Lake Analytics all properties are specific to each resource provider. - *DataLakeAnalyticsAccountProperties `json:"properties,omitempty"` - // ID - READ-ONLY; The resource identifer. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The resource name. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The resource type. - Type *string `json:"type,omitempty"` - // Location - READ-ONLY; The resource location. - Location *string `json:"location,omitempty"` - // Tags - READ-ONLY; The resource tags. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for DataLakeAnalyticsAccount. -func (dlaa DataLakeAnalyticsAccount) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for DataLakeAnalyticsAccount struct. -func (dlaa *DataLakeAnalyticsAccount) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var dataLakeAnalyticsAccountProperties DataLakeAnalyticsAccountProperties - err = json.Unmarshal(*v, &dataLakeAnalyticsAccountProperties) - if err != nil { - return err - } - dlaa.DataLakeAnalyticsAccountProperties = &dataLakeAnalyticsAccountProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - dlaa.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - dlaa.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - dlaa.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - dlaa.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - dlaa.Tags = tags - } - } - } - - return nil -} - -// DataLakeAnalyticsAccountBasic a Data Lake Analytics account object, containing all information -// associated with the named Data Lake Analytics account. -type DataLakeAnalyticsAccountBasic struct { - // DataLakeAnalyticsAccountPropertiesBasic - READ-ONLY; The properties defined by Data Lake Analytics all properties are specific to each resource provider. - *DataLakeAnalyticsAccountPropertiesBasic `json:"properties,omitempty"` - // ID - READ-ONLY; The resource identifer. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The resource name. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The resource type. - Type *string `json:"type,omitempty"` - // Location - READ-ONLY; The resource location. - Location *string `json:"location,omitempty"` - // Tags - READ-ONLY; The resource tags. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for DataLakeAnalyticsAccountBasic. 
-func (dlaab DataLakeAnalyticsAccountBasic) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for DataLakeAnalyticsAccountBasic struct. -func (dlaab *DataLakeAnalyticsAccountBasic) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var dataLakeAnalyticsAccountPropertiesBasic DataLakeAnalyticsAccountPropertiesBasic - err = json.Unmarshal(*v, &dataLakeAnalyticsAccountPropertiesBasic) - if err != nil { - return err - } - dlaab.DataLakeAnalyticsAccountPropertiesBasic = &dataLakeAnalyticsAccountPropertiesBasic - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - dlaab.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - dlaab.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - dlaab.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - dlaab.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - dlaab.Tags = tags - } - } - } - - return nil -} - -// DataLakeAnalyticsAccountListResult data Lake Analytics account list information. -type DataLakeAnalyticsAccountListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The results of the list operation. - Value *[]DataLakeAnalyticsAccountBasic `json:"value,omitempty"` - // NextLink - READ-ONLY; The link (url) to the next page of results. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for DataLakeAnalyticsAccountListResult. -func (dlaalr DataLakeAnalyticsAccountListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// DataLakeAnalyticsAccountListResultIterator provides access to a complete listing of -// DataLakeAnalyticsAccountBasic values. -type DataLakeAnalyticsAccountListResultIterator struct { - i int - page DataLakeAnalyticsAccountListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *DataLakeAnalyticsAccountListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DataLakeAnalyticsAccountListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. 
-func (iter *DataLakeAnalyticsAccountListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DataLakeAnalyticsAccountListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter DataLakeAnalyticsAccountListResultIterator) Response() DataLakeAnalyticsAccountListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter DataLakeAnalyticsAccountListResultIterator) Value() DataLakeAnalyticsAccountBasic { - if !iter.page.NotDone() { - return DataLakeAnalyticsAccountBasic{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the DataLakeAnalyticsAccountListResultIterator type. -func NewDataLakeAnalyticsAccountListResultIterator(page DataLakeAnalyticsAccountListResultPage) DataLakeAnalyticsAccountListResultIterator { - return DataLakeAnalyticsAccountListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (dlaalr DataLakeAnalyticsAccountListResult) IsEmpty() bool { - return dlaalr.Value == nil || len(*dlaalr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (dlaalr DataLakeAnalyticsAccountListResult) hasNextLink() bool { - return dlaalr.NextLink != nil && len(*dlaalr.NextLink) != 0 -} - -// dataLakeAnalyticsAccountListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (dlaalr DataLakeAnalyticsAccountListResult) dataLakeAnalyticsAccountListResultPreparer(ctx context.Context) (*http.Request, error) { - if !dlaalr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(dlaalr.NextLink))) -} - -// DataLakeAnalyticsAccountListResultPage contains a page of DataLakeAnalyticsAccountBasic values. -type DataLakeAnalyticsAccountListResultPage struct { - fn func(context.Context, DataLakeAnalyticsAccountListResult) (DataLakeAnalyticsAccountListResult, error) - dlaalr DataLakeAnalyticsAccountListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *DataLakeAnalyticsAccountListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DataLakeAnalyticsAccountListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.dlaalr) - if err != nil { - return err - } - page.dlaalr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *DataLakeAnalyticsAccountListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. 
-func (page DataLakeAnalyticsAccountListResultPage) NotDone() bool { - return !page.dlaalr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page DataLakeAnalyticsAccountListResultPage) Response() DataLakeAnalyticsAccountListResult { - return page.dlaalr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page DataLakeAnalyticsAccountListResultPage) Values() []DataLakeAnalyticsAccountBasic { - if page.dlaalr.IsEmpty() { - return nil - } - return *page.dlaalr.Value -} - -// Creates a new instance of the DataLakeAnalyticsAccountListResultPage type. -func NewDataLakeAnalyticsAccountListResultPage(cur DataLakeAnalyticsAccountListResult, getNextPage func(context.Context, DataLakeAnalyticsAccountListResult) (DataLakeAnalyticsAccountListResult, error)) DataLakeAnalyticsAccountListResultPage { - return DataLakeAnalyticsAccountListResultPage{ - fn: getNextPage, - dlaalr: cur, - } -} - -// DataLakeAnalyticsAccountProperties the account specific properties that are associated with an -// underlying Data Lake Analytics account. Returned only when retrieving a specific account. -type DataLakeAnalyticsAccountProperties struct { - // DefaultDataLakeStoreAccount - READ-ONLY; The default Data Lake Store account associated with this account. - DefaultDataLakeStoreAccount *string `json:"defaultDataLakeStoreAccount,omitempty"` - // DataLakeStoreAccounts - READ-ONLY; The list of Data Lake Store accounts associated with this account. - DataLakeStoreAccounts *[]DataLakeStoreAccountInformation `json:"dataLakeStoreAccounts,omitempty"` - // StorageAccounts - READ-ONLY; The list of Azure Blob Storage accounts associated with this account. - StorageAccounts *[]StorageAccountInformation `json:"storageAccounts,omitempty"` - // ComputePolicies - READ-ONLY; The list of compute policies associated with this account. - ComputePolicies *[]ComputePolicy `json:"computePolicies,omitempty"` - // FirewallRules - READ-ONLY; The list of firewall rules associated with this account. - FirewallRules *[]FirewallRule `json:"firewallRules,omitempty"` - // FirewallState - READ-ONLY; The current state of the IP address firewall for this account. Possible values include: 'FirewallStateEnabled', 'FirewallStateDisabled' - FirewallState FirewallState `json:"firewallState,omitempty"` - // FirewallAllowAzureIps - READ-ONLY; The current state of allowing or disallowing IPs originating within Azure through the firewall. If the firewall is disabled, this is not enforced. Possible values include: 'Enabled', 'Disabled' - FirewallAllowAzureIps FirewallAllowAzureIpsState `json:"firewallAllowAzureIps,omitempty"` - // NewTier - READ-ONLY; The commitment tier for the next month. Possible values include: 'Consumption', 'Commitment100AUHours', 'Commitment500AUHours', 'Commitment1000AUHours', 'Commitment5000AUHours', 'Commitment10000AUHours', 'Commitment50000AUHours', 'Commitment100000AUHours', 'Commitment500000AUHours' - NewTier TierType `json:"newTier,omitempty"` - // CurrentTier - READ-ONLY; The commitment tier in use for the current month. Possible values include: 'Consumption', 'Commitment100AUHours', 'Commitment500AUHours', 'Commitment1000AUHours', 'Commitment5000AUHours', 'Commitment10000AUHours', 'Commitment50000AUHours', 'Commitment100000AUHours', 'Commitment500000AUHours' - CurrentTier TierType `json:"currentTier,omitempty"` - // MaxJobCount - READ-ONLY; The maximum supported jobs running under the account at the same time. 
- MaxJobCount *int32 `json:"maxJobCount,omitempty"` - // SystemMaxJobCount - READ-ONLY; The system defined maximum supported jobs running under the account at the same time, which restricts the maximum number of running jobs the user can set for the account. - SystemMaxJobCount *int32 `json:"systemMaxJobCount,omitempty"` - // MaxDegreeOfParallelism - READ-ONLY; The maximum supported degree of parallelism for this account. - MaxDegreeOfParallelism *int32 `json:"maxDegreeOfParallelism,omitempty"` - // SystemMaxDegreeOfParallelism - READ-ONLY; The system defined maximum supported degree of parallelism for this account, which restricts the maximum value of parallelism the user can set for the account. - SystemMaxDegreeOfParallelism *int32 `json:"systemMaxDegreeOfParallelism,omitempty"` - // MaxDegreeOfParallelismPerJob - READ-ONLY; The maximum supported degree of parallelism per job for this account. - MaxDegreeOfParallelismPerJob *int32 `json:"maxDegreeOfParallelismPerJob,omitempty"` - // MinPriorityPerJob - READ-ONLY; The minimum supported priority per job for this account. - MinPriorityPerJob *int32 `json:"minPriorityPerJob,omitempty"` - // QueryStoreRetention - READ-ONLY; The number of days that job metadata is retained. - QueryStoreRetention *int32 `json:"queryStoreRetention,omitempty"` - // AccountID - READ-ONLY; The unique identifier associated with this Data Lake Analytics account. - AccountID *uuid.UUID `json:"accountId,omitempty"` - // ProvisioningState - READ-ONLY; The provisioning status of the Data Lake Analytics account. Possible values include: 'Failed', 'Creating', 'Running', 'Succeeded', 'Patching', 'Suspending', 'Resuming', 'Deleting', 'Deleted', 'Undeleting', 'Canceled' - ProvisioningState DataLakeAnalyticsAccountStatus `json:"provisioningState,omitempty"` - // State - READ-ONLY; The state of the Data Lake Analytics account. Possible values include: 'Active', 'Suspended' - State DataLakeAnalyticsAccountState `json:"state,omitempty"` - // CreationTime - READ-ONLY; The account creation time. - CreationTime *date.Time `json:"creationTime,omitempty"` - // LastModifiedTime - READ-ONLY; The account last modified time. - LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"` - // Endpoint - READ-ONLY; The full CName endpoint for this account. - Endpoint *string `json:"endpoint,omitempty"` -} - -// MarshalJSON is the custom marshaler for DataLakeAnalyticsAccountProperties. -func (dlaap DataLakeAnalyticsAccountProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// DataLakeAnalyticsAccountPropertiesBasic the basic account specific properties that are associated with -// an underlying Data Lake Analytics account. -type DataLakeAnalyticsAccountPropertiesBasic struct { - // AccountID - READ-ONLY; The unique identifier associated with this Data Lake Analytics account. - AccountID *uuid.UUID `json:"accountId,omitempty"` - // ProvisioningState - READ-ONLY; The provisioning status of the Data Lake Analytics account. Possible values include: 'Failed', 'Creating', 'Running', 'Succeeded', 'Patching', 'Suspending', 'Resuming', 'Deleting', 'Deleted', 'Undeleting', 'Canceled' - ProvisioningState DataLakeAnalyticsAccountStatus `json:"provisioningState,omitempty"` - // State - READ-ONLY; The state of the Data Lake Analytics account. Possible values include: 'Active', 'Suspended' - State DataLakeAnalyticsAccountState `json:"state,omitempty"` - // CreationTime - READ-ONLY; The account creation time. 
- CreationTime *date.Time `json:"creationTime,omitempty"` - // LastModifiedTime - READ-ONLY; The account last modified time. - LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"` - // Endpoint - READ-ONLY; The full CName endpoint for this account. - Endpoint *string `json:"endpoint,omitempty"` -} - -// MarshalJSON is the custom marshaler for DataLakeAnalyticsAccountPropertiesBasic. -func (dlaapb DataLakeAnalyticsAccountPropertiesBasic) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// DataLakeStoreAccountInformation data Lake Store account information. -type DataLakeStoreAccountInformation struct { - autorest.Response `json:"-"` - // DataLakeStoreAccountInformationProperties - READ-ONLY; The Data Lake Store account properties. - *DataLakeStoreAccountInformationProperties `json:"properties,omitempty"` - // ID - READ-ONLY; The resource identifier. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The resource name. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The resource type. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for DataLakeStoreAccountInformation. -func (dlsai DataLakeStoreAccountInformation) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for DataLakeStoreAccountInformation struct. -func (dlsai *DataLakeStoreAccountInformation) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var dataLakeStoreAccountInformationProperties DataLakeStoreAccountInformationProperties - err = json.Unmarshal(*v, &dataLakeStoreAccountInformationProperties) - if err != nil { - return err - } - dlsai.DataLakeStoreAccountInformationProperties = &dataLakeStoreAccountInformationProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - dlsai.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - dlsai.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - dlsai.Type = &typeVar - } - } - } - - return nil -} - -// DataLakeStoreAccountInformationListResult data Lake Store account list information. -type DataLakeStoreAccountInformationListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The results of the list operation. - Value *[]DataLakeStoreAccountInformation `json:"value,omitempty"` - // NextLink - READ-ONLY; The link (url) to the next page of results. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for DataLakeStoreAccountInformationListResult. -func (dlsailr DataLakeStoreAccountInformationListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// DataLakeStoreAccountInformationListResultIterator provides access to a complete listing of -// DataLakeStoreAccountInformation values. -type DataLakeStoreAccountInformationListResultIterator struct { - i int - page DataLakeStoreAccountInformationListResultPage -} - -// NextWithContext advances to the next value. 
If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *DataLakeStoreAccountInformationListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DataLakeStoreAccountInformationListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *DataLakeStoreAccountInformationListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DataLakeStoreAccountInformationListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter DataLakeStoreAccountInformationListResultIterator) Response() DataLakeStoreAccountInformationListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter DataLakeStoreAccountInformationListResultIterator) Value() DataLakeStoreAccountInformation { - if !iter.page.NotDone() { - return DataLakeStoreAccountInformation{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the DataLakeStoreAccountInformationListResultIterator type. -func NewDataLakeStoreAccountInformationListResultIterator(page DataLakeStoreAccountInformationListResultPage) DataLakeStoreAccountInformationListResultIterator { - return DataLakeStoreAccountInformationListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (dlsailr DataLakeStoreAccountInformationListResult) IsEmpty() bool { - return dlsailr.Value == nil || len(*dlsailr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (dlsailr DataLakeStoreAccountInformationListResult) hasNextLink() bool { - return dlsailr.NextLink != nil && len(*dlsailr.NextLink) != 0 -} - -// dataLakeStoreAccountInformationListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (dlsailr DataLakeStoreAccountInformationListResult) dataLakeStoreAccountInformationListResultPreparer(ctx context.Context) (*http.Request, error) { - if !dlsailr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(dlsailr.NextLink))) -} - -// DataLakeStoreAccountInformationListResultPage contains a page of DataLakeStoreAccountInformation values. -type DataLakeStoreAccountInformationListResultPage struct { - fn func(context.Context, DataLakeStoreAccountInformationListResult) (DataLakeStoreAccountInformationListResult, error) - dlsailr DataLakeStoreAccountInformationListResult -} - -// NextWithContext advances to the next page of values. 
If there was an error making -// the request the page does not advance and the error is returned. -func (page *DataLakeStoreAccountInformationListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DataLakeStoreAccountInformationListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.dlsailr) - if err != nil { - return err - } - page.dlsailr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *DataLakeStoreAccountInformationListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DataLakeStoreAccountInformationListResultPage) NotDone() bool { - return !page.dlsailr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page DataLakeStoreAccountInformationListResultPage) Response() DataLakeStoreAccountInformationListResult { - return page.dlsailr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page DataLakeStoreAccountInformationListResultPage) Values() []DataLakeStoreAccountInformation { - if page.dlsailr.IsEmpty() { - return nil - } - return *page.dlsailr.Value -} - -// Creates a new instance of the DataLakeStoreAccountInformationListResultPage type. -func NewDataLakeStoreAccountInformationListResultPage(cur DataLakeStoreAccountInformationListResult, getNextPage func(context.Context, DataLakeStoreAccountInformationListResult) (DataLakeStoreAccountInformationListResult, error)) DataLakeStoreAccountInformationListResultPage { - return DataLakeStoreAccountInformationListResultPage{ - fn: getNextPage, - dlsailr: cur, - } -} - -// DataLakeStoreAccountInformationProperties the Data Lake Store account properties. -type DataLakeStoreAccountInformationProperties struct { - // Suffix - READ-ONLY; The optional suffix for the Data Lake Store account. - Suffix *string `json:"suffix,omitempty"` -} - -// MarshalJSON is the custom marshaler for DataLakeStoreAccountInformationProperties. -func (dlsaip DataLakeStoreAccountInformationProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// FirewallRule data Lake Analytics firewall rule information. -type FirewallRule struct { - autorest.Response `json:"-"` - // FirewallRuleProperties - READ-ONLY; The firewall rule properties. - *FirewallRuleProperties `json:"properties,omitempty"` - // ID - READ-ONLY; The resource identifier. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The resource name. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The resource type. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for FirewallRule. -func (fr FirewallRule) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for FirewallRule struct. 
-func (fr *FirewallRule) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var firewallRuleProperties FirewallRuleProperties - err = json.Unmarshal(*v, &firewallRuleProperties) - if err != nil { - return err - } - fr.FirewallRuleProperties = &firewallRuleProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - fr.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - fr.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - fr.Type = &typeVar - } - } - } - - return nil -} - -// FirewallRuleListResult data Lake Analytics firewall rule list information. -type FirewallRuleListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The results of the list operation. - Value *[]FirewallRule `json:"value,omitempty"` - // NextLink - READ-ONLY; The link (url) to the next page of results. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for FirewallRuleListResult. -func (frlr FirewallRuleListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// FirewallRuleListResultIterator provides access to a complete listing of FirewallRule values. -type FirewallRuleListResultIterator struct { - i int - page FirewallRuleListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *FirewallRuleListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FirewallRuleListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *FirewallRuleListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter FirewallRuleListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter FirewallRuleListResultIterator) Response() FirewallRuleListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter FirewallRuleListResultIterator) Value() FirewallRule { - if !iter.page.NotDone() { - return FirewallRule{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the FirewallRuleListResultIterator type. 
-func NewFirewallRuleListResultIterator(page FirewallRuleListResultPage) FirewallRuleListResultIterator { - return FirewallRuleListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (frlr FirewallRuleListResult) IsEmpty() bool { - return frlr.Value == nil || len(*frlr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (frlr FirewallRuleListResult) hasNextLink() bool { - return frlr.NextLink != nil && len(*frlr.NextLink) != 0 -} - -// firewallRuleListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (frlr FirewallRuleListResult) firewallRuleListResultPreparer(ctx context.Context) (*http.Request, error) { - if !frlr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(frlr.NextLink))) -} - -// FirewallRuleListResultPage contains a page of FirewallRule values. -type FirewallRuleListResultPage struct { - fn func(context.Context, FirewallRuleListResult) (FirewallRuleListResult, error) - frlr FirewallRuleListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *FirewallRuleListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FirewallRuleListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.frlr) - if err != nil { - return err - } - page.frlr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *FirewallRuleListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page FirewallRuleListResultPage) NotDone() bool { - return !page.frlr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page FirewallRuleListResultPage) Response() FirewallRuleListResult { - return page.frlr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page FirewallRuleListResultPage) Values() []FirewallRule { - if page.frlr.IsEmpty() { - return nil - } - return *page.frlr.Value -} - -// Creates a new instance of the FirewallRuleListResultPage type. -func NewFirewallRuleListResultPage(cur FirewallRuleListResult, getNextPage func(context.Context, FirewallRuleListResult) (FirewallRuleListResult, error)) FirewallRuleListResultPage { - return FirewallRuleListResultPage{ - fn: getNextPage, - frlr: cur, - } -} - -// FirewallRuleProperties the firewall rule properties. -type FirewallRuleProperties struct { - // StartIPAddress - READ-ONLY; The start IP address for the firewall rule. This can be either ipv4 or ipv6. Start and End should be in the same protocol. - StartIPAddress *string `json:"startIpAddress,omitempty"` - // EndIPAddress - READ-ONLY; The end IP address for the firewall rule. This can be either ipv4 or ipv6. 
Start and End should be in the same protocol. - EndIPAddress *string `json:"endIpAddress,omitempty"` -} - -// MarshalJSON is the custom marshaler for FirewallRuleProperties. -func (frp FirewallRuleProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// NameAvailabilityInformation data Lake Analytics account name availability result information. -type NameAvailabilityInformation struct { - autorest.Response `json:"-"` - // NameAvailable - READ-ONLY; The Boolean value of true or false to indicate whether the Data Lake Analytics account name is available or not. - NameAvailable *bool `json:"nameAvailable,omitempty"` - // Reason - READ-ONLY; The reason why the Data Lake Analytics account name is not available, if nameAvailable is false. - Reason *string `json:"reason,omitempty"` - // Message - READ-ONLY; The message describing why the Data Lake Analytics account name is not available, if nameAvailable is false. - Message *string `json:"message,omitempty"` -} - -// MarshalJSON is the custom marshaler for NameAvailabilityInformation. -func (nai NameAvailabilityInformation) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// Operation an available operation for Data Lake Analytics. -type Operation struct { - // Name - READ-ONLY; The name of the operation. - Name *string `json:"name,omitempty"` - // Display - READ-ONLY; The display information for the operation. - Display *OperationDisplay `json:"display,omitempty"` - // Origin - READ-ONLY; The intended executor of the operation. Possible values include: 'OperationOriginUser', 'OperationOriginSystem', 'OperationOriginUsersystem' - Origin OperationOrigin `json:"origin,omitempty"` -} - -// MarshalJSON is the custom marshaler for Operation. -func (o Operation) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// OperationDisplay the display information for a particular operation. -type OperationDisplay struct { - // Provider - READ-ONLY; The resource provider of the operation. - Provider *string `json:"provider,omitempty"` - // Resource - READ-ONLY; The resource type of the operation. - Resource *string `json:"resource,omitempty"` - // Operation - READ-ONLY; A friendly name of the operation. - Operation *string `json:"operation,omitempty"` - // Description - READ-ONLY; A friendly description of the operation. - Description *string `json:"description,omitempty"` -} - -// MarshalJSON is the custom marshaler for OperationDisplay. -func (od OperationDisplay) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// OperationListResult the list of available operations for Data Lake Analytics. -type OperationListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The results of the list operation. - Value *[]Operation `json:"value,omitempty"` - // NextLink - READ-ONLY; The link (url) to the next page of results. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for OperationListResult. -func (olr OperationListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// Resource the resource model definition. -type Resource struct { - // ID - READ-ONLY; The resource identifer. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The resource name. 
- Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The resource type. - Type *string `json:"type,omitempty"` - // Location - READ-ONLY; The resource location. - Location *string `json:"location,omitempty"` - // Tags - READ-ONLY; The resource tags. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for Resource. -func (r Resource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// SasTokenInformation SAS token information. -type SasTokenInformation struct { - // AccessToken - READ-ONLY; The access token for the associated Azure Storage Container. - AccessToken *string `json:"accessToken,omitempty"` -} - -// MarshalJSON is the custom marshaler for SasTokenInformation. -func (sti SasTokenInformation) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// SasTokenInformationListResult the SAS response that contains the storage account, container and -// associated SAS token for connection use. -type SasTokenInformationListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The results of the list operation. - Value *[]SasTokenInformation `json:"value,omitempty"` - // NextLink - READ-ONLY; The link (url) to the next page of results. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for SasTokenInformationListResult. -func (stilr SasTokenInformationListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// SasTokenInformationListResultIterator provides access to a complete listing of SasTokenInformation -// values. -type SasTokenInformationListResultIterator struct { - i int - page SasTokenInformationListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *SasTokenInformationListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SasTokenInformationListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *SasTokenInformationListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter SasTokenInformationListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter SasTokenInformationListResultIterator) Response() SasTokenInformationListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. 
-func (iter SasTokenInformationListResultIterator) Value() SasTokenInformation { - if !iter.page.NotDone() { - return SasTokenInformation{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the SasTokenInformationListResultIterator type. -func NewSasTokenInformationListResultIterator(page SasTokenInformationListResultPage) SasTokenInformationListResultIterator { - return SasTokenInformationListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (stilr SasTokenInformationListResult) IsEmpty() bool { - return stilr.Value == nil || len(*stilr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (stilr SasTokenInformationListResult) hasNextLink() bool { - return stilr.NextLink != nil && len(*stilr.NextLink) != 0 -} - -// sasTokenInformationListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (stilr SasTokenInformationListResult) sasTokenInformationListResultPreparer(ctx context.Context) (*http.Request, error) { - if !stilr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(stilr.NextLink))) -} - -// SasTokenInformationListResultPage contains a page of SasTokenInformation values. -type SasTokenInformationListResultPage struct { - fn func(context.Context, SasTokenInformationListResult) (SasTokenInformationListResult, error) - stilr SasTokenInformationListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *SasTokenInformationListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SasTokenInformationListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.stilr) - if err != nil { - return err - } - page.stilr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *SasTokenInformationListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page SasTokenInformationListResultPage) NotDone() bool { - return !page.stilr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page SasTokenInformationListResultPage) Response() SasTokenInformationListResult { - return page.stilr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page SasTokenInformationListResultPage) Values() []SasTokenInformation { - if page.stilr.IsEmpty() { - return nil - } - return *page.stilr.Value -} - -// Creates a new instance of the SasTokenInformationListResultPage type. 
-func NewSasTokenInformationListResultPage(cur SasTokenInformationListResult, getNextPage func(context.Context, SasTokenInformationListResult) (SasTokenInformationListResult, error)) SasTokenInformationListResultPage { - return SasTokenInformationListResultPage{ - fn: getNextPage, - stilr: cur, - } -} - -// StorageAccountInformation azure Storage account information. -type StorageAccountInformation struct { - autorest.Response `json:"-"` - // StorageAccountInformationProperties - READ-ONLY; The Azure Storage account properties. - *StorageAccountInformationProperties `json:"properties,omitempty"` - // ID - READ-ONLY; The resource identifier. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The resource name. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The resource type. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for StorageAccountInformation. -func (sai StorageAccountInformation) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for StorageAccountInformation struct. -func (sai *StorageAccountInformation) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var storageAccountInformationProperties StorageAccountInformationProperties - err = json.Unmarshal(*v, &storageAccountInformationProperties) - if err != nil { - return err - } - sai.StorageAccountInformationProperties = &storageAccountInformationProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - sai.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - sai.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - sai.Type = &typeVar - } - } - } - - return nil -} - -// StorageAccountInformationListResult azure Storage account list information. -type StorageAccountInformationListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The results of the list operation. - Value *[]StorageAccountInformation `json:"value,omitempty"` - // NextLink - READ-ONLY; The link (url) to the next page of results. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for StorageAccountInformationListResult. -func (sailr StorageAccountInformationListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// StorageAccountInformationListResultIterator provides access to a complete listing of -// StorageAccountInformation values. -type StorageAccountInformationListResultIterator struct { - i int - page StorageAccountInformationListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. 
-func (iter *StorageAccountInformationListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/StorageAccountInformationListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *StorageAccountInformationListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter StorageAccountInformationListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter StorageAccountInformationListResultIterator) Response() StorageAccountInformationListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter StorageAccountInformationListResultIterator) Value() StorageAccountInformation { - if !iter.page.NotDone() { - return StorageAccountInformation{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the StorageAccountInformationListResultIterator type. -func NewStorageAccountInformationListResultIterator(page StorageAccountInformationListResultPage) StorageAccountInformationListResultIterator { - return StorageAccountInformationListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (sailr StorageAccountInformationListResult) IsEmpty() bool { - return sailr.Value == nil || len(*sailr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (sailr StorageAccountInformationListResult) hasNextLink() bool { - return sailr.NextLink != nil && len(*sailr.NextLink) != 0 -} - -// storageAccountInformationListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (sailr StorageAccountInformationListResult) storageAccountInformationListResultPreparer(ctx context.Context) (*http.Request, error) { - if !sailr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(sailr.NextLink))) -} - -// StorageAccountInformationListResultPage contains a page of StorageAccountInformation values. -type StorageAccountInformationListResultPage struct { - fn func(context.Context, StorageAccountInformationListResult) (StorageAccountInformationListResult, error) - sailr StorageAccountInformationListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. 
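The iterator/page pair removed above follows the usual track-1 AutoRest pagination pattern: the iterator walks the values of the current page and asks the page for the next one when it runs out. A minimal sketch of consuming such an iterator follows; the helper name and package are ours, and only the `NotDone`/`Value`/`NextWithContext` methods shown above are assumed.

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account"
)

// collectStorageAccounts drains a StorageAccountInformationListResultIterator,
// copying every StorageAccountInformation it yields into a slice.
// Illustrative only; this helper is not part of the SDK.
func collectStorageAccounts(ctx context.Context, iter account.StorageAccountInformationListResultIterator) ([]account.StorageAccountInformation, error) {
	var out []account.StorageAccountInformation
	for iter.NotDone() {
		out = append(out, iter.Value())
		// NextWithContext advances within the page and fetches the next page when needed.
		if err := iter.NextWithContext(ctx); err != nil {
			return nil, err
		}
	}
	return out, nil
}
```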
-func (page *StorageAccountInformationListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/StorageAccountInformationListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.sailr) - if err != nil { - return err - } - page.sailr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *StorageAccountInformationListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page StorageAccountInformationListResultPage) NotDone() bool { - return !page.sailr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page StorageAccountInformationListResultPage) Response() StorageAccountInformationListResult { - return page.sailr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page StorageAccountInformationListResultPage) Values() []StorageAccountInformation { - if page.sailr.IsEmpty() { - return nil - } - return *page.sailr.Value -} - -// Creates a new instance of the StorageAccountInformationListResultPage type. -func NewStorageAccountInformationListResultPage(cur StorageAccountInformationListResult, getNextPage func(context.Context, StorageAccountInformationListResult) (StorageAccountInformationListResult, error)) StorageAccountInformationListResultPage { - return StorageAccountInformationListResultPage{ - fn: getNextPage, - sailr: cur, - } -} - -// StorageAccountInformationProperties the Azure Storage account properties. -type StorageAccountInformationProperties struct { - // Suffix - READ-ONLY; The optional suffix for the storage account. - Suffix *string `json:"suffix,omitempty"` -} - -// MarshalJSON is the custom marshaler for StorageAccountInformationProperties. -func (saip StorageAccountInformationProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// StorageContainer azure Storage blob container information. -type StorageContainer struct { - autorest.Response `json:"-"` - // StorageContainerProperties - READ-ONLY; The properties of the blob container. - *StorageContainerProperties `json:"properties,omitempty"` - // ID - READ-ONLY; The resource identifier. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The resource name. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The resource type. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for StorageContainer. -func (sc StorageContainer) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for StorageContainer struct. 
-func (sc *StorageContainer) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var storageContainerProperties StorageContainerProperties - err = json.Unmarshal(*v, &storageContainerProperties) - if err != nil { - return err - } - sc.StorageContainerProperties = &storageContainerProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - sc.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - sc.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - sc.Type = &typeVar - } - } - } - - return nil -} - -// StorageContainerListResult the list of blob containers associated with the storage account attached to -// the Data Lake Analytics account. -type StorageContainerListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The results of the list operation. - Value *[]StorageContainer `json:"value,omitempty"` - // NextLink - READ-ONLY; The link (url) to the next page of results. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for StorageContainerListResult. -func (sclr StorageContainerListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// StorageContainerListResultIterator provides access to a complete listing of StorageContainer values. -type StorageContainerListResultIterator struct { - i int - page StorageContainerListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *StorageContainerListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/StorageContainerListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *StorageContainerListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter StorageContainerListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter StorageContainerListResultIterator) Response() StorageContainerListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. 
-func (iter StorageContainerListResultIterator) Value() StorageContainer { - if !iter.page.NotDone() { - return StorageContainer{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the StorageContainerListResultIterator type. -func NewStorageContainerListResultIterator(page StorageContainerListResultPage) StorageContainerListResultIterator { - return StorageContainerListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (sclr StorageContainerListResult) IsEmpty() bool { - return sclr.Value == nil || len(*sclr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (sclr StorageContainerListResult) hasNextLink() bool { - return sclr.NextLink != nil && len(*sclr.NextLink) != 0 -} - -// storageContainerListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (sclr StorageContainerListResult) storageContainerListResultPreparer(ctx context.Context) (*http.Request, error) { - if !sclr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(sclr.NextLink))) -} - -// StorageContainerListResultPage contains a page of StorageContainer values. -type StorageContainerListResultPage struct { - fn func(context.Context, StorageContainerListResult) (StorageContainerListResult, error) - sclr StorageContainerListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *StorageContainerListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/StorageContainerListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.sclr) - if err != nil { - return err - } - page.sclr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *StorageContainerListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page StorageContainerListResultPage) NotDone() bool { - return !page.sclr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page StorageContainerListResultPage) Response() StorageContainerListResult { - return page.sclr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page StorageContainerListResultPage) Values() []StorageContainer { - if page.sclr.IsEmpty() { - return nil - } - return *page.sclr.Value -} - -// Creates a new instance of the StorageContainerListResultPage type. 
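The custom marshalers above implement the generated flattened-envelope convention: `UnmarshalJSON` hoists the nested `properties` object onto the Go struct, while `MarshalJSON` emits an empty object because every field is READ-ONLY. A sketch of that round trip for `StorageContainer`; the payload is invented for illustration.

```go
package example

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account"
)

func roundTripStorageContainer() error {
	// A made-up response body in the shape the service returns.
	body := []byte(`{
		"id": "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.DataLakeAnalytics/accounts/acc/storageAccounts/sa/containers/data",
		"name": "data",
		"type": "Microsoft.DataLakeAnalytics/accounts/storageAccounts/containers",
		"properties": {"lastModifiedTime": "2021-11-01T00:00:00Z"}
	}`)

	var sc account.StorageContainer
	if err := json.Unmarshal(body, &sc); err != nil {
		return err
	}

	// UnmarshalJSON populated the embedded *StorageContainerProperties.
	if sc.StorageContainerProperties != nil && sc.StorageContainerProperties.LastModifiedTime != nil {
		fmt.Println(sc.StorageContainerProperties.LastModifiedTime.String())
	}

	// MarshalJSON drops the READ-ONLY fields, so the re-encoded form is just "{}".
	out, err := json.Marshal(sc)
	if err != nil {
		return err
	}
	fmt.Println(string(out)) // {}
	return nil
}
```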
-func NewStorageContainerListResultPage(cur StorageContainerListResult, getNextPage func(context.Context, StorageContainerListResult) (StorageContainerListResult, error)) StorageContainerListResultPage { - return StorageContainerListResultPage{ - fn: getNextPage, - sclr: cur, - } -} - -// StorageContainerProperties azure Storage blob container properties information. -type StorageContainerProperties struct { - // LastModifiedTime - READ-ONLY; The last modified time of the blob container. - LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"` -} - -// MarshalJSON is the custom marshaler for StorageContainerProperties. -func (scp StorageContainerProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// SubResource the resource model definition for a nested resource. -type SubResource struct { - // ID - READ-ONLY; The resource identifier. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The resource name. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The resource type. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for SubResource. -func (sr SubResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// UpdateComputePolicyParameters the parameters used to update a compute policy. -type UpdateComputePolicyParameters struct { - // UpdateComputePolicyProperties - The compute policy properties to use when updating a compute policy. - *UpdateComputePolicyProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for UpdateComputePolicyParameters. -func (ucpp UpdateComputePolicyParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ucpp.UpdateComputePolicyProperties != nil { - objectMap["properties"] = ucpp.UpdateComputePolicyProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for UpdateComputePolicyParameters struct. -func (ucpp *UpdateComputePolicyParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var updateComputePolicyProperties UpdateComputePolicyProperties - err = json.Unmarshal(*v, &updateComputePolicyProperties) - if err != nil { - return err - } - ucpp.UpdateComputePolicyProperties = &updateComputePolicyProperties - } - } - } - - return nil -} - -// UpdateComputePolicyProperties the compute policy properties to use when updating a compute policy. -type UpdateComputePolicyProperties struct { - // ObjectID - The AAD object identifier for the entity to create a policy for. - ObjectID *uuid.UUID `json:"objectId,omitempty"` - // ObjectType - The type of AAD object the object identifier refers to. Possible values include: 'User', 'Group', 'ServicePrincipal' - ObjectType AADObjectType `json:"objectType,omitempty"` - // MaxDegreeOfParallelismPerJob - The maximum degree of parallelism per job this user can use to submit jobs. This property, the min priority per job property, or both must be passed. - MaxDegreeOfParallelismPerJob *int32 `json:"maxDegreeOfParallelismPerJob,omitempty"` - // MinPriorityPerJob - The minimum priority per job this user can use to submit jobs. This property, the max degree of parallelism per job property, or both must be passed. 
- MinPriorityPerJob *int32 `json:"minPriorityPerJob,omitempty"` -} - -// UpdateComputePolicyWithAccountParameters the parameters used to update a compute policy while updating a -// Data Lake Analytics account. -type UpdateComputePolicyWithAccountParameters struct { - // Name - The unique name of the compute policy to update. - Name *string `json:"name,omitempty"` - // UpdateComputePolicyProperties - The compute policy properties to use when updating a compute policy. - *UpdateComputePolicyProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for UpdateComputePolicyWithAccountParameters. -func (ucpwap UpdateComputePolicyWithAccountParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ucpwap.Name != nil { - objectMap["name"] = ucpwap.Name - } - if ucpwap.UpdateComputePolicyProperties != nil { - objectMap["properties"] = ucpwap.UpdateComputePolicyProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for UpdateComputePolicyWithAccountParameters struct. -func (ucpwap *UpdateComputePolicyWithAccountParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - ucpwap.Name = &name - } - case "properties": - if v != nil { - var updateComputePolicyProperties UpdateComputePolicyProperties - err = json.Unmarshal(*v, &updateComputePolicyProperties) - if err != nil { - return err - } - ucpwap.UpdateComputePolicyProperties = &updateComputePolicyProperties - } - } - } - - return nil -} - -// UpdateDataLakeAnalyticsAccountParameters the parameters that can be used to update an existing Data Lake -// Analytics account. -type UpdateDataLakeAnalyticsAccountParameters struct { - // Tags - The resource tags. - Tags map[string]*string `json:"tags"` - // UpdateDataLakeAnalyticsAccountProperties - The properties that can be updated in an existing Data Lake Analytics account. - *UpdateDataLakeAnalyticsAccountProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for UpdateDataLakeAnalyticsAccountParameters. -func (udlaap UpdateDataLakeAnalyticsAccountParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if udlaap.Tags != nil { - objectMap["tags"] = udlaap.Tags - } - if udlaap.UpdateDataLakeAnalyticsAccountProperties != nil { - objectMap["properties"] = udlaap.UpdateDataLakeAnalyticsAccountProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for UpdateDataLakeAnalyticsAccountParameters struct. 
-func (udlaap *UpdateDataLakeAnalyticsAccountParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - udlaap.Tags = tags - } - case "properties": - if v != nil { - var updateDataLakeAnalyticsAccountProperties UpdateDataLakeAnalyticsAccountProperties - err = json.Unmarshal(*v, &updateDataLakeAnalyticsAccountProperties) - if err != nil { - return err - } - udlaap.UpdateDataLakeAnalyticsAccountProperties = &updateDataLakeAnalyticsAccountProperties - } - } - } - - return nil -} - -// UpdateDataLakeAnalyticsAccountProperties the properties to update that are associated with an underlying -// Data Lake Analytics account. -type UpdateDataLakeAnalyticsAccountProperties struct { - // DataLakeStoreAccounts - The list of Data Lake Store accounts associated with this account. - DataLakeStoreAccounts *[]UpdateDataLakeStoreWithAccountParameters `json:"dataLakeStoreAccounts,omitempty"` - // StorageAccounts - The list of Azure Blob storage accounts associated with this account. - StorageAccounts *[]UpdateStorageAccountWithAccountParameters `json:"storageAccounts,omitempty"` - // ComputePolicies - The list of compute policies associated with this account. - ComputePolicies *[]UpdateComputePolicyWithAccountParameters `json:"computePolicies,omitempty"` - // FirewallRules - The list of firewall rules associated with this account. - FirewallRules *[]UpdateFirewallRuleWithAccountParameters `json:"firewallRules,omitempty"` - // FirewallState - The current state of the IP address firewall for this account. Disabling the firewall does not remove existing rules, they will just be ignored until the firewall is re-enabled. Possible values include: 'FirewallStateEnabled', 'FirewallStateDisabled' - FirewallState FirewallState `json:"firewallState,omitempty"` - // FirewallAllowAzureIps - The current state of allowing or disallowing IPs originating within Azure through the firewall. If the firewall is disabled, this is not enforced. Possible values include: 'Enabled', 'Disabled' - FirewallAllowAzureIps FirewallAllowAzureIpsState `json:"firewallAllowAzureIps,omitempty"` - // NewTier - The commitment tier to use for next month. Possible values include: 'Consumption', 'Commitment100AUHours', 'Commitment500AUHours', 'Commitment1000AUHours', 'Commitment5000AUHours', 'Commitment10000AUHours', 'Commitment50000AUHours', 'Commitment100000AUHours', 'Commitment500000AUHours' - NewTier TierType `json:"newTier,omitempty"` - // MaxJobCount - The maximum supported jobs running under the account at the same time. - MaxJobCount *int32 `json:"maxJobCount,omitempty"` - // MaxDegreeOfParallelism - The maximum supported degree of parallelism for this account. - MaxDegreeOfParallelism *int32 `json:"maxDegreeOfParallelism,omitempty"` - // MaxDegreeOfParallelismPerJob - The maximum supported degree of parallelism per job for this account. - MaxDegreeOfParallelismPerJob *int32 `json:"maxDegreeOfParallelismPerJob,omitempty"` - // MinPriorityPerJob - The minimum supported priority per job for this account. - MinPriorityPerJob *int32 `json:"minPriorityPerJob,omitempty"` - // QueryStoreRetention - The number of days that job metadata is retained. 
- QueryStoreRetention *int32 `json:"queryStoreRetention,omitempty"` -} - -// UpdateDataLakeStoreProperties the Data Lake Store account properties to use when updating a Data Lake -// Store account. -type UpdateDataLakeStoreProperties struct { - // Suffix - The optional suffix for the Data Lake Store account. - Suffix *string `json:"suffix,omitempty"` -} - -// UpdateDataLakeStoreWithAccountParameters the parameters used to update a Data Lake Store account while -// updating a Data Lake Analytics account. -type UpdateDataLakeStoreWithAccountParameters struct { - // Name - The unique name of the Data Lake Store account to update. - Name *string `json:"name,omitempty"` - // UpdateDataLakeStoreProperties - The Data Lake Store account properties to use when updating a Data Lake Store account. - *UpdateDataLakeStoreProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for UpdateDataLakeStoreWithAccountParameters. -func (udlswap UpdateDataLakeStoreWithAccountParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if udlswap.Name != nil { - objectMap["name"] = udlswap.Name - } - if udlswap.UpdateDataLakeStoreProperties != nil { - objectMap["properties"] = udlswap.UpdateDataLakeStoreProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for UpdateDataLakeStoreWithAccountParameters struct. -func (udlswap *UpdateDataLakeStoreWithAccountParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - udlswap.Name = &name - } - case "properties": - if v != nil { - var updateDataLakeStoreProperties UpdateDataLakeStoreProperties - err = json.Unmarshal(*v, &updateDataLakeStoreProperties) - if err != nil { - return err - } - udlswap.UpdateDataLakeStoreProperties = &updateDataLakeStoreProperties - } - } - } - - return nil -} - -// UpdateFirewallRuleParameters the parameters used to update a firewall rule. -type UpdateFirewallRuleParameters struct { - // UpdateFirewallRuleProperties - The firewall rule properties to use when updating a firewall rule. - *UpdateFirewallRuleProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for UpdateFirewallRuleParameters. -func (ufrp UpdateFirewallRuleParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ufrp.UpdateFirewallRuleProperties != nil { - objectMap["properties"] = ufrp.UpdateFirewallRuleProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for UpdateFirewallRuleParameters struct. -func (ufrp *UpdateFirewallRuleParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var updateFirewallRuleProperties UpdateFirewallRuleProperties - err = json.Unmarshal(*v, &updateFirewallRuleProperties) - if err != nil { - return err - } - ufrp.UpdateFirewallRuleProperties = &updateFirewallRuleProperties - } - } - } - - return nil -} - -// UpdateFirewallRuleProperties the firewall rule properties to use when updating a firewall rule. -type UpdateFirewallRuleProperties struct { - // StartIPAddress - The start IP address for the firewall rule. 
This can be either ipv4 or ipv6. Start and End should be in the same protocol. - StartIPAddress *string `json:"startIpAddress,omitempty"` - // EndIPAddress - The end IP address for the firewall rule. This can be either ipv4 or ipv6. Start and End should be in the same protocol. - EndIPAddress *string `json:"endIpAddress,omitempty"` -} - -// UpdateFirewallRuleWithAccountParameters the parameters used to update a firewall rule while updating a -// Data Lake Analytics account. -type UpdateFirewallRuleWithAccountParameters struct { - // Name - The unique name of the firewall rule to update. - Name *string `json:"name,omitempty"` - // UpdateFirewallRuleProperties - The firewall rule properties to use when updating a firewall rule. - *UpdateFirewallRuleProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for UpdateFirewallRuleWithAccountParameters. -func (ufrwap UpdateFirewallRuleWithAccountParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ufrwap.Name != nil { - objectMap["name"] = ufrwap.Name - } - if ufrwap.UpdateFirewallRuleProperties != nil { - objectMap["properties"] = ufrwap.UpdateFirewallRuleProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for UpdateFirewallRuleWithAccountParameters struct. -func (ufrwap *UpdateFirewallRuleWithAccountParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - ufrwap.Name = &name - } - case "properties": - if v != nil { - var updateFirewallRuleProperties UpdateFirewallRuleProperties - err = json.Unmarshal(*v, &updateFirewallRuleProperties) - if err != nil { - return err - } - ufrwap.UpdateFirewallRuleProperties = &updateFirewallRuleProperties - } - } - } - - return nil -} - -// UpdateStorageAccountParameters the parameters used to update an Azure Storage account. -type UpdateStorageAccountParameters struct { - // UpdateStorageAccountProperties - The Azure Storage account properties to use when updating an Azure Storage account. - *UpdateStorageAccountProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for UpdateStorageAccountParameters. -func (usap UpdateStorageAccountParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if usap.UpdateStorageAccountProperties != nil { - objectMap["properties"] = usap.UpdateStorageAccountProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for UpdateStorageAccountParameters struct. -func (usap *UpdateStorageAccountParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var updateStorageAccountProperties UpdateStorageAccountProperties - err = json.Unmarshal(*v, &updateStorageAccountProperties) - if err != nil { - return err - } - usap.UpdateStorageAccountProperties = &updateStorageAccountProperties - } - } - } - - return nil -} - -// UpdateStorageAccountProperties the Azure Storage account properties to use when updating an Azure -// Storage account. 
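The firewall-rule update models above carry only optional pointer fields, and their custom `MarshalJSON` nests them under `properties` on the wire. A small sketch of building such a patch body; the IP range is a placeholder and `to.StringPtr` comes from `github.com/Azure/go-autorest/autorest/to`.

```go
package example

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account"
	"github.com/Azure/go-autorest/autorest/to"
)

func firewallRulePatchBody() error {
	// Placeholder IP range; start and end must use the same protocol (IPv4 here).
	params := account.UpdateFirewallRuleParameters{
		UpdateFirewallRuleProperties: &account.UpdateFirewallRuleProperties{
			StartIPAddress: to.StringPtr("10.0.0.1"),
			EndIPAddress:   to.StringPtr("10.0.0.254"),
		},
	}

	// The custom MarshalJSON above nests the set fields under "properties".
	body, err := json.Marshal(params)
	if err != nil {
		return err
	}
	fmt.Println(string(body)) // {"properties":{"startIpAddress":"10.0.0.1","endIpAddress":"10.0.0.254"}}
	return nil
}
```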
-type UpdateStorageAccountProperties struct { - // AccessKey - The updated access key associated with this Azure Storage account that will be used to connect to it. - AccessKey *string `json:"accessKey,omitempty"` - // Suffix - The optional suffix for the storage account. - Suffix *string `json:"suffix,omitempty"` -} - -// UpdateStorageAccountWithAccountParameters the parameters used to update an Azure Storage account while -// updating a Data Lake Analytics account. -type UpdateStorageAccountWithAccountParameters struct { - // Name - The unique name of the Azure Storage account to update. - Name *string `json:"name,omitempty"` - // UpdateStorageAccountProperties - The Azure Storage account properties to use when updating an Azure Storage account. - *UpdateStorageAccountProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for UpdateStorageAccountWithAccountParameters. -func (usawap UpdateStorageAccountWithAccountParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if usawap.Name != nil { - objectMap["name"] = usawap.Name - } - if usawap.UpdateStorageAccountProperties != nil { - objectMap["properties"] = usawap.UpdateStorageAccountProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for UpdateStorageAccountWithAccountParameters struct. -func (usawap *UpdateStorageAccountWithAccountParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - usawap.Name = &name - } - case "properties": - if v != nil { - var updateStorageAccountProperties UpdateStorageAccountProperties - err = json.Unmarshal(*v, &updateStorageAccountProperties) - if err != nil { - return err - } - usawap.UpdateStorageAccountProperties = &updateStorageAccountProperties - } - } - } - - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/operations.go deleted file mode 100644 index 1c55f8581919..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/operations.go +++ /dev/null @@ -1,98 +0,0 @@ -package account - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// OperationsClient is the creates an Azure Data Lake Analytics account management client. -type OperationsClient struct { - BaseClient -} - -// NewOperationsClient creates an instance of the OperationsClient client. -func NewOperationsClient(subscriptionID string) OperationsClient { - return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client using a custom endpoint. 
Use this -// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { - return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// List lists all of the available Data Lake Analytics REST API operations. -func (client OperationsClient) List(ctx context.Context) (result OperationListResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.ListPreparer(ctx) - if err != nil { - err = autorest.NewErrorWithError(err, "account.OperationsClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.OperationsClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.OperationsClient", "List", resp, "Failure responding to request") - return - } - - return -} - -// ListPreparer prepares the List request. -func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) { - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/providers/Microsoft.DataLakeAnalytics/operations"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/storageaccounts.go b/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/storageaccounts.go deleted file mode 100644 index 25af080a0fd2..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/storageaccounts.go +++ /dev/null @@ -1,841 +0,0 @@ -package account - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
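The `operations.go` client deleted just above exposes a single `List` call over the provider's operations endpoint. A sketch of driving it; the subscription ID is a placeholder, setting `client.Authorizer` (from the embedded autorest client) is assumed but omitted, and the status-code access relies on the `autorest.Response` set by `ListResponder` above.

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account"
)

func listOperations(ctx context.Context) error {
	// "sub-id" is a placeholder; a real caller would also configure client.Authorizer.
	client := account.NewOperationsClient("sub-id")

	result, err := client.List(ctx)
	if err != nil {
		return err
	}
	// The embedded autorest.Response carries the raw HTTP response.
	fmt.Println(result.Response.Response.StatusCode)
	return nil
}
```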
- -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// StorageAccountsClient is the creates an Azure Data Lake Analytics account management client. -type StorageAccountsClient struct { - BaseClient -} - -// NewStorageAccountsClient creates an instance of the StorageAccountsClient client. -func NewStorageAccountsClient(subscriptionID string) StorageAccountsClient { - return NewStorageAccountsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewStorageAccountsClientWithBaseURI creates an instance of the StorageAccountsClient client using a custom endpoint. -// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewStorageAccountsClientWithBaseURI(baseURI string, subscriptionID string) StorageAccountsClient { - return StorageAccountsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// Add updates the specified Data Lake Analytics account to add an Azure Storage account. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. -// storageAccountName - the name of the Azure Storage account to add -// parameters - the parameters containing the access key and optional suffix for the Azure Storage Account. -func (client StorageAccountsClient) Add(ctx context.Context, resourceGroupName string, accountName string, storageAccountName string, parameters AddStorageAccountParameters) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/StorageAccountsClient.Add") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.AddStorageAccountProperties", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.AddStorageAccountProperties.AccessKey", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { - return result, validation.NewError("account.StorageAccountsClient", "Add", err.Error()) - } - - req, err := client.AddPreparer(ctx, resourceGroupName, accountName, storageAccountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "Add", nil, "Failure preparing request") - return - } - - resp, err := client.AddSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "Add", resp, "Failure sending request") - return - } - - result, err = client.AddResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "Add", resp, "Failure responding to request") - return - } - - return -} - -// AddPreparer prepares the Add request. 
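`Add`, shown above, links an Azure Storage account to a Data Lake Analytics account and, per its validation block, requires an access key. An illustrative call; the resource names and key are placeholders, authorizer setup is omitted, and the `AddStorageAccountParameters` field layout is inferred from the validation constraints above.

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account"
	"github.com/Azure/go-autorest/autorest/to"
)

func linkStorageAccount(ctx context.Context) error {
	// Placeholder subscription ID; a real caller would also set client.Authorizer.
	client := account.NewStorageAccountsClient("sub-id")

	// AccessKey is mandatory (the Add validation above enforces it); Suffix is optional.
	params := account.AddStorageAccountParameters{
		AddStorageAccountProperties: &account.AddStorageAccountProperties{
			AccessKey: to.StringPtr("storage-account-access-key"),
		},
	}

	_, err := client.Add(ctx, "example-rg", "example-adla", "examplestorageacct", params)
	return err
}
```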
-func (client StorageAccountsClient) AddPreparer(ctx context.Context, resourceGroupName string, accountName string, storageAccountName string, parameters AddStorageAccountParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "storageAccountName": autorest.Encode("path", storageAccountName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/storageAccounts/{storageAccountName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// AddSender sends the Add request. The method will close the -// http.Response Body if it receives an error. -func (client StorageAccountsClient) AddSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// AddResponder handles the response to the Add request. The method always -// closes the http.Response Body. -func (client StorageAccountsClient) AddResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - result.Response = resp - return -} - -// Delete updates the specified Data Lake Analytics account to remove an Azure Storage account. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. -// storageAccountName - the name of the Azure Storage account to remove -func (client StorageAccountsClient) Delete(ctx context.Context, resourceGroupName string, accountName string, storageAccountName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/StorageAccountsClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, storageAccountName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. 
-func (client StorageAccountsClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, storageAccountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "storageAccountName": autorest.Encode("path", storageAccountName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/storageAccounts/{storageAccountName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client StorageAccountsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client StorageAccountsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets the specified Azure Storage account linked to the given Data Lake Analytics account. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. -// storageAccountName - the name of the Azure Storage account for which to retrieve the details. -func (client StorageAccountsClient) Get(ctx context.Context, resourceGroupName string, accountName string, storageAccountName string) (result StorageAccountInformation, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/StorageAccountsClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, resourceGroupName, accountName, storageAccountName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. 
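`Get`, shown above, returns the `StorageAccountInformation` model from `models.go`. A sketch of reading the optional suffix off the result; resource names are placeholders and authorizer setup is again omitted.

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account"
)

func showStorageAccountSuffix(ctx context.Context) error {
	client := account.NewStorageAccountsClient("sub-id") // placeholder; Authorizer omitted

	info, err := client.Get(ctx, "example-rg", "example-adla", "examplestorageacct")
	if err != nil {
		return err
	}

	// Suffix lives on the flattened StorageAccountInformationProperties and may be nil.
	if info.StorageAccountInformationProperties != nil && info.StorageAccountInformationProperties.Suffix != nil {
		fmt.Println(*info.StorageAccountInformationProperties.Suffix)
	}
	return nil
}
```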
-func (client StorageAccountsClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, storageAccountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "storageAccountName": autorest.Encode("path", storageAccountName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/storageAccounts/{storageAccountName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client StorageAccountsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client StorageAccountsClient) GetResponder(resp *http.Response) (result StorageAccountInformation, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetStorageContainer gets the specified Azure Storage container associated with the given Data Lake Analytics and -// Azure Storage accounts. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. -// storageAccountName - the name of the Azure storage account from which to retrieve the blob container. 
-// containerName - the name of the Azure storage container to retrieve -func (client StorageAccountsClient) GetStorageContainer(ctx context.Context, resourceGroupName string, accountName string, storageAccountName string, containerName string) (result StorageContainer, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/StorageAccountsClient.GetStorageContainer") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetStorageContainerPreparer(ctx, resourceGroupName, accountName, storageAccountName, containerName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "GetStorageContainer", nil, "Failure preparing request") - return - } - - resp, err := client.GetStorageContainerSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "GetStorageContainer", resp, "Failure sending request") - return - } - - result, err = client.GetStorageContainerResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "GetStorageContainer", resp, "Failure responding to request") - return - } - - return -} - -// GetStorageContainerPreparer prepares the GetStorageContainer request. -func (client StorageAccountsClient) GetStorageContainerPreparer(ctx context.Context, resourceGroupName string, accountName string, storageAccountName string, containerName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "containerName": autorest.Encode("path", containerName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "storageAccountName": autorest.Encode("path", storageAccountName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/storageAccounts/{storageAccountName}/containers/{containerName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetStorageContainerSender sends the GetStorageContainer request. The method will close the -// http.Response Body if it receives an error. -func (client StorageAccountsClient) GetStorageContainerSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetStorageContainerResponder handles the response to the GetStorageContainer request. The method always -// closes the http.Response Body. -func (client StorageAccountsClient) GetStorageContainerResponder(resp *http.Response) (result StorageContainer, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListByAccount gets the first page of Azure Storage accounts, if any, linked to the specified Data Lake Analytics -// account. 
The response includes a link to the next page, if any. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. -// filter - the OData filter. Optional. -// top - the number of items to return. Optional. -// skip - the number of items to skip over before returning elements. Optional. -// selectParameter - oData Select statement. Limits the properties on each entry to just those requested, e.g. -// Categories?$select=CategoryName,Description. Optional. -// orderby - orderBy clause. One or more comma-separated expressions with an optional "asc" (the default) or -// "desc" depending on the order you'd like the values sorted, e.g. Categories?$orderby=CategoryName desc. -// Optional. -// count - the Boolean value of true or false to request a count of the matching resources included with the -// resources in the response, e.g. Categories?$count=true. Optional. -func (client StorageAccountsClient) ListByAccount(ctx context.Context, resourceGroupName string, accountName string, filter string, top *int32, skip *int32, selectParameter string, orderby string, count *bool) (result StorageAccountInformationListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/StorageAccountsClient.ListByAccount") - defer func() { - sc := -1 - if result.sailr.Response.Response != nil { - sc = result.sailr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: top, - Constraints: []validation.Constraint{{Target: "top", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "top", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}}}}}, - {TargetValue: skip, - Constraints: []validation.Constraint{{Target: "skip", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "skip", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}}}}}}); err != nil { - return result, validation.NewError("account.StorageAccountsClient", "ListByAccount", err.Error()) - } - - result.fn = client.listByAccountNextResults - req, err := client.ListByAccountPreparer(ctx, resourceGroupName, accountName, filter, top, skip, selectParameter, orderby, count) - if err != nil { - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "ListByAccount", nil, "Failure preparing request") - return - } - - resp, err := client.ListByAccountSender(req) - if err != nil { - result.sailr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "ListByAccount", resp, "Failure sending request") - return - } - - result.sailr, err = client.ListByAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "ListByAccount", resp, "Failure responding to request") - return - } - if result.sailr.hasNextLink() && result.sailr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListByAccountPreparer prepares the ListByAccount request. 
-func (client StorageAccountsClient) ListByAccountPreparer(ctx context.Context, resourceGroupName string, accountName string, filter string, top *int32, skip *int32, selectParameter string, orderby string, count *bool) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(filter) > 0 { - queryParameters["$filter"] = autorest.Encode("query", filter) - } - if top != nil { - queryParameters["$top"] = autorest.Encode("query", *top) - } - if skip != nil { - queryParameters["$skip"] = autorest.Encode("query", *skip) - } - if len(selectParameter) > 0 { - queryParameters["$select"] = autorest.Encode("query", selectParameter) - } - if len(orderby) > 0 { - queryParameters["$orderby"] = autorest.Encode("query", orderby) - } - if count != nil { - queryParameters["$count"] = autorest.Encode("query", *count) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/storageAccounts", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByAccountSender sends the ListByAccount request. The method will close the -// http.Response Body if it receives an error. -func (client StorageAccountsClient) ListByAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByAccountResponder handles the response to the ListByAccount request. The method always -// closes the http.Response Body. -func (client StorageAccountsClient) ListByAccountResponder(resp *http.Response) (result StorageAccountInformationListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listByAccountNextResults retrieves the next set of results, if any. -func (client StorageAccountsClient) listByAccountNextResults(ctx context.Context, lastResults StorageAccountInformationListResult) (result StorageAccountInformationListResult, err error) { - req, err := lastResults.storageAccountInformationListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "account.StorageAccountsClient", "listByAccountNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListByAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "account.StorageAccountsClient", "listByAccountNextResults", resp, "Failure sending next results request") - } - result, err = client.ListByAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "listByAccountNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListByAccountComplete enumerates all values, automatically crossing page boundaries as required. 
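`ListByAccount`, shown above, returns only the first page; `listByAccountNextResults` wires up page advancement. A sketch of walking every page manually, leaving all of the optional OData parameters unset; names are placeholders and authorizer setup is omitted.

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account"
)

func listLinkedStorageAccounts(ctx context.Context) error {
	client := account.NewStorageAccountsClient("sub-id") // placeholder; Authorizer omitted

	// Empty filter/select/orderby and nil top/skip/count: no OData restrictions.
	page, err := client.ListByAccount(ctx, "example-rg", "example-adla", "", nil, nil, "", "", nil)
	if err != nil {
		return err
	}

	for page.NotDone() {
		for _, sa := range page.Values() {
			if sa.Name != nil {
				fmt.Println(*sa.Name)
			}
		}
		// Fetches the next page via listByAccountNextResults, or stops once NextLink is empty.
		if err := page.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```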
-func (client StorageAccountsClient) ListByAccountComplete(ctx context.Context, resourceGroupName string, accountName string, filter string, top *int32, skip *int32, selectParameter string, orderby string, count *bool) (result StorageAccountInformationListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/StorageAccountsClient.ListByAccount") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListByAccount(ctx, resourceGroupName, accountName, filter, top, skip, selectParameter, orderby, count) - return -} - -// ListSasTokens gets the SAS token associated with the specified Data Lake Analytics and Azure Storage account and -// container combination. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. -// storageAccountName - the name of the Azure storage account for which the SAS token is being requested. -// containerName - the name of the Azure storage container for which the SAS token is being requested. -func (client StorageAccountsClient) ListSasTokens(ctx context.Context, resourceGroupName string, accountName string, storageAccountName string, containerName string) (result SasTokenInformationListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/StorageAccountsClient.ListSasTokens") - defer func() { - sc := -1 - if result.stilr.Response.Response != nil { - sc = result.stilr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.fn = client.listSasTokensNextResults - req, err := client.ListSasTokensPreparer(ctx, resourceGroupName, accountName, storageAccountName, containerName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "ListSasTokens", nil, "Failure preparing request") - return - } - - resp, err := client.ListSasTokensSender(req) - if err != nil { - result.stilr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "ListSasTokens", resp, "Failure sending request") - return - } - - result.stilr, err = client.ListSasTokensResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "ListSasTokens", resp, "Failure responding to request") - return - } - if result.stilr.hasNextLink() && result.stilr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListSasTokensPreparer prepares the ListSasTokens request. 
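The removed `ListByAccount` / `ListByAccountComplete` pair follows the usual track-1 AutoRest pagination shape: the plain method returns a single page, while the `*Complete` variant returns an iterator that follows `nextLink` automatically. A minimal consumer-side sketch of that pattern is below; the `NewStorageAccountsClient` constructor, the Data Lake Analytics import path, the environment-based authorizer, the iterator's `NotDone`/`Value`/`NextWithContext` methods, and the example resource names do not appear in this hunk and are assumptions.

```go
package main

import (
	"context"
	"fmt"
	"log"

	// Import path assumed for the vendored Data Lake Analytics account package above.
	"github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Assumed constructor; generated track-1 clients expose New<Name>Client(subscriptionID).
	client := account.NewStorageAccountsClient("00000000-0000-0000-0000-000000000000")

	// Any autorest.Authorizer works here; environment-based auth is just one option.
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	ctx := context.Background()

	// ListByAccountComplete walks every page; the OData options
	// (filter, top, skip, select, orderby, count) are left unset.
	iter, err := client.ListByAccountComplete(ctx, "example-resources", "example-adla", "", nil, nil, "", "", nil)
	if err != nil {
		log.Fatal(err)
	}
	for iter.NotDone() {
		fmt.Printf("linked storage account: %+v\n", iter.Value())
		if err := iter.NextWithContext(ctx); err != nil {
			log.Fatal(err)
		}
	}
}
```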
-func (client StorageAccountsClient) ListSasTokensPreparer(ctx context.Context, resourceGroupName string, accountName string, storageAccountName string, containerName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "containerName": autorest.Encode("path", containerName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "storageAccountName": autorest.Encode("path", storageAccountName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/storageAccounts/{storageAccountName}/containers/{containerName}/listSasTokens", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSasTokensSender sends the ListSasTokens request. The method will close the -// http.Response Body if it receives an error. -func (client StorageAccountsClient) ListSasTokensSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListSasTokensResponder handles the response to the ListSasTokens request. The method always -// closes the http.Response Body. -func (client StorageAccountsClient) ListSasTokensResponder(resp *http.Response) (result SasTokenInformationListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listSasTokensNextResults retrieves the next set of results, if any. -func (client StorageAccountsClient) listSasTokensNextResults(ctx context.Context, lastResults SasTokenInformationListResult) (result SasTokenInformationListResult, err error) { - req, err := lastResults.sasTokenInformationListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "account.StorageAccountsClient", "listSasTokensNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListSasTokensSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "account.StorageAccountsClient", "listSasTokensNextResults", resp, "Failure sending next results request") - } - result, err = client.ListSasTokensResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "listSasTokensNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListSasTokensComplete enumerates all values, automatically crossing page boundaries as required. 
-func (client StorageAccountsClient) ListSasTokensComplete(ctx context.Context, resourceGroupName string, accountName string, storageAccountName string, containerName string) (result SasTokenInformationListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/StorageAccountsClient.ListSasTokens") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListSasTokens(ctx, resourceGroupName, accountName, storageAccountName, containerName) - return -} - -// ListStorageContainers lists the Azure Storage containers, if any, associated with the specified Data Lake Analytics -// and Azure Storage account combination. The response includes a link to the next page of results, if any. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. -// storageAccountName - the name of the Azure storage account from which to list blob containers. -func (client StorageAccountsClient) ListStorageContainers(ctx context.Context, resourceGroupName string, accountName string, storageAccountName string) (result StorageContainerListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/StorageAccountsClient.ListStorageContainers") - defer func() { - sc := -1 - if result.sclr.Response.Response != nil { - sc = result.sclr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.fn = client.listStorageContainersNextResults - req, err := client.ListStorageContainersPreparer(ctx, resourceGroupName, accountName, storageAccountName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "ListStorageContainers", nil, "Failure preparing request") - return - } - - resp, err := client.ListStorageContainersSender(req) - if err != nil { - result.sclr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "ListStorageContainers", resp, "Failure sending request") - return - } - - result.sclr, err = client.ListStorageContainersResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "ListStorageContainers", resp, "Failure responding to request") - return - } - if result.sclr.hasNextLink() && result.sclr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListStorageContainersPreparer prepares the ListStorageContainers request. 
-func (client StorageAccountsClient) ListStorageContainersPreparer(ctx context.Context, resourceGroupName string, accountName string, storageAccountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "storageAccountName": autorest.Encode("path", storageAccountName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/storageAccounts/{storageAccountName}/containers", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListStorageContainersSender sends the ListStorageContainers request. The method will close the -// http.Response Body if it receives an error. -func (client StorageAccountsClient) ListStorageContainersSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListStorageContainersResponder handles the response to the ListStorageContainers request. The method always -// closes the http.Response Body. -func (client StorageAccountsClient) ListStorageContainersResponder(resp *http.Response) (result StorageContainerListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listStorageContainersNextResults retrieves the next set of results, if any. -func (client StorageAccountsClient) listStorageContainersNextResults(ctx context.Context, lastResults StorageContainerListResult) (result StorageContainerListResult, err error) { - req, err := lastResults.storageContainerListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "account.StorageAccountsClient", "listStorageContainersNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListStorageContainersSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "account.StorageAccountsClient", "listStorageContainersNextResults", resp, "Failure sending next results request") - } - result, err = client.ListStorageContainersResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "listStorageContainersNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListStorageContainersComplete enumerates all values, automatically crossing page boundaries as required. 
-func (client StorageAccountsClient) ListStorageContainersComplete(ctx context.Context, resourceGroupName string, accountName string, storageAccountName string) (result StorageContainerListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/StorageAccountsClient.ListStorageContainers") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListStorageContainers(ctx, resourceGroupName, accountName, storageAccountName) - return -} - -// Update updates the Data Lake Analytics account to replace Azure Storage blob account details, such as the access key -// and/or suffix. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Analytics account. -// storageAccountName - the Azure Storage account to modify -// parameters - the parameters containing the access key and suffix to update the storage account with, if any. -// Passing nothing results in no change. -func (client StorageAccountsClient) Update(ctx context.Context, resourceGroupName string, accountName string, storageAccountName string, parameters *UpdateStorageAccountParameters) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/StorageAccountsClient.Update") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, storageAccountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "Update", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "Update", resp, "Failure sending request") - return - } - - result, err = client.UpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.StorageAccountsClient", "Update", resp, "Failure responding to request") - return - } - - return -} - -// UpdatePreparer prepares the Update request. 
-func (client StorageAccountsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, storageAccountName string, parameters *UpdateStorageAccountParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "storageAccountName": autorest.Encode("path", storageAccountName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/storageAccounts/{storageAccountName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - if parameters != nil { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithJSON(parameters)) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSender sends the Update request. The method will close the -// http.Response Body if it receives an error. -func (client StorageAccountsClient) UpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// UpdateResponder handles the response to the Update request. The method always -// closes the http.Response Body. -func (client StorageAccountsClient) UpdateResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - result.Response = resp - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/_meta.json deleted file mode 100644 index 84b2cc17f287..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/_meta.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", - "readme": "/_/azure-rest-api-specs/specification/datalake-store/resource-manager/readme.md", - "tag": "package-2016-11", - "use": "@microsoft.azure/autorest.go@2.1.187", - "repository_url": "https://github.com/Azure/azure-rest-api-specs.git", - "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-2016-11 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/datalake-store/resource-manager/readme.md", - "additional_properties": { - "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION" - } -} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/accounts.go b/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/accounts.go deleted file mode 100644 index 4619991923ff..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/accounts.go +++ /dev/null @@ -1,834 +0,0 @@ -package account - -// Copyright (c) 
Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// AccountsClient is the creates an Azure Data Lake Store account management client. -type AccountsClient struct { - BaseClient -} - -// NewAccountsClient creates an instance of the AccountsClient client. -func NewAccountsClient(subscriptionID string) AccountsClient { - return NewAccountsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewAccountsClientWithBaseURI creates an instance of the AccountsClient client using a custom endpoint. Use this -// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewAccountsClientWithBaseURI(baseURI string, subscriptionID string) AccountsClient { - return AccountsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CheckNameAvailability checks whether the specified account name is available or taken. -// Parameters: -// location - the resource location without whitespace. -// parameters - parameters supplied to check the Data Lake Store account name availability. -func (client AccountsClient) CheckNameAvailability(ctx context.Context, location string, parameters CheckNameAvailabilityParameters) (result NameAvailabilityInformation, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.CheckNameAvailability") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Name", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.Type", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("account.AccountsClient", "CheckNameAvailability", err.Error()) - } - - req, err := client.CheckNameAvailabilityPreparer(ctx, location, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "CheckNameAvailability", nil, "Failure preparing request") - return - } - - resp, err := client.CheckNameAvailabilitySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.AccountsClient", "CheckNameAvailability", resp, "Failure sending request") - return - } - - result, err = client.CheckNameAvailabilityResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "CheckNameAvailability", resp, "Failure responding to request") - return - } - - return -} - -// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request. 
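The `NewAccountsClient` constructor and the `CheckNameAvailability` signature shown above map onto a consumer call like the following sketch. The pointer-string field types of `CheckNameAvailabilityParameters`, the contents of the `NameAvailabilityInformation` result, and the example location and account name are not visible in this hunk and are assumed (the `to.StringPtr` helper comes from go-autorest).

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account"
	"github.com/Azure/go-autorest/autorest/to"
)

// checkName probes whether a Data Lake Store account name is free in the given
// location. The client is expected to already carry an Authorizer.
func checkName(ctx context.Context, client account.AccountsClient, location, name string) error {
	// Name and Type are both required (see the validation constraints above);
	// *string field types are assumed, matching the usual generated shape.
	params := account.CheckNameAvailabilityParameters{
		Name: to.StringPtr(name),
		Type: to.StringPtr("Microsoft.DataLakeStore/accounts"),
	}

	result, err := client.CheckNameAvailability(ctx, location, params)
	if err != nil {
		return err
	}
	fmt.Printf("availability for %q: %+v\n", name, result)
	return nil
}
```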
-func (client AccountsClient) CheckNameAvailabilityPreparer(ctx context.Context, location string, parameters CheckNameAvailabilityParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "location": autorest.Encode("path", location), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.DataLakeStore/locations/{location}/checkNameAvailability", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always -// closes the http.Response Body. -func (client AccountsClient) CheckNameAvailabilityResponder(resp *http.Response) (result NameAvailabilityInformation, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Create creates the specified Data Lake Store account. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Store account. -// parameters - parameters supplied to create the Data Lake Store account. 
-func (client AccountsClient) Create(ctx context.Context, resourceGroupName string, accountName string, parameters CreateDataLakeStoreAccountParameters) (result AccountsCreateFutureType, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Create") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Location", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.Identity", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.Identity.Type", Name: validation.Null, Rule: true, Chain: nil}}}, - {Target: "parameters.CreateDataLakeStoreAccountProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CreateDataLakeStoreAccountProperties.EncryptionConfig", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CreateDataLakeStoreAccountProperties.EncryptionConfig.KeyVaultMetaInfo", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CreateDataLakeStoreAccountProperties.EncryptionConfig.KeyVaultMetaInfo.KeyVaultResourceID", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.CreateDataLakeStoreAccountProperties.EncryptionConfig.KeyVaultMetaInfo.EncryptionKeyName", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.CreateDataLakeStoreAccountProperties.EncryptionConfig.KeyVaultMetaInfo.EncryptionKeyVersion", Name: validation.Null, Rule: true, Chain: nil}, - }}, - }}, - }}}}}); err != nil { - return result, validation.NewError("account.AccountsClient", "Create", err.Error()) - } - - req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "Create", nil, "Failure preparing request") - return - } - - result, err = client.CreateSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "Create", result.Response(), "Failure sending request") - return - } - - return -} - -// CreatePreparer prepares the Create request. -func (client AccountsClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, parameters CreateDataLakeStoreAccountParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateSender sends the Create request. The method will close the -// http.Response Body if it receives an error. 
-func (client AccountsClient) CreateSender(req *http.Request) (future AccountsCreateFutureType, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// CreateResponder handles the response to the Create request. The method always -// closes the http.Response Body. -func (client AccountsClient) CreateResponder(resp *http.Response) (result DataLakeStoreAccount, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes the specified Data Lake Store account. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Store account. -func (client AccountsClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result AccountsDeleteFutureType, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Delete") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeletePreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "Delete", nil, "Failure preparing request") - return - } - - result, err = client.DeleteSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "Delete", result.Response(), "Failure sending request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client AccountsClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) DeleteSender(req *http.Request) (future AccountsDeleteFutureType, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. 
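`Create` and `Delete` above return future types rather than plain results, and `CreateSender`/`DeleteSender` show the corresponding `azure.Future` plumbing. Under the usual track-1 long-running-operation pattern a caller blocks on the future before reading the outcome, roughly as sketched below; the `WaitForCompletionRef` call, invoking the `Result` func field, the pointer-string `Location` field, and the example names are assumptions, since only the sender side appears in this hunk (the `client.Client` embedded autorest client is used by the removed code itself).

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account"
	"github.com/Azure/go-autorest/autorest/to"
)

// createThenDelete provisions a Data Lake Store account and tears it down again,
// blocking on each long-running operation. The client must already be authorized.
func createThenDelete(ctx context.Context, client account.AccountsClient, rg, name string) error {
	// Location is the only field the validation above marks as strictly required.
	params := account.CreateDataLakeStoreAccountParameters{
		Location: to.StringPtr("eastus2"),
	}

	createFuture, err := client.Create(ctx, rg, name, params)
	if err != nil {
		return err
	}
	// Assumed LRO pattern: poll via the embedded azure.Future until the operation finishes.
	if err := createFuture.WaitForCompletionRef(ctx, client.Client); err != nil {
		return err
	}
	if _, err := createFuture.Result(client); err != nil {
		return err
	}

	deleteFuture, err := client.Delete(ctx, rg, name)
	if err != nil {
		return err
	}
	return deleteFuture.WaitForCompletionRef(ctx, client.Client)
}
```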
-func (client AccountsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// EnableKeyVault attempts to enable a user managed Key Vault for encryption of the specified Data Lake Store account. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Store account. -func (client AccountsClient) EnableKeyVault(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.EnableKeyVault") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.EnableKeyVaultPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "EnableKeyVault", nil, "Failure preparing request") - return - } - - resp, err := client.EnableKeyVaultSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "account.AccountsClient", "EnableKeyVault", resp, "Failure sending request") - return - } - - result, err = client.EnableKeyVaultResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "EnableKeyVault", resp, "Failure responding to request") - return - } - - return -} - -// EnableKeyVaultPreparer prepares the EnableKeyVault request. -func (client AccountsClient) EnableKeyVaultPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/enableKeyVault", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// EnableKeyVaultSender sends the EnableKeyVault request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) EnableKeyVaultSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// EnableKeyVaultResponder handles the response to the EnableKeyVault request. The method always -// closes the http.Response Body. -func (client AccountsClient) EnableKeyVaultResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets the specified Data Lake Store account. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Store account. 
-func (client AccountsClient) Get(ctx context.Context, resourceGroupName string, accountName string) (result DataLakeStoreAccount, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.AccountsClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. -func (client AccountsClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client AccountsClient) GetResponder(resp *http.Response) (result DataLakeStoreAccount, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List lists the Data Lake Store accounts within the subscription. The response includes a link to the next page of -// results, if any. -// Parameters: -// filter - oData filter. Optional. -// top - the number of items to return. Optional. -// skip - the number of items to skip over before returning elements. Optional. -// selectParameter - oData Select statement. Limits the properties on each entry to just those requested, e.g. -// Categories?$select=CategoryName,Description. Optional. -// orderby - orderBy clause. One or more comma-separated expressions with an optional "asc" (the default) or -// "desc" depending on the order you'd like the values sorted, e.g. Categories?$orderby=CategoryName desc. -// Optional. -// count - the Boolean value of true or false to request a count of the matching resources included with the -// resources in the response, e.g. Categories?$count=true. 
Optional. -func (client AccountsClient) List(ctx context.Context, filter string, top *int32, skip *int32, selectParameter string, orderby string, count *bool) (result DataLakeStoreAccountListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.List") - defer func() { - sc := -1 - if result.dlsalr.Response.Response != nil { - sc = result.dlsalr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: top, - Constraints: []validation.Constraint{{Target: "top", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "top", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}}}}}, - {TargetValue: skip, - Constraints: []validation.Constraint{{Target: "skip", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "skip", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}}}}}}); err != nil { - return result, validation.NewError("account.AccountsClient", "List", err.Error()) - } - - result.fn = client.listNextResults - req, err := client.ListPreparer(ctx, filter, top, skip, selectParameter, orderby, count) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.dlsalr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.AccountsClient", "List", resp, "Failure sending request") - return - } - - result.dlsalr, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "List", resp, "Failure responding to request") - return - } - if result.dlsalr.hasNextLink() && result.dlsalr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListPreparer prepares the List request. -func (client AccountsClient) ListPreparer(ctx context.Context, filter string, top *int32, skip *int32, selectParameter string, orderby string, count *bool) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(filter) > 0 { - queryParameters["$filter"] = autorest.Encode("query", filter) - } - if top != nil { - queryParameters["$top"] = autorest.Encode("query", *top) - } - if skip != nil { - queryParameters["$skip"] = autorest.Encode("query", *skip) - } - if len(selectParameter) > 0 { - queryParameters["$select"] = autorest.Encode("query", selectParameter) - } - if len(orderby) > 0 { - queryParameters["$orderby"] = autorest.Encode("query", orderby) - } - if count != nil { - queryParameters["$count"] = autorest.Encode("query", *count) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.DataLakeStore/accounts", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. 
-func (client AccountsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client AccountsClient) ListResponder(resp *http.Response) (result DataLakeStoreAccountListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listNextResults retrieves the next set of results, if any. -func (client AccountsClient) listNextResults(ctx context.Context, lastResults DataLakeStoreAccountListResult) (result DataLakeStoreAccountListResult, err error) { - req, err := lastResults.dataLakeStoreAccountListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "account.AccountsClient", "listNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "account.AccountsClient", "listNextResults", resp, "Failure sending next results request") - } - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "listNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListComplete enumerates all values, automatically crossing page boundaries as required. -func (client AccountsClient) ListComplete(ctx context.Context, filter string, top *int32, skip *int32, selectParameter string, orderby string, count *bool) (result DataLakeStoreAccountListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.List") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.List(ctx, filter, top, skip, selectParameter, orderby, count) - return -} - -// ListByResourceGroup lists the Data Lake Store accounts within a specific resource group. The response includes a -// link to the next page of results, if any. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// filter - oData filter. Optional. -// top - the number of items to return. Optional. -// skip - the number of items to skip over before returning elements. Optional. -// selectParameter - oData Select statement. Limits the properties on each entry to just those requested, e.g. -// Categories?$select=CategoryName,Description. Optional. -// orderby - orderBy clause. One or more comma-separated expressions with an optional "asc" (the default) or -// "desc" depending on the order you'd like the values sorted, e.g. Categories?$orderby=CategoryName desc. -// Optional. -// count - a Boolean value of true or false to request a count of the matching resources included with the -// resources in the response, e.g. Categories?$count=true. Optional. 
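`List` above returns a `DataLakeStoreAccountListResultPage` whose internals (`result.fn`, `result.dlsalr`, `hasNextLink`) implement manual paging, while `ListComplete` wraps the same machinery in an iterator. A sketch of driving the page type directly follows, as a contrast to the iterator pattern; the page's `NotDone`/`Values`/`NextWithContext` methods are not part of this hunk and are assumed, as are the blank OData arguments.

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account"
)

// listAccounts prints every Data Lake Store account in the subscription,
// advancing the result page by page rather than using the Complete iterator.
func listAccounts(ctx context.Context, client account.AccountsClient) error {
	// All OData options (filter, top, skip, select, orderby, count) left unset.
	page, err := client.List(ctx, "", nil, nil, "", "", nil)
	if err != nil {
		return err
	}
	for page.NotDone() {
		for _, acct := range page.Values() {
			fmt.Printf("account: %+v\n", acct)
		}
		if err := page.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```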
-func (client AccountsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string, filter string, top *int32, skip *int32, selectParameter string, orderby string, count *bool) (result DataLakeStoreAccountListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListByResourceGroup") - defer func() { - sc := -1 - if result.dlsalr.Response.Response != nil { - sc = result.dlsalr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: top, - Constraints: []validation.Constraint{{Target: "top", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "top", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}}}}}, - {TargetValue: skip, - Constraints: []validation.Constraint{{Target: "skip", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "skip", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}}}}}}); err != nil { - return result, validation.NewError("account.AccountsClient", "ListByResourceGroup", err.Error()) - } - - result.fn = client.listByResourceGroupNextResults - req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName, filter, top, skip, selectParameter, orderby, count) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "ListByResourceGroup", nil, "Failure preparing request") - return - } - - resp, err := client.ListByResourceGroupSender(req) - if err != nil { - result.dlsalr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.AccountsClient", "ListByResourceGroup", resp, "Failure sending request") - return - } - - result.dlsalr, err = client.ListByResourceGroupResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "ListByResourceGroup", resp, "Failure responding to request") - return - } - if result.dlsalr.hasNextLink() && result.dlsalr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
-func (client AccountsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string, filter string, top *int32, skip *int32, selectParameter string, orderby string, count *bool) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(filter) > 0 { - queryParameters["$filter"] = autorest.Encode("query", filter) - } - if top != nil { - queryParameters["$top"] = autorest.Encode("query", *top) - } - if skip != nil { - queryParameters["$skip"] = autorest.Encode("query", *skip) - } - if len(selectParameter) > 0 { - queryParameters["$select"] = autorest.Encode("query", selectParameter) - } - if len(orderby) > 0 { - queryParameters["$orderby"] = autorest.Encode("query", orderby) - } - if count != nil { - queryParameters["$count"] = autorest.Encode("query", *count) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always -// closes the http.Response Body. -func (client AccountsClient) ListByResourceGroupResponder(resp *http.Response) (result DataLakeStoreAccountListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listByResourceGroupNextResults retrieves the next set of results, if any. -func (client AccountsClient) listByResourceGroupNextResults(ctx context.Context, lastResults DataLakeStoreAccountListResult) (result DataLakeStoreAccountListResult, err error) { - req, err := lastResults.dataLakeStoreAccountListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "account.AccountsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListByResourceGroupSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "account.AccountsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") - } - result, err = client.ListByResourceGroupResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. 
-func (client AccountsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string, filter string, top *int32, skip *int32, selectParameter string, orderby string, count *bool) (result DataLakeStoreAccountListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListByResourceGroup") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListByResourceGroup(ctx, resourceGroupName, filter, top, skip, selectParameter, orderby, count) - return -} - -// Update updates the specified Data Lake Store account information. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Store account. -// parameters - parameters supplied to update the Data Lake Store account. -func (client AccountsClient) Update(ctx context.Context, resourceGroupName string, accountName string, parameters UpdateDataLakeStoreAccountParameters) (result AccountsUpdateFutureType, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Update") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "Update", nil, "Failure preparing request") - return - } - - result, err = client.UpdateSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsClient", "Update", result.Response(), "Failure sending request") - return - } - - return -} - -// UpdatePreparer prepares the Update request. -func (client AccountsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, parameters UpdateDataLakeStoreAccountParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSender sends the Update request. The method will close the -// http.Response Body if it receives an error. 
-func (client AccountsClient) UpdateSender(req *http.Request) (future AccountsUpdateFutureType, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// UpdateResponder handles the response to the Update request. The method always -// closes the http.Response Body. -func (client AccountsClient) UpdateResponder(resp *http.Response) (result DataLakeStoreAccount, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/enums.go deleted file mode 100644 index e709b16226c7..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/enums.go +++ /dev/null @@ -1,231 +0,0 @@ -package account - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// DataLakeStoreAccountState enumerates the values for data lake store account state. -type DataLakeStoreAccountState string - -const ( - // Active ... - Active DataLakeStoreAccountState = "Active" - // Suspended ... - Suspended DataLakeStoreAccountState = "Suspended" -) - -// PossibleDataLakeStoreAccountStateValues returns an array of possible values for the DataLakeStoreAccountState const type. -func PossibleDataLakeStoreAccountStateValues() []DataLakeStoreAccountState { - return []DataLakeStoreAccountState{Active, Suspended} -} - -// DataLakeStoreAccountStatus enumerates the values for data lake store account status. -type DataLakeStoreAccountStatus string - -const ( - // Canceled ... - Canceled DataLakeStoreAccountStatus = "Canceled" - // Creating ... - Creating DataLakeStoreAccountStatus = "Creating" - // Deleted ... - Deleted DataLakeStoreAccountStatus = "Deleted" - // Deleting ... - Deleting DataLakeStoreAccountStatus = "Deleting" - // Failed ... - Failed DataLakeStoreAccountStatus = "Failed" - // Patching ... - Patching DataLakeStoreAccountStatus = "Patching" - // Resuming ... - Resuming DataLakeStoreAccountStatus = "Resuming" - // Running ... - Running DataLakeStoreAccountStatus = "Running" - // Succeeded ... - Succeeded DataLakeStoreAccountStatus = "Succeeded" - // Suspending ... - Suspending DataLakeStoreAccountStatus = "Suspending" - // Undeleting ... - Undeleting DataLakeStoreAccountStatus = "Undeleting" -) - -// PossibleDataLakeStoreAccountStatusValues returns an array of possible values for the DataLakeStoreAccountStatus const type. -func PossibleDataLakeStoreAccountStatusValues() []DataLakeStoreAccountStatus { - return []DataLakeStoreAccountStatus{Canceled, Creating, Deleted, Deleting, Failed, Patching, Resuming, Running, Succeeded, Suspending, Undeleting} -} - -// EncryptionConfigType enumerates the values for encryption config type. 
-type EncryptionConfigType string - -const ( - // ServiceManaged ... - ServiceManaged EncryptionConfigType = "ServiceManaged" - // UserManaged ... - UserManaged EncryptionConfigType = "UserManaged" -) - -// PossibleEncryptionConfigTypeValues returns an array of possible values for the EncryptionConfigType const type. -func PossibleEncryptionConfigTypeValues() []EncryptionConfigType { - return []EncryptionConfigType{ServiceManaged, UserManaged} -} - -// EncryptionProvisioningState enumerates the values for encryption provisioning state. -type EncryptionProvisioningState string - -const ( - // EncryptionProvisioningStateCreating ... - EncryptionProvisioningStateCreating EncryptionProvisioningState = "Creating" - // EncryptionProvisioningStateSucceeded ... - EncryptionProvisioningStateSucceeded EncryptionProvisioningState = "Succeeded" -) - -// PossibleEncryptionProvisioningStateValues returns an array of possible values for the EncryptionProvisioningState const type. -func PossibleEncryptionProvisioningStateValues() []EncryptionProvisioningState { - return []EncryptionProvisioningState{EncryptionProvisioningStateCreating, EncryptionProvisioningStateSucceeded} -} - -// EncryptionState enumerates the values for encryption state. -type EncryptionState string - -const ( - // Disabled ... - Disabled EncryptionState = "Disabled" - // Enabled ... - Enabled EncryptionState = "Enabled" -) - -// PossibleEncryptionStateValues returns an array of possible values for the EncryptionState const type. -func PossibleEncryptionStateValues() []EncryptionState { - return []EncryptionState{Disabled, Enabled} -} - -// FirewallAllowAzureIpsState enumerates the values for firewall allow azure ips state. -type FirewallAllowAzureIpsState string - -const ( - // FirewallAllowAzureIpsStateDisabled ... - FirewallAllowAzureIpsStateDisabled FirewallAllowAzureIpsState = "Disabled" - // FirewallAllowAzureIpsStateEnabled ... - FirewallAllowAzureIpsStateEnabled FirewallAllowAzureIpsState = "Enabled" -) - -// PossibleFirewallAllowAzureIpsStateValues returns an array of possible values for the FirewallAllowAzureIpsState const type. -func PossibleFirewallAllowAzureIpsStateValues() []FirewallAllowAzureIpsState { - return []FirewallAllowAzureIpsState{FirewallAllowAzureIpsStateDisabled, FirewallAllowAzureIpsStateEnabled} -} - -// FirewallState enumerates the values for firewall state. -type FirewallState string - -const ( - // FirewallStateDisabled ... - FirewallStateDisabled FirewallState = "Disabled" - // FirewallStateEnabled ... - FirewallStateEnabled FirewallState = "Enabled" -) - -// PossibleFirewallStateValues returns an array of possible values for the FirewallState const type. -func PossibleFirewallStateValues() []FirewallState { - return []FirewallState{FirewallStateDisabled, FirewallStateEnabled} -} - -// OperationOrigin enumerates the values for operation origin. -type OperationOrigin string - -const ( - // System ... - System OperationOrigin = "system" - // User ... - User OperationOrigin = "user" - // Usersystem ... - Usersystem OperationOrigin = "user,system" -) - -// PossibleOperationOriginValues returns an array of possible values for the OperationOrigin const type. -func PossibleOperationOriginValues() []OperationOrigin { - return []OperationOrigin{System, User, Usersystem} -} - -// SubscriptionState enumerates the values for subscription state. -type SubscriptionState string - -const ( - // SubscriptionStateDeleted ... - SubscriptionStateDeleted SubscriptionState = "Deleted" - // SubscriptionStateRegistered ... 
- SubscriptionStateRegistered SubscriptionState = "Registered" - // SubscriptionStateSuspended ... - SubscriptionStateSuspended SubscriptionState = "Suspended" - // SubscriptionStateUnregistered ... - SubscriptionStateUnregistered SubscriptionState = "Unregistered" - // SubscriptionStateWarned ... - SubscriptionStateWarned SubscriptionState = "Warned" -) - -// PossibleSubscriptionStateValues returns an array of possible values for the SubscriptionState const type. -func PossibleSubscriptionStateValues() []SubscriptionState { - return []SubscriptionState{SubscriptionStateDeleted, SubscriptionStateRegistered, SubscriptionStateSuspended, SubscriptionStateUnregistered, SubscriptionStateWarned} -} - -// TierType enumerates the values for tier type. -type TierType string - -const ( - // Commitment100TB ... - Commitment100TB TierType = "Commitment_100TB" - // Commitment10TB ... - Commitment10TB TierType = "Commitment_10TB" - // Commitment1PB ... - Commitment1PB TierType = "Commitment_1PB" - // Commitment1TB ... - Commitment1TB TierType = "Commitment_1TB" - // Commitment500TB ... - Commitment500TB TierType = "Commitment_500TB" - // Commitment5PB ... - Commitment5PB TierType = "Commitment_5PB" - // Consumption ... - Consumption TierType = "Consumption" -) - -// PossibleTierTypeValues returns an array of possible values for the TierType const type. -func PossibleTierTypeValues() []TierType { - return []TierType{Commitment100TB, Commitment10TB, Commitment1PB, Commitment1TB, Commitment500TB, Commitment5PB, Consumption} -} - -// TrustedIDProviderState enumerates the values for trusted id provider state. -type TrustedIDProviderState string - -const ( - // TrustedIDProviderStateDisabled ... - TrustedIDProviderStateDisabled TrustedIDProviderState = "Disabled" - // TrustedIDProviderStateEnabled ... - TrustedIDProviderStateEnabled TrustedIDProviderState = "Enabled" -) - -// PossibleTrustedIDProviderStateValues returns an array of possible values for the TrustedIDProviderState const type. -func PossibleTrustedIDProviderStateValues() []TrustedIDProviderState { - return []TrustedIDProviderState{TrustedIDProviderStateDisabled, TrustedIDProviderStateEnabled} -} - -// UsageUnit enumerates the values for usage unit. -type UsageUnit string - -const ( - // Bytes ... - Bytes UsageUnit = "Bytes" - // BytesPerSecond ... - BytesPerSecond UsageUnit = "BytesPerSecond" - // Count ... - Count UsageUnit = "Count" - // CountsPerSecond ... - CountsPerSecond UsageUnit = "CountsPerSecond" - // Percent ... - Percent UsageUnit = "Percent" - // Seconds ... - Seconds UsageUnit = "Seconds" -) - -// PossibleUsageUnitValues returns an array of possible values for the UsageUnit const type. -func PossibleUsageUnitValues() []UsageUnit { - return []UsageUnit{Bytes, BytesPerSecond, Count, CountsPerSecond, Percent, Seconds} -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/firewallrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/firewallrules.go deleted file mode 100644 index d55fc5141ef8..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/firewallrules.go +++ /dev/null @@ -1,480 +0,0 @@ -package account - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. 
-// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// FirewallRulesClient is the creates an Azure Data Lake Store account management client. -type FirewallRulesClient struct { - BaseClient -} - -// NewFirewallRulesClient creates an instance of the FirewallRulesClient client. -func NewFirewallRulesClient(subscriptionID string) FirewallRulesClient { - return NewFirewallRulesClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewFirewallRulesClientWithBaseURI creates an instance of the FirewallRulesClient client using a custom endpoint. -// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewFirewallRulesClientWithBaseURI(baseURI string, subscriptionID string) FirewallRulesClient { - return FirewallRulesClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate creates or updates the specified firewall rule. During update, the firewall rule with the specified -// name will be replaced with this new firewall rule. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Store account. -// firewallRuleName - the name of the firewall rule to create or update. -// parameters - parameters supplied to create or update the firewall rule. -func (client FirewallRulesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, firewallRuleName string, parameters CreateOrUpdateFirewallRuleParameters) (result FirewallRule, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FirewallRulesClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.CreateOrUpdateFirewallRuleProperties", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.CreateOrUpdateFirewallRuleProperties.StartIPAddress", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.CreateOrUpdateFirewallRuleProperties.EndIPAddress", Name: validation.Null, Rule: true, Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("account.FirewallRulesClient", "CreateOrUpdate", err.Error()) - } - - req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, firewallRuleName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "CreateOrUpdate", resp, "Failure sending request") - return - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "CreateOrUpdate", resp, "Failure responding to request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client FirewallRulesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, firewallRuleName string, parameters CreateOrUpdateFirewallRuleParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "firewallRuleName": autorest.Encode("path", firewallRuleName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/firewallRules/{firewallRuleName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client FirewallRulesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client FirewallRulesClient) CreateOrUpdateResponder(resp *http.Response) (result FirewallRule, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes the specified firewall rule from the specified Data Lake Store account. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Store account. -// firewallRuleName - the name of the firewall rule to delete. -func (client FirewallRulesClient) Delete(ctx context.Context, resourceGroupName string, accountName string, firewallRuleName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FirewallRulesClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, firewallRuleName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. 
-func (client FirewallRulesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, firewallRuleName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "firewallRuleName": autorest.Encode("path", firewallRuleName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/firewallRules/{firewallRuleName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client FirewallRulesClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client FirewallRulesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets the specified Data Lake Store firewall rule. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Store account. -// firewallRuleName - the name of the firewall rule to retrieve. -func (client FirewallRulesClient) Get(ctx context.Context, resourceGroupName string, accountName string, firewallRuleName string) (result FirewallRule, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FirewallRulesClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, resourceGroupName, accountName, firewallRuleName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client FirewallRulesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, firewallRuleName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "firewallRuleName": autorest.Encode("path", firewallRuleName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/firewallRules/{firewallRuleName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client FirewallRulesClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client FirewallRulesClient) GetResponder(resp *http.Response) (result FirewallRule, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListByAccount lists the Data Lake Store firewall rules within the specified Data Lake Store account. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Store account. -func (client FirewallRulesClient) ListByAccount(ctx context.Context, resourceGroupName string, accountName string) (result FirewallRuleListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FirewallRulesClient.ListByAccount") - defer func() { - sc := -1 - if result.frlr.Response.Response != nil { - sc = result.frlr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.fn = client.listByAccountNextResults - req, err := client.ListByAccountPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "ListByAccount", nil, "Failure preparing request") - return - } - - resp, err := client.ListByAccountSender(req) - if err != nil { - result.frlr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "ListByAccount", resp, "Failure sending request") - return - } - - result.frlr, err = client.ListByAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "ListByAccount", resp, "Failure responding to request") - return - } - if result.frlr.hasNextLink() && result.frlr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListByAccountPreparer prepares the ListByAccount request. 
-func (client FirewallRulesClient) ListByAccountPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/firewallRules", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByAccountSender sends the ListByAccount request. The method will close the -// http.Response Body if it receives an error. -func (client FirewallRulesClient) ListByAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByAccountResponder handles the response to the ListByAccount request. The method always -// closes the http.Response Body. -func (client FirewallRulesClient) ListByAccountResponder(resp *http.Response) (result FirewallRuleListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listByAccountNextResults retrieves the next set of results, if any. -func (client FirewallRulesClient) listByAccountNextResults(ctx context.Context, lastResults FirewallRuleListResult) (result FirewallRuleListResult, err error) { - req, err := lastResults.firewallRuleListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "account.FirewallRulesClient", "listByAccountNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListByAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "account.FirewallRulesClient", "listByAccountNextResults", resp, "Failure sending next results request") - } - result, err = client.ListByAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "listByAccountNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListByAccountComplete enumerates all values, automatically crossing page boundaries as required. -func (client FirewallRulesClient) ListByAccountComplete(ctx context.Context, resourceGroupName string, accountName string) (result FirewallRuleListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FirewallRulesClient.ListByAccount") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListByAccount(ctx, resourceGroupName, accountName) - return -} - -// Update updates the specified firewall rule. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Store account. 
-// firewallRuleName - the name of the firewall rule to update. -// parameters - parameters supplied to update the firewall rule. -func (client FirewallRulesClient) Update(ctx context.Context, resourceGroupName string, accountName string, firewallRuleName string, parameters *UpdateFirewallRuleParameters) (result FirewallRule, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FirewallRulesClient.Update") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, firewallRuleName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "Update", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "Update", resp, "Failure sending request") - return - } - - result, err = client.UpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.FirewallRulesClient", "Update", resp, "Failure responding to request") - return - } - - return -} - -// UpdatePreparer prepares the Update request. -func (client FirewallRulesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, firewallRuleName string, parameters *UpdateFirewallRuleParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "firewallRuleName": autorest.Encode("path", firewallRuleName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/firewallRules/{firewallRuleName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - if parameters != nil { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithJSON(parameters)) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSender sends the Update request. The method will close the -// http.Response Body if it receives an error. -func (client FirewallRulesClient) UpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// UpdateResponder handles the response to the Update request. The method always -// closes the http.Response Body. 
-func (client FirewallRulesClient) UpdateResponder(resp *http.Response) (result FirewallRule, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/locations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/locations.go deleted file mode 100644 index d27d4e81c22b..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/locations.go +++ /dev/null @@ -1,179 +0,0 @@ -package account - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// LocationsClient is the creates an Azure Data Lake Store account management client. -type LocationsClient struct { - BaseClient -} - -// NewLocationsClient creates an instance of the LocationsClient client. -func NewLocationsClient(subscriptionID string) LocationsClient { - return NewLocationsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewLocationsClientWithBaseURI creates an instance of the LocationsClient client using a custom endpoint. Use this -// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewLocationsClientWithBaseURI(baseURI string, subscriptionID string) LocationsClient { - return LocationsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// GetCapability gets subscription-level properties and limits for Data Lake Store specified by resource location. -// Parameters: -// location - the resource location without whitespace. -func (client LocationsClient) GetCapability(ctx context.Context, location string) (result CapabilityInformation, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/LocationsClient.GetCapability") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetCapabilityPreparer(ctx, location) - if err != nil { - err = autorest.NewErrorWithError(err, "account.LocationsClient", "GetCapability", nil, "Failure preparing request") - return - } - - resp, err := client.GetCapabilitySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.LocationsClient", "GetCapability", resp, "Failure sending request") - return - } - - result, err = client.GetCapabilityResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.LocationsClient", "GetCapability", resp, "Failure responding to request") - return - } - - return -} - -// GetCapabilityPreparer prepares the GetCapability request. 
-func (client LocationsClient) GetCapabilityPreparer(ctx context.Context, location string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "location": autorest.Encode("path", location), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.DataLakeStore/locations/{location}/capability", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCapabilitySender sends the GetCapability request. The method will close the -// http.Response Body if it receives an error. -func (client LocationsClient) GetCapabilitySender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetCapabilityResponder handles the response to the GetCapability request. The method always -// closes the http.Response Body. -func (client LocationsClient) GetCapabilityResponder(resp *http.Response) (result CapabilityInformation, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetUsage gets the current usage count and the limit for the resources of the location under the subscription. -// Parameters: -// location - the resource location without whitespace. -func (client LocationsClient) GetUsage(ctx context.Context, location string) (result UsageListResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/LocationsClient.GetUsage") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetUsagePreparer(ctx, location) - if err != nil { - err = autorest.NewErrorWithError(err, "account.LocationsClient", "GetUsage", nil, "Failure preparing request") - return - } - - resp, err := client.GetUsageSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.LocationsClient", "GetUsage", resp, "Failure sending request") - return - } - - result, err = client.GetUsageResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.LocationsClient", "GetUsage", resp, "Failure responding to request") - return - } - - return -} - -// GetUsagePreparer prepares the GetUsage request. 
-func (client LocationsClient) GetUsagePreparer(ctx context.Context, location string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "location": autorest.Encode("path", location), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.DataLakeStore/locations/{location}/usages", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetUsageSender sends the GetUsage request. The method will close the -// http.Response Body if it receives an error. -func (client LocationsClient) GetUsageSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetUsageResponder handles the response to the GetUsage request. The method always -// closes the http.Response Body. -func (client LocationsClient) GetUsageResponder(resp *http.Response) (result UsageListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/models.go deleted file mode 100644 index f069e6a13fbf..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/models.go +++ /dev/null @@ -1,2324 +0,0 @@ -package account - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "encoding/json" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/go-autorest/autorest/to" - "github.com/Azure/go-autorest/tracing" - "github.com/gofrs/uuid" - "net/http" -) - -// The package's fully qualified name. -const fqdn = "github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account" - -// AccountsCreateFutureType an abstraction for monitoring and retrieving the results of a long-running -// operation. -type AccountsCreateFutureType struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(AccountsClient) (DataLakeStoreAccount, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *AccountsCreateFutureType) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for AccountsCreateFutureType.Result. 
-func (future *AccountsCreateFutureType) result(client AccountsClient) (dlsa DataLakeStoreAccount, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsCreateFutureType", "Result", future.Response(), "Polling failure") - return - } - if !done { - dlsa.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("account.AccountsCreateFutureType") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if dlsa.Response.Response, err = future.GetResult(sender); err == nil && dlsa.Response.Response.StatusCode != http.StatusNoContent { - dlsa, err = client.CreateResponder(dlsa.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsCreateFutureType", "Result", dlsa.Response.Response, "Failure responding to request") - } - } - return -} - -// AccountsDeleteFutureType an abstraction for monitoring and retrieving the results of a long-running -// operation. -type AccountsDeleteFutureType struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(AccountsClient) (autorest.Response, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *AccountsDeleteFutureType) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for AccountsDeleteFutureType.Result. -func (future *AccountsDeleteFutureType) result(client AccountsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsDeleteFutureType", "Result", future.Response(), "Polling failure") - return - } - if !done { - ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("account.AccountsDeleteFutureType") - return - } - ar.Response = future.Response() - return -} - -// AccountsUpdateFutureType an abstraction for monitoring and retrieving the results of a long-running -// operation. -type AccountsUpdateFutureType struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(AccountsClient) (DataLakeStoreAccount, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *AccountsUpdateFutureType) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for AccountsUpdateFutureType.Result. 
-func (future *AccountsUpdateFutureType) result(client AccountsClient) (dlsa DataLakeStoreAccount, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsUpdateFutureType", "Result", future.Response(), "Polling failure") - return - } - if !done { - dlsa.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("account.AccountsUpdateFutureType") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if dlsa.Response.Response, err = future.GetResult(sender); err == nil && dlsa.Response.Response.StatusCode != http.StatusNoContent { - dlsa, err = client.UpdateResponder(dlsa.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "account.AccountsUpdateFutureType", "Result", dlsa.Response.Response, "Failure responding to request") - } - } - return -} - -// CapabilityInformation subscription-level properties and limits for Data Lake Store. -type CapabilityInformation struct { - autorest.Response `json:"-"` - // SubscriptionID - READ-ONLY; The subscription credentials that uniquely identifies the subscription. - SubscriptionID *uuid.UUID `json:"subscriptionId,omitempty"` - // State - READ-ONLY; The subscription state. Possible values include: 'SubscriptionStateRegistered', 'SubscriptionStateSuspended', 'SubscriptionStateDeleted', 'SubscriptionStateUnregistered', 'SubscriptionStateWarned' - State SubscriptionState `json:"state,omitempty"` - // MaxAccountCount - READ-ONLY; The maximum supported number of accounts under this subscription. - MaxAccountCount *int32 `json:"maxAccountCount,omitempty"` - // AccountCount - READ-ONLY; The current number of accounts under this subscription. - AccountCount *int32 `json:"accountCount,omitempty"` - // MigrationState - READ-ONLY; The Boolean value of true or false to indicate the maintenance state. - MigrationState *bool `json:"migrationState,omitempty"` -} - -// MarshalJSON is the custom marshaler for CapabilityInformation. -func (ci CapabilityInformation) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// CheckNameAvailabilityParameters data Lake Store account name availability check parameters. -type CheckNameAvailabilityParameters struct { - // Name - The Data Lake Store name to check availability for. - Name *string `json:"name,omitempty"` - // Type - The resource type. Note: This should not be set by the user, as the constant value is Microsoft.DataLakeStore/accounts - Type *string `json:"type,omitempty"` -} - -// CreateDataLakeStoreAccountParameters ... -type CreateDataLakeStoreAccountParameters struct { - // Location - The resource location. - Location *string `json:"location,omitempty"` - // Tags - The resource tags. - Tags map[string]*string `json:"tags"` - // Identity - The Key Vault encryption identity, if any. - Identity *EncryptionIdentity `json:"identity,omitempty"` - // CreateDataLakeStoreAccountProperties - The Data Lake Store account properties to use for creating. - *CreateDataLakeStoreAccountProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for CreateDataLakeStoreAccountParameters. 
-func (cdlsap CreateDataLakeStoreAccountParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cdlsap.Location != nil { - objectMap["location"] = cdlsap.Location - } - if cdlsap.Tags != nil { - objectMap["tags"] = cdlsap.Tags - } - if cdlsap.Identity != nil { - objectMap["identity"] = cdlsap.Identity - } - if cdlsap.CreateDataLakeStoreAccountProperties != nil { - objectMap["properties"] = cdlsap.CreateDataLakeStoreAccountProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for CreateDataLakeStoreAccountParameters struct. -func (cdlsap *CreateDataLakeStoreAccountParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - cdlsap.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - cdlsap.Tags = tags - } - case "identity": - if v != nil { - var identity EncryptionIdentity - err = json.Unmarshal(*v, &identity) - if err != nil { - return err - } - cdlsap.Identity = &identity - } - case "properties": - if v != nil { - var createDataLakeStoreAccountProperties CreateDataLakeStoreAccountProperties - err = json.Unmarshal(*v, &createDataLakeStoreAccountProperties) - if err != nil { - return err - } - cdlsap.CreateDataLakeStoreAccountProperties = &createDataLakeStoreAccountProperties - } - } - } - - return nil -} - -// CreateDataLakeStoreAccountProperties ... -type CreateDataLakeStoreAccountProperties struct { - // DefaultGroup - The default owner group for all new folders and files created in the Data Lake Store account. - DefaultGroup *string `json:"defaultGroup,omitempty"` - // EncryptionConfig - The Key Vault encryption configuration. - EncryptionConfig *EncryptionConfig `json:"encryptionConfig,omitempty"` - // EncryptionState - The current state of encryption for this Data Lake Store account. Possible values include: 'Enabled', 'Disabled' - EncryptionState EncryptionState `json:"encryptionState,omitempty"` - // FirewallRules - The list of firewall rules associated with this Data Lake Store account. - FirewallRules *[]CreateFirewallRuleWithAccountParameters `json:"firewallRules,omitempty"` - // VirtualNetworkRules - The list of virtual network rules associated with this Data Lake Store account. - VirtualNetworkRules *[]CreateVirtualNetworkRuleWithAccountParameters `json:"virtualNetworkRules,omitempty"` - // FirewallState - The current state of the IP address firewall for this Data Lake Store account. Possible values include: 'FirewallStateEnabled', 'FirewallStateDisabled' - FirewallState FirewallState `json:"firewallState,omitempty"` - // FirewallAllowAzureIps - The current state of allowing or disallowing IPs originating within Azure through the firewall. If the firewall is disabled, this is not enforced. Possible values include: 'FirewallAllowAzureIpsStateEnabled', 'FirewallAllowAzureIpsStateDisabled' - FirewallAllowAzureIps FirewallAllowAzureIpsState `json:"firewallAllowAzureIps,omitempty"` - // TrustedIDProviders - The list of trusted identity providers associated with this Data Lake Store account. 
- TrustedIDProviders *[]CreateTrustedIDProviderWithAccountParameters `json:"trustedIdProviders,omitempty"` - // TrustedIDProviderState - The current state of the trusted identity provider feature for this Data Lake Store account. Possible values include: 'TrustedIDProviderStateEnabled', 'TrustedIDProviderStateDisabled' - TrustedIDProviderState TrustedIDProviderState `json:"trustedIdProviderState,omitempty"` - // NewTier - The commitment tier to use for next month. Possible values include: 'Consumption', 'Commitment1TB', 'Commitment10TB', 'Commitment100TB', 'Commitment500TB', 'Commitment1PB', 'Commitment5PB' - NewTier TierType `json:"newTier,omitempty"` -} - -// CreateFirewallRuleWithAccountParameters the parameters used to create a new firewall rule while creating -// a new Data Lake Store account. -type CreateFirewallRuleWithAccountParameters struct { - // Name - The unique name of the firewall rule to create. - Name *string `json:"name,omitempty"` - // CreateOrUpdateFirewallRuleProperties - The firewall rule properties to use when creating a new firewall rule. - *CreateOrUpdateFirewallRuleProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for CreateFirewallRuleWithAccountParameters. -func (cfrwap CreateFirewallRuleWithAccountParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cfrwap.Name != nil { - objectMap["name"] = cfrwap.Name - } - if cfrwap.CreateOrUpdateFirewallRuleProperties != nil { - objectMap["properties"] = cfrwap.CreateOrUpdateFirewallRuleProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for CreateFirewallRuleWithAccountParameters struct. -func (cfrwap *CreateFirewallRuleWithAccountParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - cfrwap.Name = &name - } - case "properties": - if v != nil { - var createOrUpdateFirewallRuleProperties CreateOrUpdateFirewallRuleProperties - err = json.Unmarshal(*v, &createOrUpdateFirewallRuleProperties) - if err != nil { - return err - } - cfrwap.CreateOrUpdateFirewallRuleProperties = &createOrUpdateFirewallRuleProperties - } - } - } - - return nil -} - -// CreateOrUpdateFirewallRuleParameters the parameters used to create a new firewall rule. -type CreateOrUpdateFirewallRuleParameters struct { - // CreateOrUpdateFirewallRuleProperties - The firewall rule properties to use when creating a new firewall rule. - *CreateOrUpdateFirewallRuleProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for CreateOrUpdateFirewallRuleParameters. -func (coufrp CreateOrUpdateFirewallRuleParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if coufrp.CreateOrUpdateFirewallRuleProperties != nil { - objectMap["properties"] = coufrp.CreateOrUpdateFirewallRuleProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for CreateOrUpdateFirewallRuleParameters struct. 
-func (coufrp *CreateOrUpdateFirewallRuleParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var createOrUpdateFirewallRuleProperties CreateOrUpdateFirewallRuleProperties - err = json.Unmarshal(*v, &createOrUpdateFirewallRuleProperties) - if err != nil { - return err - } - coufrp.CreateOrUpdateFirewallRuleProperties = &createOrUpdateFirewallRuleProperties - } - } - } - - return nil -} - -// CreateOrUpdateFirewallRuleProperties the firewall rule properties to use when creating a new firewall -// rule. -type CreateOrUpdateFirewallRuleProperties struct { - // StartIPAddress - The start IP address for the firewall rule. This can be either ipv4 or ipv6. Start and End should be in the same protocol. - StartIPAddress *string `json:"startIpAddress,omitempty"` - // EndIPAddress - The end IP address for the firewall rule. This can be either ipv4 or ipv6. Start and End should be in the same protocol. - EndIPAddress *string `json:"endIpAddress,omitempty"` -} - -// CreateOrUpdateTrustedIDProviderParameters the parameters used to create a new trusted identity provider. -type CreateOrUpdateTrustedIDProviderParameters struct { - // CreateOrUpdateTrustedIDProviderProperties - The trusted identity provider properties to use when creating a new trusted identity provider. - *CreateOrUpdateTrustedIDProviderProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for CreateOrUpdateTrustedIDProviderParameters. -func (coutipp CreateOrUpdateTrustedIDProviderParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if coutipp.CreateOrUpdateTrustedIDProviderProperties != nil { - objectMap["properties"] = coutipp.CreateOrUpdateTrustedIDProviderProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for CreateOrUpdateTrustedIDProviderParameters struct. -func (coutipp *CreateOrUpdateTrustedIDProviderParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var createOrUpdateTrustedIDProviderProperties CreateOrUpdateTrustedIDProviderProperties - err = json.Unmarshal(*v, &createOrUpdateTrustedIDProviderProperties) - if err != nil { - return err - } - coutipp.CreateOrUpdateTrustedIDProviderProperties = &createOrUpdateTrustedIDProviderProperties - } - } - } - - return nil -} - -// CreateOrUpdateTrustedIDProviderProperties the trusted identity provider properties to use when creating -// a new trusted identity provider. -type CreateOrUpdateTrustedIDProviderProperties struct { - // IDProvider - The URL of this trusted identity provider. - IDProvider *string `json:"idProvider,omitempty"` -} - -// CreateOrUpdateVirtualNetworkRuleParameters the parameters used to create a new virtual network rule. -type CreateOrUpdateVirtualNetworkRuleParameters struct { - // CreateOrUpdateVirtualNetworkRuleProperties - The virtual network rule properties to use when creating a new virtual network rule. - *CreateOrUpdateVirtualNetworkRuleProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for CreateOrUpdateVirtualNetworkRuleParameters. 
-func (couvnrp CreateOrUpdateVirtualNetworkRuleParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if couvnrp.CreateOrUpdateVirtualNetworkRuleProperties != nil { - objectMap["properties"] = couvnrp.CreateOrUpdateVirtualNetworkRuleProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for CreateOrUpdateVirtualNetworkRuleParameters struct. -func (couvnrp *CreateOrUpdateVirtualNetworkRuleParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var createOrUpdateVirtualNetworkRuleProperties CreateOrUpdateVirtualNetworkRuleProperties - err = json.Unmarshal(*v, &createOrUpdateVirtualNetworkRuleProperties) - if err != nil { - return err - } - couvnrp.CreateOrUpdateVirtualNetworkRuleProperties = &createOrUpdateVirtualNetworkRuleProperties - } - } - } - - return nil -} - -// CreateOrUpdateVirtualNetworkRuleProperties the virtual network rule properties to use when creating a -// new virtual network rule. -type CreateOrUpdateVirtualNetworkRuleProperties struct { - // SubnetID - The resource identifier for the subnet. - SubnetID *string `json:"subnetId,omitempty"` -} - -// CreateTrustedIDProviderWithAccountParameters the parameters used to create a new trusted identity -// provider while creating a new Data Lake Store account. -type CreateTrustedIDProviderWithAccountParameters struct { - // Name - The unique name of the trusted identity provider to create. - Name *string `json:"name,omitempty"` - // CreateOrUpdateTrustedIDProviderProperties - The trusted identity provider properties to use when creating a new trusted identity provider. - *CreateOrUpdateTrustedIDProviderProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for CreateTrustedIDProviderWithAccountParameters. -func (ctipwap CreateTrustedIDProviderWithAccountParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ctipwap.Name != nil { - objectMap["name"] = ctipwap.Name - } - if ctipwap.CreateOrUpdateTrustedIDProviderProperties != nil { - objectMap["properties"] = ctipwap.CreateOrUpdateTrustedIDProviderProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for CreateTrustedIDProviderWithAccountParameters struct. -func (ctipwap *CreateTrustedIDProviderWithAccountParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - ctipwap.Name = &name - } - case "properties": - if v != nil { - var createOrUpdateTrustedIDProviderProperties CreateOrUpdateTrustedIDProviderProperties - err = json.Unmarshal(*v, &createOrUpdateTrustedIDProviderProperties) - if err != nil { - return err - } - ctipwap.CreateOrUpdateTrustedIDProviderProperties = &createOrUpdateTrustedIDProviderProperties - } - } - } - - return nil -} - -// CreateVirtualNetworkRuleWithAccountParameters the parameters used to create a new virtual network rule -// while creating a new Data Lake Store account. -type CreateVirtualNetworkRuleWithAccountParameters struct { - // Name - The unique name of the virtual network rule to create. 
- Name *string `json:"name,omitempty"` - // CreateOrUpdateVirtualNetworkRuleProperties - The virtual network rule properties to use when creating a new virtual network rule. - *CreateOrUpdateVirtualNetworkRuleProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for CreateVirtualNetworkRuleWithAccountParameters. -func (cvnrwap CreateVirtualNetworkRuleWithAccountParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cvnrwap.Name != nil { - objectMap["name"] = cvnrwap.Name - } - if cvnrwap.CreateOrUpdateVirtualNetworkRuleProperties != nil { - objectMap["properties"] = cvnrwap.CreateOrUpdateVirtualNetworkRuleProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for CreateVirtualNetworkRuleWithAccountParameters struct. -func (cvnrwap *CreateVirtualNetworkRuleWithAccountParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - cvnrwap.Name = &name - } - case "properties": - if v != nil { - var createOrUpdateVirtualNetworkRuleProperties CreateOrUpdateVirtualNetworkRuleProperties - err = json.Unmarshal(*v, &createOrUpdateVirtualNetworkRuleProperties) - if err != nil { - return err - } - cvnrwap.CreateOrUpdateVirtualNetworkRuleProperties = &createOrUpdateVirtualNetworkRuleProperties - } - } - } - - return nil -} - -// DataLakeStoreAccount data Lake Store account information. -type DataLakeStoreAccount struct { - autorest.Response `json:"-"` - // Identity - READ-ONLY; The Key Vault encryption identity, if any. - Identity *EncryptionIdentity `json:"identity,omitempty"` - // DataLakeStoreAccountProperties - READ-ONLY; The Data Lake Store account properties. - *DataLakeStoreAccountProperties `json:"properties,omitempty"` - // ID - READ-ONLY; The resource identifier. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The resource name. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The resource type. - Type *string `json:"type,omitempty"` - // Location - READ-ONLY; The resource location. - Location *string `json:"location,omitempty"` - // Tags - READ-ONLY; The resource tags. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for DataLakeStoreAccount. -func (dlsa DataLakeStoreAccount) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for DataLakeStoreAccount struct. 
-func (dlsa *DataLakeStoreAccount) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "identity": - if v != nil { - var identity EncryptionIdentity - err = json.Unmarshal(*v, &identity) - if err != nil { - return err - } - dlsa.Identity = &identity - } - case "properties": - if v != nil { - var dataLakeStoreAccountProperties DataLakeStoreAccountProperties - err = json.Unmarshal(*v, &dataLakeStoreAccountProperties) - if err != nil { - return err - } - dlsa.DataLakeStoreAccountProperties = &dataLakeStoreAccountProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - dlsa.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - dlsa.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - dlsa.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - dlsa.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - dlsa.Tags = tags - } - } - } - - return nil -} - -// DataLakeStoreAccountBasic basic Data Lake Store account information, returned on list calls. -type DataLakeStoreAccountBasic struct { - // DataLakeStoreAccountPropertiesBasic - READ-ONLY; The basic Data Lake Store account properties. - *DataLakeStoreAccountPropertiesBasic `json:"properties,omitempty"` - // ID - READ-ONLY; The resource identifier. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The resource name. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The resource type. - Type *string `json:"type,omitempty"` - // Location - READ-ONLY; The resource location. - Location *string `json:"location,omitempty"` - // Tags - READ-ONLY; The resource tags. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for DataLakeStoreAccountBasic. -func (dlsab DataLakeStoreAccountBasic) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for DataLakeStoreAccountBasic struct. 
-func (dlsab *DataLakeStoreAccountBasic) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var dataLakeStoreAccountPropertiesBasic DataLakeStoreAccountPropertiesBasic - err = json.Unmarshal(*v, &dataLakeStoreAccountPropertiesBasic) - if err != nil { - return err - } - dlsab.DataLakeStoreAccountPropertiesBasic = &dataLakeStoreAccountPropertiesBasic - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - dlsab.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - dlsab.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - dlsab.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - dlsab.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - dlsab.Tags = tags - } - } - } - - return nil -} - -// DataLakeStoreAccountListResult data Lake Store account list information response. -type DataLakeStoreAccountListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The results of the list operation. - Value *[]DataLakeStoreAccountBasic `json:"value,omitempty"` - // NextLink - READ-ONLY; The link (url) to the next page of results. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for DataLakeStoreAccountListResult. -func (dlsalr DataLakeStoreAccountListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// DataLakeStoreAccountListResultIterator provides access to a complete listing of -// DataLakeStoreAccountBasic values. -type DataLakeStoreAccountListResultIterator struct { - i int - page DataLakeStoreAccountListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *DataLakeStoreAccountListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DataLakeStoreAccountListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *DataLakeStoreAccountListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DataLakeStoreAccountListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. 
-func (iter DataLakeStoreAccountListResultIterator) Response() DataLakeStoreAccountListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter DataLakeStoreAccountListResultIterator) Value() DataLakeStoreAccountBasic { - if !iter.page.NotDone() { - return DataLakeStoreAccountBasic{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the DataLakeStoreAccountListResultIterator type. -func NewDataLakeStoreAccountListResultIterator(page DataLakeStoreAccountListResultPage) DataLakeStoreAccountListResultIterator { - return DataLakeStoreAccountListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (dlsalr DataLakeStoreAccountListResult) IsEmpty() bool { - return dlsalr.Value == nil || len(*dlsalr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (dlsalr DataLakeStoreAccountListResult) hasNextLink() bool { - return dlsalr.NextLink != nil && len(*dlsalr.NextLink) != 0 -} - -// dataLakeStoreAccountListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (dlsalr DataLakeStoreAccountListResult) dataLakeStoreAccountListResultPreparer(ctx context.Context) (*http.Request, error) { - if !dlsalr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(dlsalr.NextLink))) -} - -// DataLakeStoreAccountListResultPage contains a page of DataLakeStoreAccountBasic values. -type DataLakeStoreAccountListResultPage struct { - fn func(context.Context, DataLakeStoreAccountListResult) (DataLakeStoreAccountListResult, error) - dlsalr DataLakeStoreAccountListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *DataLakeStoreAccountListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DataLakeStoreAccountListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.dlsalr) - if err != nil { - return err - } - page.dlsalr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *DataLakeStoreAccountListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DataLakeStoreAccountListResultPage) NotDone() bool { - return !page.dlsalr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page DataLakeStoreAccountListResultPage) Response() DataLakeStoreAccountListResult { - return page.dlsalr -} - -// Values returns the slice of values for the current page or nil if there are no values. 
-func (page DataLakeStoreAccountListResultPage) Values() []DataLakeStoreAccountBasic { - if page.dlsalr.IsEmpty() { - return nil - } - return *page.dlsalr.Value -} - -// Creates a new instance of the DataLakeStoreAccountListResultPage type. -func NewDataLakeStoreAccountListResultPage(cur DataLakeStoreAccountListResult, getNextPage func(context.Context, DataLakeStoreAccountListResult) (DataLakeStoreAccountListResult, error)) DataLakeStoreAccountListResultPage { - return DataLakeStoreAccountListResultPage{ - fn: getNextPage, - dlsalr: cur, - } -} - -// DataLakeStoreAccountProperties data Lake Store account properties information. -type DataLakeStoreAccountProperties struct { - // DefaultGroup - READ-ONLY; The default owner group for all new folders and files created in the Data Lake Store account. - DefaultGroup *string `json:"defaultGroup,omitempty"` - // EncryptionConfig - READ-ONLY; The Key Vault encryption configuration. - EncryptionConfig *EncryptionConfig `json:"encryptionConfig,omitempty"` - // EncryptionState - READ-ONLY; The current state of encryption for this Data Lake Store account. Possible values include: 'Enabled', 'Disabled' - EncryptionState EncryptionState `json:"encryptionState,omitempty"` - // EncryptionProvisioningState - READ-ONLY; The current state of encryption provisioning for this Data Lake Store account. Possible values include: 'EncryptionProvisioningStateCreating', 'EncryptionProvisioningStateSucceeded' - EncryptionProvisioningState EncryptionProvisioningState `json:"encryptionProvisioningState,omitempty"` - // FirewallRules - READ-ONLY; The list of firewall rules associated with this Data Lake Store account. - FirewallRules *[]FirewallRule `json:"firewallRules,omitempty"` - // VirtualNetworkRules - READ-ONLY; The list of virtual network rules associated with this Data Lake Store account. - VirtualNetworkRules *[]VirtualNetworkRule `json:"virtualNetworkRules,omitempty"` - // FirewallState - READ-ONLY; The current state of the IP address firewall for this Data Lake Store account. Possible values include: 'FirewallStateEnabled', 'FirewallStateDisabled' - FirewallState FirewallState `json:"firewallState,omitempty"` - // FirewallAllowAzureIps - READ-ONLY; The current state of allowing or disallowing IPs originating within Azure through the firewall. If the firewall is disabled, this is not enforced. Possible values include: 'FirewallAllowAzureIpsStateEnabled', 'FirewallAllowAzureIpsStateDisabled' - FirewallAllowAzureIps FirewallAllowAzureIpsState `json:"firewallAllowAzureIps,omitempty"` - // TrustedIDProviders - READ-ONLY; The list of trusted identity providers associated with this Data Lake Store account. - TrustedIDProviders *[]TrustedIDProvider `json:"trustedIdProviders,omitempty"` - // TrustedIDProviderState - READ-ONLY; The current state of the trusted identity provider feature for this Data Lake Store account. Possible values include: 'TrustedIDProviderStateEnabled', 'TrustedIDProviderStateDisabled' - TrustedIDProviderState TrustedIDProviderState `json:"trustedIdProviderState,omitempty"` - // NewTier - READ-ONLY; The commitment tier to use for next month. Possible values include: 'Consumption', 'Commitment1TB', 'Commitment10TB', 'Commitment100TB', 'Commitment500TB', 'Commitment1PB', 'Commitment5PB' - NewTier TierType `json:"newTier,omitempty"` - // CurrentTier - READ-ONLY; The commitment tier in use for the current month. 
Possible values include: 'Consumption', 'Commitment1TB', 'Commitment10TB', 'Commitment100TB', 'Commitment500TB', 'Commitment1PB', 'Commitment5PB' - CurrentTier TierType `json:"currentTier,omitempty"` - // AccountID - READ-ONLY; The unique identifier associated with this Data Lake Store account. - AccountID *uuid.UUID `json:"accountId,omitempty"` - // ProvisioningState - READ-ONLY; The provisioning status of the Data Lake Store account. Possible values include: 'Failed', 'Creating', 'Running', 'Succeeded', 'Patching', 'Suspending', 'Resuming', 'Deleting', 'Deleted', 'Undeleting', 'Canceled' - ProvisioningState DataLakeStoreAccountStatus `json:"provisioningState,omitempty"` - // State - READ-ONLY; The state of the Data Lake Store account. Possible values include: 'Active', 'Suspended' - State DataLakeStoreAccountState `json:"state,omitempty"` - // CreationTime - READ-ONLY; The account creation time. - CreationTime *date.Time `json:"creationTime,omitempty"` - // LastModifiedTime - READ-ONLY; The account last modified time. - LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"` - // Endpoint - READ-ONLY; The full CName endpoint for this account. - Endpoint *string `json:"endpoint,omitempty"` -} - -// MarshalJSON is the custom marshaler for DataLakeStoreAccountProperties. -func (dlsap DataLakeStoreAccountProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// DataLakeStoreAccountPropertiesBasic the basic account specific properties that are associated with an -// underlying Data Lake Store account. -type DataLakeStoreAccountPropertiesBasic struct { - // AccountID - READ-ONLY; The unique identifier associated with this Data Lake Store account. - AccountID *uuid.UUID `json:"accountId,omitempty"` - // ProvisioningState - READ-ONLY; The provisioning status of the Data Lake Store account. Possible values include: 'Failed', 'Creating', 'Running', 'Succeeded', 'Patching', 'Suspending', 'Resuming', 'Deleting', 'Deleted', 'Undeleting', 'Canceled' - ProvisioningState DataLakeStoreAccountStatus `json:"provisioningState,omitempty"` - // State - READ-ONLY; The state of the Data Lake Store account. Possible values include: 'Active', 'Suspended' - State DataLakeStoreAccountState `json:"state,omitempty"` - // CreationTime - READ-ONLY; The account creation time. - CreationTime *date.Time `json:"creationTime,omitempty"` - // LastModifiedTime - READ-ONLY; The account last modified time. - LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"` - // Endpoint - READ-ONLY; The full CName endpoint for this account. - Endpoint *string `json:"endpoint,omitempty"` -} - -// MarshalJSON is the custom marshaler for DataLakeStoreAccountPropertiesBasic. -func (dlsapb DataLakeStoreAccountPropertiesBasic) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// EncryptionConfig the encryption configuration for the account. -type EncryptionConfig struct { - // Type - The type of encryption configuration being used. Currently the only supported types are 'UserManaged' and 'ServiceManaged'. Possible values include: 'UserManaged', 'ServiceManaged' - Type EncryptionConfigType `json:"type,omitempty"` - // KeyVaultMetaInfo - The Key Vault information for connecting to user managed encryption keys. - KeyVaultMetaInfo *KeyVaultMetaInfo `json:"keyVaultMetaInfo,omitempty"` -} - -// EncryptionIdentity the encryption identity properties. 
-type EncryptionIdentity struct { - // Type - The type of encryption being used. Currently the only supported type is 'SystemAssigned'. - Type *string `json:"type,omitempty"` - // PrincipalID - READ-ONLY; The principal identifier associated with the encryption. - PrincipalID *uuid.UUID `json:"principalId,omitempty"` - // TenantID - READ-ONLY; The tenant identifier associated with the encryption. - TenantID *uuid.UUID `json:"tenantId,omitempty"` -} - -// MarshalJSON is the custom marshaler for EncryptionIdentity. -func (ei EncryptionIdentity) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ei.Type != nil { - objectMap["type"] = ei.Type - } - return json.Marshal(objectMap) -} - -// FirewallRule data Lake Store firewall rule information. -type FirewallRule struct { - autorest.Response `json:"-"` - // FirewallRuleProperties - READ-ONLY; The firewall rule properties. - *FirewallRuleProperties `json:"properties,omitempty"` - // ID - READ-ONLY; The resource identifier. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The resource name. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The resource type. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for FirewallRule. -func (fr FirewallRule) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for FirewallRule struct. -func (fr *FirewallRule) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var firewallRuleProperties FirewallRuleProperties - err = json.Unmarshal(*v, &firewallRuleProperties) - if err != nil { - return err - } - fr.FirewallRuleProperties = &firewallRuleProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - fr.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - fr.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - fr.Type = &typeVar - } - } - } - - return nil -} - -// FirewallRuleListResult data Lake Store firewall rule list information. -type FirewallRuleListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The results of the list operation. - Value *[]FirewallRule `json:"value,omitempty"` - // NextLink - READ-ONLY; The link (url) to the next page of results. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for FirewallRuleListResult. -func (frlr FirewallRuleListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// FirewallRuleListResultIterator provides access to a complete listing of FirewallRule values. -type FirewallRuleListResultIterator struct { - i int - page FirewallRuleListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. 
-func (iter *FirewallRuleListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FirewallRuleListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *FirewallRuleListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter FirewallRuleListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter FirewallRuleListResultIterator) Response() FirewallRuleListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter FirewallRuleListResultIterator) Value() FirewallRule { - if !iter.page.NotDone() { - return FirewallRule{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the FirewallRuleListResultIterator type. -func NewFirewallRuleListResultIterator(page FirewallRuleListResultPage) FirewallRuleListResultIterator { - return FirewallRuleListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (frlr FirewallRuleListResult) IsEmpty() bool { - return frlr.Value == nil || len(*frlr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (frlr FirewallRuleListResult) hasNextLink() bool { - return frlr.NextLink != nil && len(*frlr.NextLink) != 0 -} - -// firewallRuleListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (frlr FirewallRuleListResult) firewallRuleListResultPreparer(ctx context.Context) (*http.Request, error) { - if !frlr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(frlr.NextLink))) -} - -// FirewallRuleListResultPage contains a page of FirewallRule values. -type FirewallRuleListResultPage struct { - fn func(context.Context, FirewallRuleListResult) (FirewallRuleListResult, error) - frlr FirewallRuleListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. 
-func (page *FirewallRuleListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FirewallRuleListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.frlr) - if err != nil { - return err - } - page.frlr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *FirewallRuleListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page FirewallRuleListResultPage) NotDone() bool { - return !page.frlr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page FirewallRuleListResultPage) Response() FirewallRuleListResult { - return page.frlr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page FirewallRuleListResultPage) Values() []FirewallRule { - if page.frlr.IsEmpty() { - return nil - } - return *page.frlr.Value -} - -// Creates a new instance of the FirewallRuleListResultPage type. -func NewFirewallRuleListResultPage(cur FirewallRuleListResult, getNextPage func(context.Context, FirewallRuleListResult) (FirewallRuleListResult, error)) FirewallRuleListResultPage { - return FirewallRuleListResultPage{ - fn: getNextPage, - frlr: cur, - } -} - -// FirewallRuleProperties the firewall rule properties. -type FirewallRuleProperties struct { - // StartIPAddress - READ-ONLY; The start IP address for the firewall rule. This can be either ipv4 or ipv6. Start and End should be in the same protocol. - StartIPAddress *string `json:"startIpAddress,omitempty"` - // EndIPAddress - READ-ONLY; The end IP address for the firewall rule. This can be either ipv4 or ipv6. Start and End should be in the same protocol. - EndIPAddress *string `json:"endIpAddress,omitempty"` -} - -// MarshalJSON is the custom marshaler for FirewallRuleProperties. -func (frp FirewallRuleProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// KeyVaultMetaInfo metadata information used by account encryption. -type KeyVaultMetaInfo struct { - // KeyVaultResourceID - The resource identifier for the user managed Key Vault being used to encrypt. - KeyVaultResourceID *string `json:"keyVaultResourceId,omitempty"` - // EncryptionKeyName - The name of the user managed encryption key. - EncryptionKeyName *string `json:"encryptionKeyName,omitempty"` - // EncryptionKeyVersion - The version of the user managed encryption key. - EncryptionKeyVersion *string `json:"encryptionKeyVersion,omitempty"` -} - -// NameAvailabilityInformation data Lake Store account name availability result information. -type NameAvailabilityInformation struct { - autorest.Response `json:"-"` - // NameAvailable - READ-ONLY; The Boolean value of true or false to indicate whether the Data Lake Store account name is available or not. 
- NameAvailable *bool `json:"nameAvailable,omitempty"` - // Reason - READ-ONLY; The reason why the Data Lake Store account name is not available, if nameAvailable is false. - Reason *string `json:"reason,omitempty"` - // Message - READ-ONLY; The message describing why the Data Lake Store account name is not available, if nameAvailable is false. - Message *string `json:"message,omitempty"` -} - -// MarshalJSON is the custom marshaler for NameAvailabilityInformation. -func (nai NameAvailabilityInformation) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// Operation an available operation for Data Lake Store. -type Operation struct { - // Name - READ-ONLY; The name of the operation. - Name *string `json:"name,omitempty"` - // Display - The display information for the operation. - Display *OperationDisplay `json:"display,omitempty"` - // Origin - READ-ONLY; The intended executor of the operation. Possible values include: 'User', 'System', 'Usersystem' - Origin OperationOrigin `json:"origin,omitempty"` -} - -// MarshalJSON is the custom marshaler for Operation. -func (o Operation) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if o.Display != nil { - objectMap["display"] = o.Display - } - return json.Marshal(objectMap) -} - -// OperationDisplay the display information for a particular operation. -type OperationDisplay struct { - // Provider - READ-ONLY; The resource provider of the operation. - Provider *string `json:"provider,omitempty"` - // Resource - READ-ONLY; The resource type of the operation. - Resource *string `json:"resource,omitempty"` - // Operation - READ-ONLY; A friendly name of the operation. - Operation *string `json:"operation,omitempty"` - // Description - READ-ONLY; A friendly description of the operation. - Description *string `json:"description,omitempty"` -} - -// MarshalJSON is the custom marshaler for OperationDisplay. -func (od OperationDisplay) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// OperationListResult the list of available operations for Data Lake Store. -type OperationListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The results of the list operation. - Value *[]Operation `json:"value,omitempty"` - // NextLink - READ-ONLY; The link (url) to the next page of results. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for OperationListResult. -func (olr OperationListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// Resource the resource model definition. -type Resource struct { - // ID - READ-ONLY; The resource identifier. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The resource name. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The resource type. - Type *string `json:"type,omitempty"` - // Location - READ-ONLY; The resource location. - Location *string `json:"location,omitempty"` - // Tags - READ-ONLY; The resource tags. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for Resource. -func (r Resource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// SubResource the resource model definition for a nested resource. -type SubResource struct { - // ID - READ-ONLY; The resource identifier. 
- ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The resource name. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The resource type. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for SubResource. -func (sr SubResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// TrustedIDProvider data Lake Store trusted identity provider information. -type TrustedIDProvider struct { - autorest.Response `json:"-"` - // TrustedIDProviderProperties - READ-ONLY; The trusted identity provider properties. - *TrustedIDProviderProperties `json:"properties,omitempty"` - // ID - READ-ONLY; The resource identifier. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The resource name. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The resource type. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for TrustedIDProvider. -func (tip TrustedIDProvider) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for TrustedIDProvider struct. -func (tip *TrustedIDProvider) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var trustedIDProviderProperties TrustedIDProviderProperties - err = json.Unmarshal(*v, &trustedIDProviderProperties) - if err != nil { - return err - } - tip.TrustedIDProviderProperties = &trustedIDProviderProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - tip.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - tip.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - tip.Type = &typeVar - } - } - } - - return nil -} - -// TrustedIDProviderListResult data Lake Store trusted identity provider list information. -type TrustedIDProviderListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The results of the list operation. - Value *[]TrustedIDProvider `json:"value,omitempty"` - // NextLink - READ-ONLY; The link (url) to the next page of results. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for TrustedIDProviderListResult. -func (tiplr TrustedIDProviderListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// TrustedIDProviderListResultIterator provides access to a complete listing of TrustedIDProvider values. -type TrustedIDProviderListResultIterator struct { - i int - page TrustedIDProviderListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. 
-func (iter *TrustedIDProviderListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/TrustedIDProviderListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *TrustedIDProviderListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter TrustedIDProviderListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter TrustedIDProviderListResultIterator) Response() TrustedIDProviderListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter TrustedIDProviderListResultIterator) Value() TrustedIDProvider { - if !iter.page.NotDone() { - return TrustedIDProvider{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the TrustedIDProviderListResultIterator type. -func NewTrustedIDProviderListResultIterator(page TrustedIDProviderListResultPage) TrustedIDProviderListResultIterator { - return TrustedIDProviderListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (tiplr TrustedIDProviderListResult) IsEmpty() bool { - return tiplr.Value == nil || len(*tiplr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (tiplr TrustedIDProviderListResult) hasNextLink() bool { - return tiplr.NextLink != nil && len(*tiplr.NextLink) != 0 -} - -// trustedIDProviderListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (tiplr TrustedIDProviderListResult) trustedIDProviderListResultPreparer(ctx context.Context) (*http.Request, error) { - if !tiplr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(tiplr.NextLink))) -} - -// TrustedIDProviderListResultPage contains a page of TrustedIDProvider values. -type TrustedIDProviderListResultPage struct { - fn func(context.Context, TrustedIDProviderListResult) (TrustedIDProviderListResult, error) - tiplr TrustedIDProviderListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. 
-func (page *TrustedIDProviderListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/TrustedIDProviderListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.tiplr) - if err != nil { - return err - } - page.tiplr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *TrustedIDProviderListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page TrustedIDProviderListResultPage) NotDone() bool { - return !page.tiplr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page TrustedIDProviderListResultPage) Response() TrustedIDProviderListResult { - return page.tiplr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page TrustedIDProviderListResultPage) Values() []TrustedIDProvider { - if page.tiplr.IsEmpty() { - return nil - } - return *page.tiplr.Value -} - -// Creates a new instance of the TrustedIDProviderListResultPage type. -func NewTrustedIDProviderListResultPage(cur TrustedIDProviderListResult, getNextPage func(context.Context, TrustedIDProviderListResult) (TrustedIDProviderListResult, error)) TrustedIDProviderListResultPage { - return TrustedIDProviderListResultPage{ - fn: getNextPage, - tiplr: cur, - } -} - -// TrustedIDProviderProperties the trusted identity provider properties. -type TrustedIDProviderProperties struct { - // IDProvider - READ-ONLY; The URL of this trusted identity provider. - IDProvider *string `json:"idProvider,omitempty"` -} - -// MarshalJSON is the custom marshaler for TrustedIDProviderProperties. -func (tipp TrustedIDProviderProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// UpdateDataLakeStoreAccountParameters data Lake Store account information to update. -type UpdateDataLakeStoreAccountParameters struct { - // Tags - Resource tags - Tags map[string]*string `json:"tags"` - // UpdateDataLakeStoreAccountProperties - The Data Lake Store account properties to update. - *UpdateDataLakeStoreAccountProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for UpdateDataLakeStoreAccountParameters. -func (udlsap UpdateDataLakeStoreAccountParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if udlsap.Tags != nil { - objectMap["tags"] = udlsap.Tags - } - if udlsap.UpdateDataLakeStoreAccountProperties != nil { - objectMap["properties"] = udlsap.UpdateDataLakeStoreAccountProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for UpdateDataLakeStoreAccountParameters struct. 
-func (udlsap *UpdateDataLakeStoreAccountParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - udlsap.Tags = tags - } - case "properties": - if v != nil { - var updateDataLakeStoreAccountProperties UpdateDataLakeStoreAccountProperties - err = json.Unmarshal(*v, &updateDataLakeStoreAccountProperties) - if err != nil { - return err - } - udlsap.UpdateDataLakeStoreAccountProperties = &updateDataLakeStoreAccountProperties - } - } - } - - return nil -} - -// UpdateDataLakeStoreAccountProperties data Lake Store account properties information to be updated. -type UpdateDataLakeStoreAccountProperties struct { - // DefaultGroup - The default owner group for all new folders and files created in the Data Lake Store account. - DefaultGroup *string `json:"defaultGroup,omitempty"` - // EncryptionConfig - Used for rotation of user managed Key Vault keys. Can only be used to rotate a user managed encryption Key Vault key. - EncryptionConfig *UpdateEncryptionConfig `json:"encryptionConfig,omitempty"` - // FirewallRules - The list of firewall rules associated with this Data Lake Store account. - FirewallRules *[]UpdateFirewallRuleWithAccountParameters `json:"firewallRules,omitempty"` - // VirtualNetworkRules - The list of virtual network rules associated with this Data Lake Store account. - VirtualNetworkRules *[]UpdateVirtualNetworkRuleWithAccountParameters `json:"virtualNetworkRules,omitempty"` - // FirewallState - The current state of the IP address firewall for this Data Lake Store account. Disabling the firewall does not remove existing rules, they will just be ignored until the firewall is re-enabled. Possible values include: 'FirewallStateEnabled', 'FirewallStateDisabled' - FirewallState FirewallState `json:"firewallState,omitempty"` - // FirewallAllowAzureIps - The current state of allowing or disallowing IPs originating within Azure through the firewall. If the firewall is disabled, this is not enforced. Possible values include: 'FirewallAllowAzureIpsStateEnabled', 'FirewallAllowAzureIpsStateDisabled' - FirewallAllowAzureIps FirewallAllowAzureIpsState `json:"firewallAllowAzureIps,omitempty"` - // TrustedIDProviders - The list of trusted identity providers associated with this Data Lake Store account. - TrustedIDProviders *[]UpdateTrustedIDProviderWithAccountParameters `json:"trustedIdProviders,omitempty"` - // TrustedIDProviderState - The current state of the trusted identity provider feature for this Data Lake Store account. Disabling trusted identity provider functionality does not remove the providers, they will just be ignored until this feature is re-enabled. Possible values include: 'TrustedIDProviderStateEnabled', 'TrustedIDProviderStateDisabled' - TrustedIDProviderState TrustedIDProviderState `json:"trustedIdProviderState,omitempty"` - // NewTier - The commitment tier to use for next month. Possible values include: 'Consumption', 'Commitment1TB', 'Commitment10TB', 'Commitment100TB', 'Commitment500TB', 'Commitment1PB', 'Commitment5PB' - NewTier TierType `json:"newTier,omitempty"` -} - -// UpdateEncryptionConfig the encryption configuration used to update a user managed Key Vault key. -type UpdateEncryptionConfig struct { - // KeyVaultMetaInfo - The updated Key Vault key to use in user managed key rotation. 
- KeyVaultMetaInfo *UpdateKeyVaultMetaInfo `json:"keyVaultMetaInfo,omitempty"` -} - -// UpdateFirewallRuleParameters the parameters used to update a firewall rule. -type UpdateFirewallRuleParameters struct { - // UpdateFirewallRuleProperties - The firewall rule properties to use when updating a firewall rule. - *UpdateFirewallRuleProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for UpdateFirewallRuleParameters. -func (ufrp UpdateFirewallRuleParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ufrp.UpdateFirewallRuleProperties != nil { - objectMap["properties"] = ufrp.UpdateFirewallRuleProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for UpdateFirewallRuleParameters struct. -func (ufrp *UpdateFirewallRuleParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var updateFirewallRuleProperties UpdateFirewallRuleProperties - err = json.Unmarshal(*v, &updateFirewallRuleProperties) - if err != nil { - return err - } - ufrp.UpdateFirewallRuleProperties = &updateFirewallRuleProperties - } - } - } - - return nil -} - -// UpdateFirewallRuleProperties the firewall rule properties to use when updating a firewall rule. -type UpdateFirewallRuleProperties struct { - // StartIPAddress - The start IP address for the firewall rule. This can be either ipv4 or ipv6. Start and End should be in the same protocol. - StartIPAddress *string `json:"startIpAddress,omitempty"` - // EndIPAddress - The end IP address for the firewall rule. This can be either ipv4 or ipv6. Start and End should be in the same protocol. - EndIPAddress *string `json:"endIpAddress,omitempty"` -} - -// UpdateFirewallRuleWithAccountParameters the parameters used to update a firewall rule while updating a -// Data Lake Store account. -type UpdateFirewallRuleWithAccountParameters struct { - // Name - The unique name of the firewall rule to update. - Name *string `json:"name,omitempty"` - // UpdateFirewallRuleProperties - The firewall rule properties to use when updating a firewall rule. - *UpdateFirewallRuleProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for UpdateFirewallRuleWithAccountParameters. -func (ufrwap UpdateFirewallRuleWithAccountParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ufrwap.Name != nil { - objectMap["name"] = ufrwap.Name - } - if ufrwap.UpdateFirewallRuleProperties != nil { - objectMap["properties"] = ufrwap.UpdateFirewallRuleProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for UpdateFirewallRuleWithAccountParameters struct. 
-func (ufrwap *UpdateFirewallRuleWithAccountParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - ufrwap.Name = &name - } - case "properties": - if v != nil { - var updateFirewallRuleProperties UpdateFirewallRuleProperties - err = json.Unmarshal(*v, &updateFirewallRuleProperties) - if err != nil { - return err - } - ufrwap.UpdateFirewallRuleProperties = &updateFirewallRuleProperties - } - } - } - - return nil -} - -// UpdateKeyVaultMetaInfo the Key Vault update information used for user managed key rotation. -type UpdateKeyVaultMetaInfo struct { - // EncryptionKeyVersion - The version of the user managed encryption key to update through a key rotation. - EncryptionKeyVersion *string `json:"encryptionKeyVersion,omitempty"` -} - -// UpdateTrustedIDProviderParameters the parameters used to update a trusted identity provider. -type UpdateTrustedIDProviderParameters struct { - // UpdateTrustedIDProviderProperties - The trusted identity provider properties to use when updating a trusted identity provider. - *UpdateTrustedIDProviderProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for UpdateTrustedIDProviderParameters. -func (utipp UpdateTrustedIDProviderParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if utipp.UpdateTrustedIDProviderProperties != nil { - objectMap["properties"] = utipp.UpdateTrustedIDProviderProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for UpdateTrustedIDProviderParameters struct. -func (utipp *UpdateTrustedIDProviderParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var updateTrustedIDProviderProperties UpdateTrustedIDProviderProperties - err = json.Unmarshal(*v, &updateTrustedIDProviderProperties) - if err != nil { - return err - } - utipp.UpdateTrustedIDProviderProperties = &updateTrustedIDProviderProperties - } - } - } - - return nil -} - -// UpdateTrustedIDProviderProperties the trusted identity provider properties to use when updating a -// trusted identity provider. -type UpdateTrustedIDProviderProperties struct { - // IDProvider - The URL of this trusted identity provider. - IDProvider *string `json:"idProvider,omitempty"` -} - -// UpdateTrustedIDProviderWithAccountParameters the parameters used to update a trusted identity provider -// while updating a Data Lake Store account. -type UpdateTrustedIDProviderWithAccountParameters struct { - // Name - The unique name of the trusted identity provider to update. - Name *string `json:"name,omitempty"` - // UpdateTrustedIDProviderProperties - The trusted identity provider properties to use when updating a trusted identity provider. - *UpdateTrustedIDProviderProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for UpdateTrustedIDProviderWithAccountParameters. 
-func (utipwap UpdateTrustedIDProviderWithAccountParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if utipwap.Name != nil { - objectMap["name"] = utipwap.Name - } - if utipwap.UpdateTrustedIDProviderProperties != nil { - objectMap["properties"] = utipwap.UpdateTrustedIDProviderProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for UpdateTrustedIDProviderWithAccountParameters struct. -func (utipwap *UpdateTrustedIDProviderWithAccountParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - utipwap.Name = &name - } - case "properties": - if v != nil { - var updateTrustedIDProviderProperties UpdateTrustedIDProviderProperties - err = json.Unmarshal(*v, &updateTrustedIDProviderProperties) - if err != nil { - return err - } - utipwap.UpdateTrustedIDProviderProperties = &updateTrustedIDProviderProperties - } - } - } - - return nil -} - -// UpdateVirtualNetworkRuleParameters the parameters used to update a virtual network rule. -type UpdateVirtualNetworkRuleParameters struct { - // UpdateVirtualNetworkRuleProperties - The virtual network rule properties to use when updating a virtual network rule. - *UpdateVirtualNetworkRuleProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for UpdateVirtualNetworkRuleParameters. -func (uvnrp UpdateVirtualNetworkRuleParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if uvnrp.UpdateVirtualNetworkRuleProperties != nil { - objectMap["properties"] = uvnrp.UpdateVirtualNetworkRuleProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for UpdateVirtualNetworkRuleParameters struct. -func (uvnrp *UpdateVirtualNetworkRuleParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var updateVirtualNetworkRuleProperties UpdateVirtualNetworkRuleProperties - err = json.Unmarshal(*v, &updateVirtualNetworkRuleProperties) - if err != nil { - return err - } - uvnrp.UpdateVirtualNetworkRuleProperties = &updateVirtualNetworkRuleProperties - } - } - } - - return nil -} - -// UpdateVirtualNetworkRuleProperties the virtual network rule properties to use when updating a virtual -// network rule. -type UpdateVirtualNetworkRuleProperties struct { - // SubnetID - The resource identifier for the subnet. - SubnetID *string `json:"subnetId,omitempty"` -} - -// UpdateVirtualNetworkRuleWithAccountParameters the parameters used to update a virtual network rule while -// updating a Data Lake Store account. -type UpdateVirtualNetworkRuleWithAccountParameters struct { - // Name - The unique name of the virtual network rule to update. - Name *string `json:"name,omitempty"` - // UpdateVirtualNetworkRuleProperties - The virtual network rule properties to use when updating a virtual network rule. - *UpdateVirtualNetworkRuleProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for UpdateVirtualNetworkRuleWithAccountParameters. 
-func (uvnrwap UpdateVirtualNetworkRuleWithAccountParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if uvnrwap.Name != nil { - objectMap["name"] = uvnrwap.Name - } - if uvnrwap.UpdateVirtualNetworkRuleProperties != nil { - objectMap["properties"] = uvnrwap.UpdateVirtualNetworkRuleProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for UpdateVirtualNetworkRuleWithAccountParameters struct. -func (uvnrwap *UpdateVirtualNetworkRuleWithAccountParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - uvnrwap.Name = &name - } - case "properties": - if v != nil { - var updateVirtualNetworkRuleProperties UpdateVirtualNetworkRuleProperties - err = json.Unmarshal(*v, &updateVirtualNetworkRuleProperties) - if err != nil { - return err - } - uvnrwap.UpdateVirtualNetworkRuleProperties = &updateVirtualNetworkRuleProperties - } - } - } - - return nil -} - -// Usage describes the Resource Usage. -type Usage struct { - // Unit - READ-ONLY; Gets the unit of measurement. Possible values include: 'Count', 'Bytes', 'Seconds', 'Percent', 'CountsPerSecond', 'BytesPerSecond' - Unit UsageUnit `json:"unit,omitempty"` - // ID - READ-ONLY; Resource identifier. - ID *string `json:"id,omitempty"` - // CurrentValue - READ-ONLY; Gets the current count of the allocated resources in the subscription. - CurrentValue *int32 `json:"currentValue,omitempty"` - // Limit - READ-ONLY; Gets the maximum count of the resources that can be allocated in the subscription. - Limit *int32 `json:"limit,omitempty"` - // Name - READ-ONLY; Gets the name of the type of usage. - Name *UsageName `json:"name,omitempty"` -} - -// MarshalJSON is the custom marshaler for Usage. -func (u Usage) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// UsageListResult the response from the List Usages operation. -type UsageListResult struct { - autorest.Response `json:"-"` - // Value - Gets or sets the list of Storage Resource Usages. - Value *[]Usage `json:"value,omitempty"` -} - -// UsageName the usage names that can be used. -type UsageName struct { - // Value - READ-ONLY; Gets a string describing the resource name. - Value *string `json:"value,omitempty"` - // LocalizedValue - READ-ONLY; Gets a localized string describing the resource name. - LocalizedValue *string `json:"localizedValue,omitempty"` -} - -// MarshalJSON is the custom marshaler for UsageName. -func (un UsageName) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// VirtualNetworkRule data Lake Store virtual network rule information. -type VirtualNetworkRule struct { - autorest.Response `json:"-"` - // VirtualNetworkRuleProperties - READ-ONLY; The virtual network rule properties. - *VirtualNetworkRuleProperties `json:"properties,omitempty"` - // ID - READ-ONLY; The resource identifier. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The resource name. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The resource type. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for VirtualNetworkRule. 
-func (vnr VirtualNetworkRule) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for VirtualNetworkRule struct. -func (vnr *VirtualNetworkRule) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var virtualNetworkRuleProperties VirtualNetworkRuleProperties - err = json.Unmarshal(*v, &virtualNetworkRuleProperties) - if err != nil { - return err - } - vnr.VirtualNetworkRuleProperties = &virtualNetworkRuleProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - vnr.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - vnr.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - vnr.Type = &typeVar - } - } - } - - return nil -} - -// VirtualNetworkRuleListResult data Lake Store virtual network rule list information. -type VirtualNetworkRuleListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The results of the list operation. - Value *[]VirtualNetworkRule `json:"value,omitempty"` - // NextLink - READ-ONLY; The link (url) to the next page of results. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for VirtualNetworkRuleListResult. -func (vnrlr VirtualNetworkRuleListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// VirtualNetworkRuleListResultIterator provides access to a complete listing of VirtualNetworkRule values. -type VirtualNetworkRuleListResultIterator struct { - i int - page VirtualNetworkRuleListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *VirtualNetworkRuleListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkRuleListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *VirtualNetworkRuleListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter VirtualNetworkRuleListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. 
-func (iter VirtualNetworkRuleListResultIterator) Response() VirtualNetworkRuleListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter VirtualNetworkRuleListResultIterator) Value() VirtualNetworkRule { - if !iter.page.NotDone() { - return VirtualNetworkRule{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the VirtualNetworkRuleListResultIterator type. -func NewVirtualNetworkRuleListResultIterator(page VirtualNetworkRuleListResultPage) VirtualNetworkRuleListResultIterator { - return VirtualNetworkRuleListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (vnrlr VirtualNetworkRuleListResult) IsEmpty() bool { - return vnrlr.Value == nil || len(*vnrlr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (vnrlr VirtualNetworkRuleListResult) hasNextLink() bool { - return vnrlr.NextLink != nil && len(*vnrlr.NextLink) != 0 -} - -// virtualNetworkRuleListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (vnrlr VirtualNetworkRuleListResult) virtualNetworkRuleListResultPreparer(ctx context.Context) (*http.Request, error) { - if !vnrlr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(vnrlr.NextLink))) -} - -// VirtualNetworkRuleListResultPage contains a page of VirtualNetworkRule values. -type VirtualNetworkRuleListResultPage struct { - fn func(context.Context, VirtualNetworkRuleListResult) (VirtualNetworkRuleListResult, error) - vnrlr VirtualNetworkRuleListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *VirtualNetworkRuleListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkRuleListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.vnrlr) - if err != nil { - return err - } - page.vnrlr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *VirtualNetworkRuleListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page VirtualNetworkRuleListResultPage) NotDone() bool { - return !page.vnrlr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page VirtualNetworkRuleListResultPage) Response() VirtualNetworkRuleListResult { - return page.vnrlr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page VirtualNetworkRuleListResultPage) Values() []VirtualNetworkRule { - if page.vnrlr.IsEmpty() { - return nil - } - return *page.vnrlr.Value -} - -// Creates a new instance of the VirtualNetworkRuleListResultPage type. 
-func NewVirtualNetworkRuleListResultPage(cur VirtualNetworkRuleListResult, getNextPage func(context.Context, VirtualNetworkRuleListResult) (VirtualNetworkRuleListResult, error)) VirtualNetworkRuleListResultPage { - return VirtualNetworkRuleListResultPage{ - fn: getNextPage, - vnrlr: cur, - } -} - -// VirtualNetworkRuleProperties the virtual network rule properties. -type VirtualNetworkRuleProperties struct { - // SubnetID - READ-ONLY; The resource identifier for the subnet. - SubnetID *string `json:"subnetId,omitempty"` -} - -// MarshalJSON is the custom marshaler for VirtualNetworkRuleProperties. -func (vnrp VirtualNetworkRuleProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/trustedidproviders.go b/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/trustedidproviders.go deleted file mode 100644 index da50f3777e5d..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/trustedidproviders.go +++ /dev/null @@ -1,481 +0,0 @@ -package account - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// TrustedIDProvidersClient is the creates an Azure Data Lake Store account management client. -type TrustedIDProvidersClient struct { - BaseClient -} - -// NewTrustedIDProvidersClient creates an instance of the TrustedIDProvidersClient client. -func NewTrustedIDProvidersClient(subscriptionID string) TrustedIDProvidersClient { - return NewTrustedIDProvidersClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewTrustedIDProvidersClientWithBaseURI creates an instance of the TrustedIDProvidersClient client using a custom -// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure -// stack). -func NewTrustedIDProvidersClientWithBaseURI(baseURI string, subscriptionID string) TrustedIDProvidersClient { - return TrustedIDProvidersClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate creates or updates the specified trusted identity provider. During update, the trusted identity -// provider with the specified name will be replaced with this new provider -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Store account. -// trustedIDProviderName - the name of the trusted identity provider. This is used for differentiation of -// providers in the account. -// parameters - parameters supplied to create or replace the trusted identity provider. 
-func (client TrustedIDProvidersClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, trustedIDProviderName string, parameters CreateOrUpdateTrustedIDProviderParameters) (result TrustedIDProvider, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/TrustedIDProvidersClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.CreateOrUpdateTrustedIDProviderProperties", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.CreateOrUpdateTrustedIDProviderProperties.IDProvider", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { - return result, validation.NewError("account.TrustedIDProvidersClient", "CreateOrUpdate", err.Error()) - } - - req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, trustedIDProviderName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "account.TrustedIDProvidersClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.TrustedIDProvidersClient", "CreateOrUpdate", resp, "Failure sending request") - return - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.TrustedIDProvidersClient", "CreateOrUpdate", resp, "Failure responding to request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client TrustedIDProvidersClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, trustedIDProviderName string, parameters CreateOrUpdateTrustedIDProviderParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "trustedIdProviderName": autorest.Encode("path", trustedIDProviderName), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/trustedIdProviders/{trustedIdProviderName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client TrustedIDProvidersClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. 
-func (client TrustedIDProvidersClient) CreateOrUpdateResponder(resp *http.Response) (result TrustedIDProvider, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes the specified trusted identity provider from the specified Data Lake Store account -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Store account. -// trustedIDProviderName - the name of the trusted identity provider to delete. -func (client TrustedIDProvidersClient) Delete(ctx context.Context, resourceGroupName string, accountName string, trustedIDProviderName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/TrustedIDProvidersClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, trustedIDProviderName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.TrustedIDProvidersClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "account.TrustedIDProvidersClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.TrustedIDProvidersClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client TrustedIDProvidersClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, trustedIDProviderName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "trustedIdProviderName": autorest.Encode("path", trustedIDProviderName), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/trustedIdProviders/{trustedIdProviderName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client TrustedIDProvidersClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. 
-func (client TrustedIDProvidersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets the specified Data Lake Store trusted identity provider. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Store account. -// trustedIDProviderName - the name of the trusted identity provider to retrieve. -func (client TrustedIDProvidersClient) Get(ctx context.Context, resourceGroupName string, accountName string, trustedIDProviderName string) (result TrustedIDProvider, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/TrustedIDProvidersClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, resourceGroupName, accountName, trustedIDProviderName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.TrustedIDProvidersClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.TrustedIDProvidersClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.TrustedIDProvidersClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. -func (client TrustedIDProvidersClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, trustedIDProviderName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "trustedIdProviderName": autorest.Encode("path", trustedIDProviderName), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/trustedIdProviders/{trustedIdProviderName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client TrustedIDProvidersClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. 
-func (client TrustedIDProvidersClient) GetResponder(resp *http.Response) (result TrustedIDProvider, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListByAccount lists the Data Lake Store trusted identity providers within the specified Data Lake Store account. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Store account. -func (client TrustedIDProvidersClient) ListByAccount(ctx context.Context, resourceGroupName string, accountName string) (result TrustedIDProviderListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/TrustedIDProvidersClient.ListByAccount") - defer func() { - sc := -1 - if result.tiplr.Response.Response != nil { - sc = result.tiplr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.fn = client.listByAccountNextResults - req, err := client.ListByAccountPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.TrustedIDProvidersClient", "ListByAccount", nil, "Failure preparing request") - return - } - - resp, err := client.ListByAccountSender(req) - if err != nil { - result.tiplr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.TrustedIDProvidersClient", "ListByAccount", resp, "Failure sending request") - return - } - - result.tiplr, err = client.ListByAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.TrustedIDProvidersClient", "ListByAccount", resp, "Failure responding to request") - return - } - if result.tiplr.hasNextLink() && result.tiplr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListByAccountPreparer prepares the ListByAccount request. -func (client TrustedIDProvidersClient) ListByAccountPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/trustedIdProviders", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByAccountSender sends the ListByAccount request. The method will close the -// http.Response Body if it receives an error. -func (client TrustedIDProvidersClient) ListByAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByAccountResponder handles the response to the ListByAccount request. The method always -// closes the http.Response Body. 
-func (client TrustedIDProvidersClient) ListByAccountResponder(resp *http.Response) (result TrustedIDProviderListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listByAccountNextResults retrieves the next set of results, if any. -func (client TrustedIDProvidersClient) listByAccountNextResults(ctx context.Context, lastResults TrustedIDProviderListResult) (result TrustedIDProviderListResult, err error) { - req, err := lastResults.trustedIDProviderListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "account.TrustedIDProvidersClient", "listByAccountNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListByAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "account.TrustedIDProvidersClient", "listByAccountNextResults", resp, "Failure sending next results request") - } - result, err = client.ListByAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.TrustedIDProvidersClient", "listByAccountNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListByAccountComplete enumerates all values, automatically crossing page boundaries as required. -func (client TrustedIDProvidersClient) ListByAccountComplete(ctx context.Context, resourceGroupName string, accountName string) (result TrustedIDProviderListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/TrustedIDProvidersClient.ListByAccount") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListByAccount(ctx, resourceGroupName, accountName) - return -} - -// Update updates the specified trusted identity provider. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Store account. -// trustedIDProviderName - the name of the trusted identity provider. This is used for differentiation of -// providers in the account. -// parameters - parameters supplied to update the trusted identity provider. 
-func (client TrustedIDProvidersClient) Update(ctx context.Context, resourceGroupName string, accountName string, trustedIDProviderName string, parameters *UpdateTrustedIDProviderParameters) (result TrustedIDProvider, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/TrustedIDProvidersClient.Update") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, trustedIDProviderName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "account.TrustedIDProvidersClient", "Update", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.TrustedIDProvidersClient", "Update", resp, "Failure sending request") - return - } - - result, err = client.UpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.TrustedIDProvidersClient", "Update", resp, "Failure responding to request") - return - } - - return -} - -// UpdatePreparer prepares the Update request. -func (client TrustedIDProvidersClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, trustedIDProviderName string, parameters *UpdateTrustedIDProviderParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "trustedIdProviderName": autorest.Encode("path", trustedIDProviderName), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/trustedIdProviders/{trustedIdProviderName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - if parameters != nil { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithJSON(parameters)) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSender sends the Update request. The method will close the -// http.Response Body if it receives an error. -func (client TrustedIDProvidersClient) UpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// UpdateResponder handles the response to the Update request. The method always -// closes the http.Response Body. 
-func (client TrustedIDProvidersClient) UpdateResponder(resp *http.Response) (result TrustedIDProvider, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/version.go deleted file mode 100644 index 0834dcece3ec..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/version.go +++ /dev/null @@ -1,19 +0,0 @@ -package account - -import "github.com/Azure/azure-sdk-for-go/version" - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// UserAgent returns the UserAgent string to use when sending http.Requests. -func UserAgent() string { - return "Azure-SDK-For-Go/" + Version() + " account/2016-11-01" -} - -// Version returns the semantic version (see http://semver.org) of the client. -func Version() string { - return version.Number -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/virtualnetworkrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/virtualnetworkrules.go deleted file mode 100644 index e26b28610418..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/virtualnetworkrules.go +++ /dev/null @@ -1,479 +0,0 @@ -package account - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// VirtualNetworkRulesClient is the creates an Azure Data Lake Store account management client. -type VirtualNetworkRulesClient struct { - BaseClient -} - -// NewVirtualNetworkRulesClient creates an instance of the VirtualNetworkRulesClient client. -func NewVirtualNetworkRulesClient(subscriptionID string) VirtualNetworkRulesClient { - return NewVirtualNetworkRulesClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewVirtualNetworkRulesClientWithBaseURI creates an instance of the VirtualNetworkRulesClient client using a custom -// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure -// stack). -func NewVirtualNetworkRulesClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworkRulesClient { - return VirtualNetworkRulesClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate creates or updates the specified virtual network rule. During update, the virtual network rule with -// the specified name will be replaced with this new virtual network rule. 
-// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Store account. -// virtualNetworkRuleName - the name of the virtual network rule to create or update. -// parameters - parameters supplied to create or update the virtual network rule. -func (client VirtualNetworkRulesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, virtualNetworkRuleName string, parameters CreateOrUpdateVirtualNetworkRuleParameters) (result VirtualNetworkRule, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkRulesClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.CreateOrUpdateVirtualNetworkRuleProperties", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.CreateOrUpdateVirtualNetworkRuleProperties.SubnetID", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { - return result, validation.NewError("account.VirtualNetworkRulesClient", "CreateOrUpdate", err.Error()) - } - - req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, virtualNetworkRuleName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "account.VirtualNetworkRulesClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.VirtualNetworkRulesClient", "CreateOrUpdate", resp, "Failure sending request") - return - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.VirtualNetworkRulesClient", "CreateOrUpdate", resp, "Failure responding to request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client VirtualNetworkRulesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, virtualNetworkRuleName string, parameters CreateOrUpdateVirtualNetworkRuleParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "virtualNetworkRuleName": autorest.Encode("path", virtualNetworkRuleName), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/virtualNetworkRules/{virtualNetworkRuleName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. 
-func (client VirtualNetworkRulesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client VirtualNetworkRulesClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualNetworkRule, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes the specified virtual network rule from the specified Data Lake Store account. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Store account. -// virtualNetworkRuleName - the name of the virtual network rule to delete. -func (client VirtualNetworkRulesClient) Delete(ctx context.Context, resourceGroupName string, accountName string, virtualNetworkRuleName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkRulesClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, virtualNetworkRuleName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.VirtualNetworkRulesClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "account.VirtualNetworkRulesClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.VirtualNetworkRulesClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client VirtualNetworkRulesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, virtualNetworkRuleName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "virtualNetworkRuleName": autorest.Encode("path", virtualNetworkRuleName), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/virtualNetworkRules/{virtualNetworkRuleName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client VirtualNetworkRulesClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. 
The method always -// closes the http.Response Body. -func (client VirtualNetworkRulesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets the specified Data Lake Store virtual network rule. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Store account. -// virtualNetworkRuleName - the name of the virtual network rule to retrieve. -func (client VirtualNetworkRulesClient) Get(ctx context.Context, resourceGroupName string, accountName string, virtualNetworkRuleName string) (result VirtualNetworkRule, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkRulesClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, resourceGroupName, accountName, virtualNetworkRuleName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.VirtualNetworkRulesClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.VirtualNetworkRulesClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.VirtualNetworkRulesClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. -func (client VirtualNetworkRulesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, virtualNetworkRuleName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "virtualNetworkRuleName": autorest.Encode("path", virtualNetworkRuleName), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/virtualNetworkRules/{virtualNetworkRuleName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client VirtualNetworkRulesClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. 
-func (client VirtualNetworkRulesClient) GetResponder(resp *http.Response) (result VirtualNetworkRule, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListByAccount lists the Data Lake Store virtual network rules within the specified Data Lake Store account. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Store account. -func (client VirtualNetworkRulesClient) ListByAccount(ctx context.Context, resourceGroupName string, accountName string) (result VirtualNetworkRuleListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkRulesClient.ListByAccount") - defer func() { - sc := -1 - if result.vnrlr.Response.Response != nil { - sc = result.vnrlr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.fn = client.listByAccountNextResults - req, err := client.ListByAccountPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "account.VirtualNetworkRulesClient", "ListByAccount", nil, "Failure preparing request") - return - } - - resp, err := client.ListByAccountSender(req) - if err != nil { - result.vnrlr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.VirtualNetworkRulesClient", "ListByAccount", resp, "Failure sending request") - return - } - - result.vnrlr, err = client.ListByAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.VirtualNetworkRulesClient", "ListByAccount", resp, "Failure responding to request") - return - } - if result.vnrlr.hasNextLink() && result.vnrlr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListByAccountPreparer prepares the ListByAccount request. -func (client VirtualNetworkRulesClient) ListByAccountPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/virtualNetworkRules", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByAccountSender sends the ListByAccount request. The method will close the -// http.Response Body if it receives an error. -func (client VirtualNetworkRulesClient) ListByAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByAccountResponder handles the response to the ListByAccount request. The method always -// closes the http.Response Body. 
-func (client VirtualNetworkRulesClient) ListByAccountResponder(resp *http.Response) (result VirtualNetworkRuleListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listByAccountNextResults retrieves the next set of results, if any. -func (client VirtualNetworkRulesClient) listByAccountNextResults(ctx context.Context, lastResults VirtualNetworkRuleListResult) (result VirtualNetworkRuleListResult, err error) { - req, err := lastResults.virtualNetworkRuleListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "account.VirtualNetworkRulesClient", "listByAccountNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListByAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "account.VirtualNetworkRulesClient", "listByAccountNextResults", resp, "Failure sending next results request") - } - result, err = client.ListByAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.VirtualNetworkRulesClient", "listByAccountNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListByAccountComplete enumerates all values, automatically crossing page boundaries as required. -func (client VirtualNetworkRulesClient) ListByAccountComplete(ctx context.Context, resourceGroupName string, accountName string) (result VirtualNetworkRuleListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkRulesClient.ListByAccount") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListByAccount(ctx, resourceGroupName, accountName) - return -} - -// Update updates the specified virtual network rule. -// Parameters: -// resourceGroupName - the name of the Azure resource group. -// accountName - the name of the Data Lake Store account. -// virtualNetworkRuleName - the name of the virtual network rule to update. -// parameters - parameters supplied to update the virtual network rule. 
-func (client VirtualNetworkRulesClient) Update(ctx context.Context, resourceGroupName string, accountName string, virtualNetworkRuleName string, parameters *UpdateVirtualNetworkRuleParameters) (result VirtualNetworkRule, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkRulesClient.Update") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, virtualNetworkRuleName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "account.VirtualNetworkRulesClient", "Update", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.VirtualNetworkRulesClient", "Update", resp, "Failure sending request") - return - } - - result, err = client.UpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "account.VirtualNetworkRulesClient", "Update", resp, "Failure responding to request") - return - } - - return -} - -// UpdatePreparer prepares the Update request. -func (client VirtualNetworkRulesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, virtualNetworkRuleName string, parameters *UpdateVirtualNetworkRuleParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "virtualNetworkRuleName": autorest.Encode("path", virtualNetworkRuleName), - } - - const APIVersion = "2016-11-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/virtualNetworkRules/{virtualNetworkRuleName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - if parameters != nil { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithJSON(parameters)) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSender sends the Update request. The method will close the -// http.Response Body if it receives an error. -func (client VirtualNetworkRulesClient) UpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// UpdateResponder handles the response to the Update request. The method always -// closes the http.Response Body. 
-func (client VirtualNetworkRulesClient) UpdateResponder(resp *http.Response) (result VirtualNetworkRule, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/CHANGELOG.md similarity index 100% rename from vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/CHANGELOG.md rename to vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/CHANGELOG.md diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/_meta.json new file mode 100644 index 000000000000..30791794366b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/_meta.json @@ -0,0 +1,11 @@ +{ + "commit": "5fcc6854765009e891052653b304cfe80353430a", + "readme": "/_/azure-rest-api-specs/specification/storage/resource-manager/readme.md", + "tag": "package-2021-04", + "use": "@microsoft.azure/autorest.go@2.1.187", + "repository_url": "https://github.com/Azure/azure-rest-api-specs.git", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-2021-04 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix /_/azure-rest-api-specs/specification/storage/resource-manager/readme.md", + "additional_properties": { + "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix" + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/accounts.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/accounts.go new file mode 100644 index 000000000000..8eb85a11438f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/accounts.go @@ -0,0 +1,1444 @@ +package storage + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// AccountsClient is the the Azure Storage Management API. +type AccountsClient struct { + BaseClient +} + +// NewAccountsClient creates an instance of the AccountsClient client. +func NewAccountsClient(subscriptionID string) AccountsClient { + return NewAccountsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewAccountsClientWithBaseURI creates an instance of the AccountsClient client using a custom endpoint. Use this +// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). 
+func NewAccountsClientWithBaseURI(baseURI string, subscriptionID string) AccountsClient { + return AccountsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckNameAvailability checks that the storage account name is valid and is not already in use. +// Parameters: +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +func (client AccountsClient) CheckNameAvailability(ctx context.Context, accountName AccountCheckNameAvailabilityParameters) (result CheckNameAvailabilityResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.CheckNameAvailability") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName.Name", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "accountName.Type", Name: validation.Null, Rule: true, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.AccountsClient", "CheckNameAvailability", err.Error()) + } + + req, err := client.CheckNameAvailabilityPreparer(ctx, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", nil, "Failure preparing request") + return + } + + resp, err := client.CheckNameAvailabilitySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", resp, "Failure sending request") + return + } + + result, err = client.CheckNameAvailabilityResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", resp, "Failure responding to request") + return + } + + return +} + +// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request. +func (client AccountsClient) CheckNameAvailabilityPreparer(ctx context.Context, accountName AccountCheckNameAvailabilityParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/checkNameAvailability", pathParameters), + autorest.WithJSON(accountName), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. 
The method always +// closes the http.Response Body. +func (client AccountsClient) CheckNameAvailabilityResponder(resp *http.Response) (result CheckNameAvailabilityResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Create asynchronously creates a new storage account with the specified parameters. If an account is already created +// and a subsequent create request is issued with different properties, the account properties will be updated. If an +// account is already created and a subsequent create or update request is issued with the exact same set of +// properties, the request will succeed. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// parameters - the parameters to provide for the created account. +func (client AccountsClient) Create(ctx context.Context, resourceGroupName string, accountName string, parameters AccountCreateParameters) (result AccountsCreateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Create") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Sku", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.Location", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.AccountPropertiesCreateParameters", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.SasPolicy", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.SasPolicy.SasExpirationPeriod", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.AccountPropertiesCreateParameters.SasPolicy.ExpirationAction", Name: validation.Null, Rule: true, Chain: nil}, + }}, + {Target: "parameters.AccountPropertiesCreateParameters.KeyPolicy", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.KeyPolicy.KeyExpirationPeriodInDays", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.AccountPropertiesCreateParameters.CustomDomain", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.CustomDomain.Name", Name: 
validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.DomainName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.NetBiosDomainName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.ForestName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.DomainGUID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.DomainSid", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.AzureStorageSid", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.AccountsClient", "Create", err.Error()) + } + + req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", nil, "Failure preparing request") + return + } + + result, err = client.CreateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", result.Response(), "Failure sending request") + return + } + + return +} + +// CreatePreparer prepares the Create request. +func (client AccountsClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, parameters AccountCreateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. 
+func (client AccountsClient) CreateSender(req *http.Request) (future AccountsCreateFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client AccountsClient) CreateResponder(resp *http.Response) (result Account, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a storage account in Microsoft Azure. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +func (client AccountsClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.AccountsClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", resp, "Failure responding to request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
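The Create/CreateSender/CreateResponder trio above implements a long-running PUT: the caller receives an AccountsCreateFuture, waits for it to complete, and only then reads the Account through the future's Result hook. The sketch below is an editorial illustration rather than part of the vendored file; it assumes the package's generated NewAccountsClient constructor, an environment-based authorizer, and placeholder subscription/resource names, and it passes SKU and kind as raw string casts to avoid depending on specific enum constant names.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage"
	"github.com/Azure/go-autorest/autorest/azure/auth"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Placeholder identifiers; substitute a real subscription, resource group and account name.
	client := storage.NewAccountsClient("00000000-0000-0000-0000-000000000000")
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	ctx := context.Background()
	future, err := client.Create(ctx, "example-resources", "examplestoracct", storage.AccountCreateParameters{
		Sku:      &storage.Sku{Name: storage.SkuName("Standard_LRS")}, // raw casts avoid naming enum constants
		Kind:     storage.Kind("StorageV2"),
		Location: to.StringPtr("westeurope"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// The PUT is long-running: poll until the service reports completion, then read the result.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		log.Fatal(err)
	}
	account, err := future.Result(client)
	if err != nil {
		log.Fatal(err)
	}
	if account.Name != nil {
		fmt.Println("provisioned storage account:", *account.Name)
	}
}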
+func (client AccountsClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client AccountsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Failover failover request can be triggered for a storage account in case of availability issues. The failover occurs +// from the storage account's primary cluster to secondary cluster for RA-GRS accounts. The secondary cluster will +// become primary after failover. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
+func (client AccountsClient) Failover(ctx context.Context, resourceGroupName string, accountName string) (result AccountsFailoverFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Failover") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.AccountsClient", "Failover", err.Error()) + } + + req, err := client.FailoverPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Failover", nil, "Failure preparing request") + return + } + + result, err = client.FailoverSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Failover", result.Response(), "Failure sending request") + return + } + + return +} + +// FailoverPreparer prepares the Failover request. +func (client AccountsClient) FailoverPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/failover", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// FailoverSender sends the Failover request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) FailoverSender(req *http.Request) (future AccountsFailoverFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// FailoverResponder handles the response to the Failover request. The method always +// closes the http.Response Body. 
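Failover follows the same future pattern: FailoverSender hands back an AccountsFailoverFuture and the caller blocks on it until the secondary endpoint has been promoted. A hedged sketch, reusing the configured client and imports from the Create example above:

// Sketch only: triggers a failover and waits for it to finish; names are placeholders.
func failoverAccount(ctx context.Context, client storage.AccountsClient) error {
	future, err := client.Failover(ctx, "example-resources", "examplestoracct")
	if err != nil {
		return err
	}
	// Failover of a RA-GRS account can take a while; WaitForCompletionRef polls until done.
	return future.WaitForCompletionRef(ctx, client.Client)
}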
+func (client AccountsClient) FailoverResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// GetProperties returns the properties for the specified storage account including but not limited to name, SKU name, +// location, and account status. The ListKeys operation should be used to retrieve storage keys. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// expand - may be used to expand the properties within account's properties. By default, data is not included +// when fetching properties. Currently we only support geoReplicationStats and blobRestoreStatus. +func (client AccountsClient) GetProperties(ctx context.Context, resourceGroupName string, accountName string, expand AccountExpand) (result Account, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.GetProperties") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.AccountsClient", "GetProperties", err.Error()) + } + + req, err := client.GetPropertiesPreparer(ctx, resourceGroupName, accountName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", nil, "Failure preparing request") + return + } + + resp, err := client.GetPropertiesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", resp, "Failure sending request") + return + } + + result, err = client.GetPropertiesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", resp, "Failure responding to request") + return + } + + return +} + +// GetPropertiesPreparer prepares the GetProperties request. 
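GetProperties is synchronous; its expand argument maps to the optional $expand query parameter, and passing the zero value omits it, as GetPropertiesPreparer below shows. A small sketch with placeholder names, again assuming the imports and client from the Create example; the expand value is written as a raw string cast since the exact enum constants fall outside this excerpt.

// Sketch only: reads account properties, optionally expanding geo-replication stats.
func showAccount(ctx context.Context, client storage.AccountsClient) error {
	// Pass "" to skip $expand; storage.AccountExpand("geoReplicationStats") would include replication stats.
	account, err := client.GetProperties(ctx, "example-resources", "examplestoracct", "")
	if err != nil {
		return err
	}
	if account.Location != nil && account.Sku != nil {
		fmt.Printf("location=%s sku=%s\n", *account.Location, account.Sku.Name)
	}
	return nil
}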
+func (client AccountsClient) GetPropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string, expand AccountExpand) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(string(expand)) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetPropertiesSender sends the GetProperties request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) GetPropertiesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetPropertiesResponder handles the response to the GetProperties request. The method always +// closes the http.Response Body. +func (client AccountsClient) GetPropertiesResponder(resp *http.Response) (result Account, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all the storage accounts available under the subscription. Note that storage keys are not returned; use +// the ListKeys operation for this. +func (client AccountsClient) List(ctx context.Context) (result AccountListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.List") + defer func() { + sc := -1 + if result.alr.Response.Response != nil { + sc = result.alr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.AccountsClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.alr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", resp, "Failure sending request") + return + } + + result.alr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", resp, "Failure responding to request") + return + } + if result.alr.hasNextLink() && result.alr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListPreparer prepares the List request. 
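List returns an AccountListResultPage that holds one page of accounts at a time; NotDone, Values and NextWithContext (the standard methods generated for page types in this SDK, not shown in this excerpt) drive the iteration. A sketch of walking every account in the subscription, assuming the same setup as the Create example:

// Sketch only: walks every storage account in the subscription page by page.
func listAllAccounts(ctx context.Context, client storage.AccountsClient) error {
	page, err := client.List(ctx)
	if err != nil {
		return err
	}
	for page.NotDone() {
		for _, account := range page.Values() {
			if account.Name != nil {
				fmt.Println(*account.Name)
			}
		}
		if err := page.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}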
+func (client AccountsClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/storageAccounts", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client AccountsClient) ListResponder(resp *http.Response) (result AccountListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client AccountsClient) listNextResults(ctx context.Context, lastResults AccountListResult) (result AccountListResult, err error) { + req, err := lastResults.accountListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client AccountsClient) ListComplete(ctx context.Context) (result AccountListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx) + return +} + +// ListAccountSAS list SAS credentials of a storage account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// parameters - the parameters to provide to list SAS credentials for the storage account. 
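Where manual paging is not needed, ListComplete (shown a little earlier) wraps the same call in an AccountListResultIterator that crosses page boundaries on its own. A sketch of the flattened form, with the iterator's NotDone/Value/NextWithContext methods assumed from the SDK's standard generation:

// Sketch only: the iterator variant hides the page boundaries entirely.
func listAllAccountsFlat(ctx context.Context, client storage.AccountsClient) error {
	iter, err := client.ListComplete(ctx)
	if err != nil {
		return err
	}
	for iter.NotDone() {
		account := iter.Value()
		if account.Name != nil {
			fmt.Println(*account.Name)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}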
+func (client AccountsClient) ListAccountSAS(ctx context.Context, resourceGroupName string, accountName string, parameters AccountSasParameters) (result ListAccountSasResponse, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListAccountSAS") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.SharedAccessExpiryTime", Name: validation.Null, Rule: true, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.AccountsClient", "ListAccountSAS", err.Error()) + } + + req, err := client.ListAccountSASPreparer(ctx, resourceGroupName, accountName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListAccountSAS", nil, "Failure preparing request") + return + } + + resp, err := client.ListAccountSASSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListAccountSAS", resp, "Failure sending request") + return + } + + result, err = client.ListAccountSASResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListAccountSAS", resp, "Failure responding to request") + return + } + + return +} + +// ListAccountSASPreparer prepares the ListAccountSAS request. +func (client AccountsClient) ListAccountSASPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters AccountSasParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListAccountSas", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListAccountSASSender sends the ListAccountSAS request. The method will close the +// http.Response Body if it receives an error. 
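ListAccountSAS takes an AccountSasParameters payload; the validation above only enforces SharedAccessExpiryTime, so the other field names used in this sketch (Services, ResourceTypes, Permissions) are assumptions based on the account-SAS model, and the enum values are passed as raw string casts. Beyond the earlier imports it also needs the standard library time package and go-autorest's date package.

// Sketch only: requests an account-level SAS token valid for one hour.
func accountSAS(ctx context.Context, client storage.AccountsClient) (string, error) {
	expiry := date.Time{Time: time.Now().UTC().Add(time.Hour)}
	res, err := client.ListAccountSAS(ctx, "example-resources", "examplestoracct", storage.AccountSasParameters{
		Services:               storage.Services("b"),              // blob service only
		ResourceTypes:          storage.SignedResourceTypes("sco"), // service, container and object level
		Permissions:            storage.Permissions("rl"),          // read + list
		SharedAccessExpiryTime: &expiry,
	})
	if err != nil {
		return "", err
	}
	if res.AccountSasToken == nil {
		return "", fmt.Errorf("service returned no SAS token")
	}
	return *res.AccountSasToken, nil
}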
+func (client AccountsClient) ListAccountSASSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListAccountSASResponder handles the response to the ListAccountSAS request. The method always +// closes the http.Response Body. +func (client AccountsClient) ListAccountSASResponder(resp *http.Response) (result ListAccountSasResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroup lists all the storage accounts available under the given resource group. Note that storage keys +// are not returned; use the ListKeys operation for this. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +func (client AccountsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result AccountListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.alr.Response.Response != nil { + sc = result.alr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.AccountsClient", "ListByResourceGroup", err.Error()) + } + + result.fn = client.listByResourceGroupNextResults + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.alr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result.alr, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", resp, "Failure responding to request") + return + } + if result.alr.hasNextLink() && result.alr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
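Every operation in this file decomposes into the same three generated stages: a Preparer that builds the *http.Request, a Sender that executes it with registration/retry handling, and a Responder that checks the status code and unmarshals the body. The wrapper methods simply chain them, but the stages can be driven by hand when the raw request or response needs inspecting, as this sketch does with the ListByResourceGroup pieces defined just below:

// Sketch only: composing the three generated stages by hand for ListByResourceGroup.
// Normally you would just call client.ListByResourceGroup; this shows what that wrapper does.
func listByRGManually(ctx context.Context, client storage.AccountsClient) (storage.AccountListResult, error) {
	var result storage.AccountListResult
	req, err := client.ListByResourceGroupPreparer(ctx, "example-resources")
	if err != nil {
		return result, err
	}
	resp, err := client.ListByResourceGroupSender(req)
	if err != nil {
		return result, err
	}
	return client.ListByResourceGroupResponder(resp)
}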
+func (client AccountsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client AccountsClient) ListByResourceGroupResponder(resp *http.Response) (result AccountListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByResourceGroupNextResults retrieves the next set of results, if any. +func (client AccountsClient) listByResourceGroupNextResults(ctx context.Context, lastResults AccountListResult) (result AccountListResult, err error) { + req, err := lastResults.accountListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. +func (client AccountsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result AccountListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) + return +} + +// ListKeys lists the access keys or Kerberos keys (if active directory enabled) for the specified storage account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. 
+// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// expand - specifies type of the key to be listed. Possible value is kerb. +func (client AccountsClient) ListKeys(ctx context.Context, resourceGroupName string, accountName string, expand ListKeyExpand) (result AccountListKeysResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListKeys") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.AccountsClient", "ListKeys", err.Error()) + } + + req, err := client.ListKeysPreparer(ctx, resourceGroupName, accountName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", nil, "Failure preparing request") + return + } + + resp, err := client.ListKeysSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", resp, "Failure sending request") + return + } + + result, err = client.ListKeysResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", resp, "Failure responding to request") + return + } + + return +} + +// ListKeysPreparer prepares the ListKeys request. +func (client AccountsClient) ListKeysPreparer(ctx context.Context, resourceGroupName string, accountName string, expand ListKeyExpand) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(string(expand)) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListKeysSender sends the ListKeys request. The method will close the +// http.Response Body if it receives an error. 
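ListKeys posts to the listKeys endpoint and returns an AccountListKeysResult; the expand argument is optional, and the Keys slice carries the regular keys plus Kerberos keys when AD authentication is enabled. In the sketch below the Keys/KeyName field names are assumptions taken from the result model, which lies outside this excerpt.

// Sketch only: prints the key names for an account (key values deliberately not logged).
func listKeyNames(ctx context.Context, client storage.AccountsClient) error {
	res, err := client.ListKeys(ctx, "example-resources", "examplestoracct", "")
	if err != nil {
		return err
	}
	if res.Keys != nil {
		for _, k := range *res.Keys {
			if k.KeyName != nil {
				fmt.Println(*k.KeyName)
			}
		}
	}
	return nil
}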
+func (client AccountsClient) ListKeysSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListKeysResponder handles the response to the ListKeys request. The method always +// closes the http.Response Body. +func (client AccountsClient) ListKeysResponder(resp *http.Response) (result AccountListKeysResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListServiceSAS list service SAS credentials of a specific resource. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// parameters - the parameters to provide to list service SAS credentials. +func (client AccountsClient) ListServiceSAS(ctx context.Context, resourceGroupName string, accountName string, parameters ServiceSasParameters) (result ListServiceSasResponse, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListServiceSAS") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.CanonicalizedResource", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.Identifier", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Identifier", Name: validation.MaxLength, Rule: 64, Chain: nil}}}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.AccountsClient", "ListServiceSAS", err.Error()) + } + + req, err := client.ListServiceSASPreparer(ctx, resourceGroupName, accountName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListServiceSAS", nil, "Failure preparing request") + return + } + + resp, err := client.ListServiceSASSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListServiceSAS", resp, "Failure sending request") + return + } + + result, err = client.ListServiceSASResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListServiceSAS", resp, "Failure responding to request") + return + } + + return +} + +// ListServiceSASPreparer prepares the 
ListServiceSAS request. +func (client AccountsClient) ListServiceSASPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters ServiceSasParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListServiceSas", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListServiceSASSender sends the ListServiceSAS request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) ListServiceSASSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListServiceSASResponder handles the response to the ListServiceSAS request. The method always +// closes the http.Response Body. +func (client AccountsClient) ListServiceSASResponder(resp *http.Response) (result ListServiceSasResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// RegenerateKey regenerates one of the access keys or Kerberos keys for the specified storage account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// regenerateKey - specifies name of the key which should be regenerated -- key1, key2, kerb1, kerb2. 
+func (client AccountsClient) RegenerateKey(ctx context.Context, resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (result AccountListKeysResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.RegenerateKey") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: regenerateKey, + Constraints: []validation.Constraint{{Target: "regenerateKey.KeyName", Name: validation.Null, Rule: true, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.AccountsClient", "RegenerateKey", err.Error()) + } + + req, err := client.RegenerateKeyPreparer(ctx, resourceGroupName, accountName, regenerateKey) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", nil, "Failure preparing request") + return + } + + resp, err := client.RegenerateKeySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", resp, "Failure sending request") + return + } + + result, err = client.RegenerateKeyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", resp, "Failure responding to request") + return + } + + return +} + +// RegenerateKeyPreparer prepares the RegenerateKey request. +func (client AccountsClient) RegenerateKeyPreparer(ctx context.Context, resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/regenerateKey", pathParameters), + autorest.WithJSON(regenerateKey), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RegenerateKeySender sends the RegenerateKey request. The method will close the +// http.Response Body if it receives an error. 
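RegenerateKey takes an AccountRegenerateKeyParameters value whose KeyName must be set (the validation above rejects a nil KeyName) to key1, key2, kerb1 or kerb2. A short sketch, with placeholder names as before:

// Sketch only: rotates the primary access key.
func rotatePrimaryKey(ctx context.Context, client storage.AccountsClient) error {
	_, err := client.RegenerateKey(ctx, "example-resources", "examplestoracct",
		storage.AccountRegenerateKeyParameters{KeyName: to.StringPtr("key1")})
	return err
}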
+func (client AccountsClient) RegenerateKeySender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// RegenerateKeyResponder handles the response to the RegenerateKey request. The method always +// closes the http.Response Body. +func (client AccountsClient) RegenerateKeyResponder(resp *http.Response) (result AccountListKeysResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// RestoreBlobRanges restore blobs in the specified blob ranges +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// parameters - the parameters to provide for restore blob ranges. +func (client AccountsClient) RestoreBlobRanges(ctx context.Context, resourceGroupName string, accountName string, parameters BlobRestoreParameters) (result AccountsRestoreBlobRangesFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.RestoreBlobRanges") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.TimeToRestore", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.BlobRanges", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.AccountsClient", "RestoreBlobRanges", err.Error()) + } + + req, err := client.RestoreBlobRangesPreparer(ctx, resourceGroupName, accountName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RestoreBlobRanges", nil, "Failure preparing request") + return + } + + result, err = client.RestoreBlobRangesSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RestoreBlobRanges", result.Response(), "Failure sending request") + return + } + + return +} + +// RestoreBlobRangesPreparer prepares the RestoreBlobRanges request. 
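RestoreBlobRanges is another future-based operation, and the validation above requires both TimeToRestore and BlobRanges. In the sketch below the BlobRestoreRange element type and its StartRange/EndRange fields are assumptions (that model is not part of this excerpt), with empty range strings used to mean the whole account; it reuses the earlier imports plus time and go-autorest's date package.

// Sketch only: restores every blob to its state of one hour ago, then waits for the operation.
func restoreBlobs(ctx context.Context, client storage.AccountsClient) error {
	restorePoint := date.Time{Time: time.Now().UTC().Add(-time.Hour)}
	future, err := client.RestoreBlobRanges(ctx, "example-resources", "examplestoracct",
		storage.BlobRestoreParameters{
			TimeToRestore: &restorePoint,
			BlobRanges: &[]storage.BlobRestoreRange{
				{StartRange: to.StringPtr(""), EndRange: to.StringPtr("")}, // empty range = everything
			},
		})
	if err != nil {
		return err
	}
	return future.WaitForCompletionRef(ctx, client.Client)
}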
+func (client AccountsClient) RestoreBlobRangesPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters BlobRestoreParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/restoreBlobRanges", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RestoreBlobRangesSender sends the RestoreBlobRanges request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) RestoreBlobRangesSender(req *http.Request) (future AccountsRestoreBlobRangesFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// RestoreBlobRangesResponder handles the response to the RestoreBlobRanges request. The method always +// closes the http.Response Body. +func (client AccountsClient) RestoreBlobRangesResponder(resp *http.Response) (result BlobRestoreStatus, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// RevokeUserDelegationKeys revoke user delegation keys. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
+func (client AccountsClient) RevokeUserDelegationKeys(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.RevokeUserDelegationKeys") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.AccountsClient", "RevokeUserDelegationKeys", err.Error()) + } + + req, err := client.RevokeUserDelegationKeysPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RevokeUserDelegationKeys", nil, "Failure preparing request") + return + } + + resp, err := client.RevokeUserDelegationKeysSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RevokeUserDelegationKeys", resp, "Failure sending request") + return + } + + result, err = client.RevokeUserDelegationKeysResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RevokeUserDelegationKeys", resp, "Failure responding to request") + return + } + + return +} + +// RevokeUserDelegationKeysPreparer prepares the RevokeUserDelegationKeys request. +func (client AccountsClient) RevokeUserDelegationKeysPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/revokeUserDelegationKeys", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RevokeUserDelegationKeysSender sends the RevokeUserDelegationKeys request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) RevokeUserDelegationKeysSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// RevokeUserDelegationKeysResponder handles the response to the RevokeUserDelegationKeys request. The method always +// closes the http.Response Body. 
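RevokeUserDelegationKeys returns only a status: it invalidates the account's user delegation keys, and with them any user-delegation SAS minted from those keys, which is why it is typically paired with key rotation when credentials leak. A one-call sketch:

// Sketch only: revokes all user delegation keys, invalidating SAS tokens derived from them.
func revokeDelegationKeys(ctx context.Context, client storage.AccountsClient) error {
	_, err := client.RevokeUserDelegationKeys(ctx, "example-resources", "examplestoracct")
	return err
}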
+func (client AccountsClient) RevokeUserDelegationKeysResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Update the update operation can be used to update the SKU, encryption, access tier, or tags for a storage account. +// It can also be used to map the account to a custom domain. Only one custom domain is supported per storage account; +// the replacement/change of custom domain is not supported. In order to replace an old custom domain, the old value +// must be cleared/unregistered before a new value can be set. The update of multiple properties is supported. This +// call does not change the storage keys for the account. If you want to change the storage account keys, use the +// regenerate keys operation. The location and name of the storage account cannot be changed after creation. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// parameters - the parameters to provide for the updated account. +func (client AccountsClient) Update(ctx context.Context, resourceGroupName string, accountName string, parameters AccountUpdateParameters) (result Account, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.AccountsClient", "Update", err.Error()) + } + + req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", resp, "Failure responding to request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. 
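Update issues a synchronous PATCH (see UpdatePreparer below), so only the fields populated in AccountUpdateParameters are changed, which makes it a good fit for incremental edits such as tags. The Tags field name in this sketch is an assumption based on the update-parameters model:

// Sketch only: patches just the tags on an existing account; unset fields are left unchanged.
func tagAccount(ctx context.Context, client storage.AccountsClient) error {
	_, err := client.Update(ctx, "example-resources", "examplestoracct",
		storage.AccountUpdateParameters{
			Tags: map[string]*string{"environment": to.StringPtr("staging")},
		})
	return err
}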
+func (client AccountsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, parameters AccountUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client AccountsClient) UpdateResponder(resp *http.Response) (result Account, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/blobcontainers.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/blobcontainers.go new file mode 100644 index 000000000000..cedbf65f1873 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/blobcontainers.go @@ -0,0 +1,1539 @@ +package storage + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// BlobContainersClient is the the Azure Storage Management API. +type BlobContainersClient struct { + BaseClient +} + +// NewBlobContainersClient creates an instance of the BlobContainersClient client. +func NewBlobContainersClient(subscriptionID string) BlobContainersClient { + return NewBlobContainersClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewBlobContainersClientWithBaseURI creates an instance of the BlobContainersClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewBlobContainersClientWithBaseURI(baseURI string, subscriptionID string) BlobContainersClient { + return BlobContainersClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// ClearLegalHold clears legal hold tags. 
Clearing the same or non-existent tag results in an idempotent operation. +// ClearLegalHold clears out only the specified tags in the request. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// containerName - the name of the blob container within the specified storage account. Blob container names +// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every +// dash (-) character must be immediately preceded and followed by a letter or number. +// legalHold - the LegalHold property that will be clear from a blob container. +func (client BlobContainersClient) ClearLegalHold(ctx context.Context, resourceGroupName string, accountName string, containerName string, legalHold LegalHold) (result LegalHold, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.ClearLegalHold") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: containerName, + Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: legalHold, + Constraints: []validation.Constraint{{Target: "legalHold.Tags", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.BlobContainersClient", "ClearLegalHold", err.Error()) + } + + req, err := client.ClearLegalHoldPreparer(ctx, resourceGroupName, accountName, containerName, legalHold) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ClearLegalHold", nil, "Failure preparing request") + return + } + + resp, err := client.ClearLegalHoldSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ClearLegalHold", resp, "Failure sending request") + return + } + + result, err = client.ClearLegalHoldResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ClearLegalHold", resp, "Failure responding to request") + return + } + + return +} + +// ClearLegalHoldPreparer prepares the ClearLegalHold request. 
+func (client BlobContainersClient) ClearLegalHoldPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, legalHold LegalHold) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "containerName": autorest.Encode("path", containerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + legalHold.HasLegalHold = nil + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/clearLegalHold", pathParameters), + autorest.WithJSON(legalHold), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ClearLegalHoldSender sends the ClearLegalHold request. The method will close the +// http.Response Body if it receives an error. +func (client BlobContainersClient) ClearLegalHoldSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ClearLegalHoldResponder handles the response to the ClearLegalHold request. The method always +// closes the http.Response Body. +func (client BlobContainersClient) ClearLegalHoldResponder(resp *http.Response) (result LegalHold, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Create creates a new container under the specified account as described by request body. The container resource +// includes metadata and properties for that container. It does not include a list of the blobs contained by the +// container. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// containerName - the name of the blob container within the specified storage account. Blob container names +// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every +// dash (-) character must be immediately preceded and followed by a letter or number. +// blobContainer - properties of the blob container to create. 
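// Illustrative usage (editorial sketch, not part of the generated SDK), assuming the generated
// models in this package expose ContainerProperties and the PublicAccessNone constant as in
// neighbouring API versions; names are placeholders and authorizer/ctx are assumed to exist.
//
//	containersClient := storage.NewBlobContainersClient("00000000-0000-0000-0000-000000000000")
//	containersClient.Authorizer = authorizer
//	container, err := containersClient.Create(ctx, "example-resources", "examplestoracct", "example-container",
//		storage.BlobContainer{
//			ContainerProperties: &storage.ContainerProperties{
//				PublicAccess: storage.PublicAccessNone,
//			},
//		})
//	if err != nil {
//		log.Fatalf("creating container: %+v", err)
//	}
//	log.Printf("created container %s", *container.Name)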
+func (client BlobContainersClient) Create(ctx context.Context, resourceGroupName string, accountName string, containerName string, blobContainer BlobContainer) (result BlobContainer, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.Create") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: containerName, + Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.BlobContainersClient", "Create", err.Error()) + } + + req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, containerName, blobContainer) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Create", nil, "Failure preparing request") + return + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Create", resp, "Failure sending request") + return + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Create", resp, "Failure responding to request") + return + } + + return +} + +// CreatePreparer prepares the Create request. +func (client BlobContainersClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, blobContainer BlobContainer) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "containerName": autorest.Encode("path", containerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}", pathParameters), + autorest.WithJSON(blobContainer), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateSender sends the Create request. 
The method will close the +// http.Response Body if it receives an error. +func (client BlobContainersClient) CreateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client BlobContainersClient) CreateResponder(resp *http.Response) (result BlobContainer, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdateImmutabilityPolicy creates or updates an unlocked immutability policy. ETag in If-Match is honored if +// given but not required for this operation. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// containerName - the name of the blob container within the specified storage account. Blob container names +// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every +// dash (-) character must be immediately preceded and followed by a letter or number. +// parameters - the ImmutabilityPolicy Properties that will be created or updated to a blob container. +// ifMatch - the entity state (ETag) version of the immutability policy to update. A value of "*" can be used +// to apply the operation only if the immutability policy already exists. If omitted, this operation will +// always be applied. 
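// Illustrative usage (editorial sketch, not part of the generated SDK): creating an unlocked
// policy with a 7-day retention, assuming the ImmutabilityPolicy/ImmutabilityPolicyProperty model
// fields match neighbouring API versions; containersClient is the authorized BlobContainersClient
// from the Create sketch above, and an empty ifMatch applies the operation unconditionally.
//
//	policy, err := containersClient.CreateOrUpdateImmutabilityPolicy(ctx,
//		"example-resources", "examplestoracct", "example-container",
//		&storage.ImmutabilityPolicy{
//			ImmutabilityPolicyProperty: &storage.ImmutabilityPolicyProperty{
//				ImmutabilityPeriodSinceCreationInDays: to.Int32Ptr(7),
//			},
//		}, "")
//	if err != nil {
//		log.Fatalf("setting immutability policy: %+v", err)
//	}
//	log.Printf("immutability policy etag: %s", *policy.Etag)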
+func (client BlobContainersClient) CreateOrUpdateImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, parameters *ImmutabilityPolicy, ifMatch string) (result ImmutabilityPolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.CreateOrUpdateImmutabilityPolicy") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: containerName, + Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ImmutabilityPolicyProperty", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { + return result, validation.NewError("storage.BlobContainersClient", "CreateOrUpdateImmutabilityPolicy", err.Error()) + } + + req, err := client.CreateOrUpdateImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, parameters, ifMatch) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "CreateOrUpdateImmutabilityPolicy", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateImmutabilityPolicySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "CreateOrUpdateImmutabilityPolicy", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateImmutabilityPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "CreateOrUpdateImmutabilityPolicy", resp, "Failure responding to request") + return + } + + return +} + +// CreateOrUpdateImmutabilityPolicyPreparer prepares the CreateOrUpdateImmutabilityPolicy request. 
+func (client BlobContainersClient) CreateOrUpdateImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, parameters *ImmutabilityPolicy, ifMatch string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "containerName": autorest.Encode("path", containerName), + "immutabilityPolicyName": autorest.Encode("path", "default"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if parameters != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(parameters)) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateImmutabilityPolicySender sends the CreateOrUpdateImmutabilityPolicy request. The method will close the +// http.Response Body if it receives an error. +func (client BlobContainersClient) CreateOrUpdateImmutabilityPolicySender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateImmutabilityPolicyResponder handles the response to the CreateOrUpdateImmutabilityPolicy request. The method always +// closes the http.Response Body. +func (client BlobContainersClient) CreateOrUpdateImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes specified container under its account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// containerName - the name of the blob container within the specified storage account. Blob container names +// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every +// dash (-) character must be immediately preceded and followed by a letter or number. 
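// Illustrative usage (editorial sketch, not part of the generated SDK); names are placeholders and
// containersClient is the authorized BlobContainersClient from the Create sketch above.
//
//	if _, err := containersClient.Delete(ctx, "example-resources", "examplestoracct", "example-container"); err != nil {
//		log.Fatalf("deleting container: %+v", err)
//	}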
+func (client BlobContainersClient) Delete(ctx context.Context, resourceGroupName string, accountName string, containerName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: containerName, + Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.BlobContainersClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, containerName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Delete", resp, "Failure responding to request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client BlobContainersClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "containerName": autorest.Encode("path", containerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client BlobContainersClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client BlobContainersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteImmutabilityPolicy aborts an unlocked immutability policy. The response of delete has +// immutabilityPeriodSinceCreationInDays set to 0. ETag in If-Match is required for this operation. Deleting a locked +// immutability policy is not allowed, the only way is to delete the container after deleting all expired blobs inside +// the policy locked container. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// containerName - the name of the blob container within the specified storage account. Blob container names +// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every +// dash (-) character must be immediately preceded and followed by a letter or number. +// ifMatch - the entity state (ETag) version of the immutability policy to update. A value of "*" can be used +// to apply the operation only if the immutability policy already exists. If omitted, this operation will +// always be applied. 
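// Illustrative usage (editorial sketch, not part of the generated SDK): the current policy ETag is
// read first and passed as ifMatch, since this operation requires it; names are placeholders and
// containersClient is the authorized BlobContainersClient from the Create sketch above.
//
//	existing, err := containersClient.GetImmutabilityPolicy(ctx, "example-resources", "examplestoracct", "example-container", "")
//	if err != nil {
//		log.Fatalf("reading immutability policy: %+v", err)
//	}
//	if _, err := containersClient.DeleteImmutabilityPolicy(ctx, "example-resources", "examplestoracct", "example-container", *existing.Etag); err != nil {
//		log.Fatalf("deleting immutability policy: %+v", err)
//	}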
+func (client BlobContainersClient) DeleteImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (result ImmutabilityPolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.DeleteImmutabilityPolicy") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: containerName, + Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.BlobContainersClient", "DeleteImmutabilityPolicy", err.Error()) + } + + req, err := client.DeleteImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, ifMatch) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "DeleteImmutabilityPolicy", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteImmutabilityPolicySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "DeleteImmutabilityPolicy", resp, "Failure sending request") + return + } + + result, err = client.DeleteImmutabilityPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "DeleteImmutabilityPolicy", resp, "Failure responding to request") + return + } + + return +} + +// DeleteImmutabilityPolicyPreparer prepares the DeleteImmutabilityPolicy request. 
+func (client BlobContainersClient) DeleteImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "containerName": autorest.Encode("path", containerName), + "immutabilityPolicyName": autorest.Encode("path", "default"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}", pathParameters), + autorest.WithQueryParameters(queryParameters), + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteImmutabilityPolicySender sends the DeleteImmutabilityPolicy request. The method will close the +// http.Response Body if it receives an error. +func (client BlobContainersClient) DeleteImmutabilityPolicySender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteImmutabilityPolicyResponder handles the response to the DeleteImmutabilityPolicy request. The method always +// closes the http.Response Body. +func (client BlobContainersClient) DeleteImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ExtendImmutabilityPolicy extends the immutabilityPeriodSinceCreationInDays of a locked immutabilityPolicy. The only +// action allowed on a Locked policy will be this action. ETag in If-Match is required for this operation. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// containerName - the name of the blob container within the specified storage account. Blob container names +// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every +// dash (-) character must be immediately preceded and followed by a letter or number. +// ifMatch - the entity state (ETag) version of the immutability policy to update. A value of "*" can be used +// to apply the operation only if the immutability policy already exists. If omitted, this operation will +// always be applied. +// parameters - the ImmutabilityPolicy Properties that will be extended for a blob container. 
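// Illustrative usage (editorial sketch, not part of the generated SDK): extending a locked policy
// to 30 days, assuming lockedPolicy holds the result of an earlier LockImmutabilityPolicy call and
// that the model fields match neighbouring API versions; containersClient is the authorized
// BlobContainersClient from the Create sketch above.
//
//	extended, err := containersClient.ExtendImmutabilityPolicy(ctx,
//		"example-resources", "examplestoracct", "example-container", *lockedPolicy.Etag,
//		&storage.ImmutabilityPolicy{
//			ImmutabilityPolicyProperty: &storage.ImmutabilityPolicyProperty{
//				ImmutabilityPeriodSinceCreationInDays: to.Int32Ptr(30),
//			},
//		})
//	if err != nil {
//		log.Fatalf("extending immutability policy: %+v", err)
//	}
//	log.Printf("extended policy etag: %s", *extended.Etag)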
+func (client BlobContainersClient) ExtendImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string, parameters *ImmutabilityPolicy) (result ImmutabilityPolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.ExtendImmutabilityPolicy") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: containerName, + Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ImmutabilityPolicyProperty", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { + return result, validation.NewError("storage.BlobContainersClient", "ExtendImmutabilityPolicy", err.Error()) + } + + req, err := client.ExtendImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, ifMatch, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ExtendImmutabilityPolicy", nil, "Failure preparing request") + return + } + + resp, err := client.ExtendImmutabilityPolicySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ExtendImmutabilityPolicy", resp, "Failure sending request") + return + } + + result, err = client.ExtendImmutabilityPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ExtendImmutabilityPolicy", resp, "Failure responding to request") + return + } + + return +} + +// ExtendImmutabilityPolicyPreparer prepares the ExtendImmutabilityPolicy request. 
+func (client BlobContainersClient) ExtendImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string, parameters *ImmutabilityPolicy) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "containerName": autorest.Encode("path", containerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/extend", pathParameters), + autorest.WithQueryParameters(queryParameters), + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + if parameters != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(parameters)) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ExtendImmutabilityPolicySender sends the ExtendImmutabilityPolicy request. The method will close the +// http.Response Body if it receives an error. +func (client BlobContainersClient) ExtendImmutabilityPolicySender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ExtendImmutabilityPolicyResponder handles the response to the ExtendImmutabilityPolicy request. The method always +// closes the http.Response Body. +func (client BlobContainersClient) ExtendImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get gets properties of a specified container. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// containerName - the name of the blob container within the specified storage account. Blob container names +// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every +// dash (-) character must be immediately preceded and followed by a letter or number. 
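// Illustrative usage (editorial sketch, not part of the generated SDK); names are placeholders and
// containersClient is the authorized BlobContainersClient from the Create sketch above.
//
//	container, err := containersClient.Get(ctx, "example-resources", "examplestoracct", "example-container")
//	if err != nil {
//		log.Fatalf("retrieving container: %+v", err)
//	}
//	log.Printf("found container %s", *container.Name)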
+func (client BlobContainersClient) Get(ctx context.Context, resourceGroupName string, accountName string, containerName string) (result BlobContainer, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: containerName, + Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.BlobContainersClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, accountName, containerName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. +func (client BlobContainersClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "containerName": autorest.Encode("path", containerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client BlobContainersClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client BlobContainersClient) GetResponder(resp *http.Response) (result BlobContainer, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetImmutabilityPolicy gets the existing immutability policy along with the corresponding ETag in response headers +// and body. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// containerName - the name of the blob container within the specified storage account. Blob container names +// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every +// dash (-) character must be immediately preceded and followed by a letter or number. +// ifMatch - the entity state (ETag) version of the immutability policy to update. A value of "*" can be used +// to apply the operation only if the immutability policy already exists. If omitted, this operation will +// always be applied. +func (client BlobContainersClient) GetImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (result ImmutabilityPolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.GetImmutabilityPolicy") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: containerName, + Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.BlobContainersClient", "GetImmutabilityPolicy", err.Error()) + } + + req, err := client.GetImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, ifMatch) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "GetImmutabilityPolicy", nil, "Failure preparing request") + return + } + + resp, err := 
client.GetImmutabilityPolicySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "GetImmutabilityPolicy", resp, "Failure sending request") + return + } + + result, err = client.GetImmutabilityPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "GetImmutabilityPolicy", resp, "Failure responding to request") + return + } + + return +} + +// GetImmutabilityPolicyPreparer prepares the GetImmutabilityPolicy request. +func (client BlobContainersClient) GetImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "containerName": autorest.Encode("path", containerName), + "immutabilityPolicyName": autorest.Encode("path", "default"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetImmutabilityPolicySender sends the GetImmutabilityPolicy request. The method will close the +// http.Response Body if it receives an error. +func (client BlobContainersClient) GetImmutabilityPolicySender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetImmutabilityPolicyResponder handles the response to the GetImmutabilityPolicy request. The method always +// closes the http.Response Body. +func (client BlobContainersClient) GetImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Lease the Lease Container operation establishes and manages a lock on a container for delete operations. The lock +// duration can be 15 to 60 seconds, or can be infinite. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// containerName - the name of the blob container within the specified storage account. Blob container names +// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every +// dash (-) character must be immediately preceded and followed by a letter or number. 
+// parameters - lease Container request body. +func (client BlobContainersClient) Lease(ctx context.Context, resourceGroupName string, accountName string, containerName string, parameters *LeaseContainerRequest) (result LeaseContainerResponse, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.Lease") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: containerName, + Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.BlobContainersClient", "Lease", err.Error()) + } + + req, err := client.LeasePreparer(ctx, resourceGroupName, accountName, containerName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Lease", nil, "Failure preparing request") + return + } + + resp, err := client.LeaseSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Lease", resp, "Failure sending request") + return + } + + result, err = client.LeaseResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Lease", resp, "Failure responding to request") + return + } + + return +} + +// LeasePreparer prepares the Lease request. 
+func (client BlobContainersClient) LeasePreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, parameters *LeaseContainerRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "containerName": autorest.Encode("path", containerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/lease", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if parameters != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(parameters)) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// LeaseSender sends the Lease request. The method will close the +// http.Response Body if it receives an error. +func (client BlobContainersClient) LeaseSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// LeaseResponder handles the response to the Lease request. The method always +// closes the http.Response Body. +func (client BlobContainersClient) LeaseResponder(resp *http.Response) (result LeaseContainerResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all containers and does not support a prefix like data plane. Also SRP today does not return continuation +// token. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// maxpagesize - optional. Specified maximum number of containers that can be included in the list. +// filter - optional. When specified, only container names starting with the filter will be listed. +// include - optional, used to include the properties for soft deleted blob containers. 
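// Illustrative usage (editorial sketch, not part of the generated SDK): walking the paged result
// manually; maxpagesize, filter and include are left empty, names are placeholders, and
// containersClient is the authorized BlobContainersClient from the Create sketch above.
//
//	page, err := containersClient.List(ctx, "example-resources", "examplestoracct", "", "", "")
//	if err != nil {
//		log.Fatalf("listing containers: %+v", err)
//	}
//	for page.NotDone() {
//		for _, item := range page.Values() {
//			log.Printf("container: %s", *item.Name)
//		}
//		if err := page.NextWithContext(ctx); err != nil {
//			log.Fatalf("advancing page: %+v", err)
//		}
//	}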
+func (client BlobContainersClient) List(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string, include ListContainersInclude) (result ListContainerItemsPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.List") + defer func() { + sc := -1 + if result.lci.Response.Response != nil { + sc = result.lci.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.BlobContainersClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, accountName, maxpagesize, filter, include) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.lci.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "List", resp, "Failure sending request") + return + } + + result.lci, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "List", resp, "Failure responding to request") + return + } + if result.lci.hasNextLink() && result.lci.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListPreparer prepares the List request. 
+func (client BlobContainersClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string, include ListContainersInclude) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(maxpagesize) > 0 { + queryParameters["$maxpagesize"] = autorest.Encode("query", maxpagesize) + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if len(string(include)) > 0 { + queryParameters["$include"] = autorest.Encode("query", include) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client BlobContainersClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client BlobContainersClient) ListResponder(resp *http.Response) (result ListContainerItems, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client BlobContainersClient) listNextResults(ctx context.Context, lastResults ListContainerItems) (result ListContainerItems, err error) { + req, err := lastResults.listContainerItemsPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage.BlobContainersClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage.BlobContainersClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. 
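// Illustrative usage (editorial sketch, not part of the generated SDK): the iterator form hides
// the page handling shown above; arguments mirror the List sketch.
//
//	iter, err := containersClient.ListComplete(ctx, "example-resources", "examplestoracct", "", "", "")
//	if err != nil {
//		log.Fatalf("listing containers: %+v", err)
//	}
//	for iter.NotDone() {
//		log.Printf("container: %s", *iter.Value().Name)
//		if err := iter.NextWithContext(ctx); err != nil {
//			log.Fatalf("advancing iterator: %+v", err)
//		}
//	}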
+func (client BlobContainersClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string, include ListContainersInclude) (result ListContainerItemsIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, accountName, maxpagesize, filter, include) + return +} + +// LockImmutabilityPolicy sets the ImmutabilityPolicy to Locked state. The only action allowed on a Locked policy is +// ExtendImmutabilityPolicy action. ETag in If-Match is required for this operation. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// containerName - the name of the blob container within the specified storage account. Blob container names +// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every +// dash (-) character must be immediately preceded and followed by a letter or number. +// ifMatch - the entity state (ETag) version of the immutability policy to update. A value of "*" can be used +// to apply the operation only if the immutability policy already exists. If omitted, this operation will +// always be applied. +func (client BlobContainersClient) LockImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (result ImmutabilityPolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.LockImmutabilityPolicy") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: containerName, + Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.BlobContainersClient", "LockImmutabilityPolicy", err.Error()) + } + + req, err := client.LockImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, ifMatch) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "LockImmutabilityPolicy", nil, 
"Failure preparing request") + return + } + + resp, err := client.LockImmutabilityPolicySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "LockImmutabilityPolicy", resp, "Failure sending request") + return + } + + result, err = client.LockImmutabilityPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "LockImmutabilityPolicy", resp, "Failure responding to request") + return + } + + return +} + +// LockImmutabilityPolicyPreparer prepares the LockImmutabilityPolicy request. +func (client BlobContainersClient) LockImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "containerName": autorest.Encode("path", containerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/lock", pathParameters), + autorest.WithQueryParameters(queryParameters), + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// LockImmutabilityPolicySender sends the LockImmutabilityPolicy request. The method will close the +// http.Response Body if it receives an error. +func (client BlobContainersClient) LockImmutabilityPolicySender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// LockImmutabilityPolicyResponder handles the response to the LockImmutabilityPolicy request. The method always +// closes the http.Response Body. +func (client BlobContainersClient) LockImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ObjectLevelWorm this operation migrates a blob container from container level WORM to object level immutability +// enabled container. Prerequisites require a container level immutability policy either in locked or unlocked state, +// Account level versioning must be enabled and there should be no Legal hold on the container. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// containerName - the name of the blob container within the specified storage account. Blob container names +// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. 
Every +// dash (-) character must be immediately preceded and followed by a letter or number. +func (client BlobContainersClient) ObjectLevelWorm(ctx context.Context, resourceGroupName string, accountName string, containerName string) (result BlobContainersObjectLevelWormFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.ObjectLevelWorm") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: containerName, + Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.BlobContainersClient", "ObjectLevelWorm", err.Error()) + } + + req, err := client.ObjectLevelWormPreparer(ctx, resourceGroupName, accountName, containerName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ObjectLevelWorm", nil, "Failure preparing request") + return + } + + result, err = client.ObjectLevelWormSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ObjectLevelWorm", result.Response(), "Failure sending request") + return + } + + return +} + +// ObjectLevelWormPreparer prepares the ObjectLevelWorm request. +func (client BlobContainersClient) ObjectLevelWormPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "containerName": autorest.Encode("path", containerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/migrate", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ObjectLevelWormSender sends the ObjectLevelWorm request. The method will close the +// http.Response Body if it receives an error. 
+func (client BlobContainersClient) ObjectLevelWormSender(req *http.Request) (future BlobContainersObjectLevelWormFuture, err error) { + var resp *http.Response + future.FutureAPI = &azure.Future{} + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// ObjectLevelWormResponder handles the response to the ObjectLevelWorm request. The method always +// closes the http.Response Body. +func (client BlobContainersClient) ObjectLevelWormResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// SetLegalHold sets legal hold tags. Setting the same tag results in an idempotent operation. SetLegalHold follows an +// append pattern and does not clear out the existing tags that are not specified in the request. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// containerName - the name of the blob container within the specified storage account. Blob container names +// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every +// dash (-) character must be immediately preceded and followed by a letter or number. +// legalHold - the LegalHold property that will be set to a blob container. 
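For orientation, here is a minimal usage sketch for the two container operations just shown, LockImmutabilityPolicy and ObjectLevelWorm. The subscription ID, resource names, the NewBlobContainersClient constructor and the credential setup via go-autorest's auth helper are illustrative assumptions rather than anything taken from this diff; ObjectLevelWorm returns a long-running-operation future, so the sketch waits on it before proceeding.

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Placeholder identifiers; substitute real values.
	subscriptionID := "00000000-0000-0000-0000-000000000000"
	rg, account, container := "example-resources", "examplestorageacct", "example-container"

	client := storage.NewBlobContainersClient(subscriptionID)
	authorizer, err := auth.NewAuthorizerFromEnvironment() // assumes ARM credentials in the environment
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	ctx := context.Background()

	// Lock the container's immutability policy; "*" applies the lock only if a policy already exists.
	if _, err := client.LockImmutabilityPolicy(ctx, rg, account, container, "*"); err != nil {
		log.Fatal(err)
	}

	// Migrate the container to object-level immutability; this is a long-running operation,
	// so wait on the returned future before relying on the result.
	future, err := client.ObjectLevelWorm(ctx, rg, account, container)
	if err != nil {
		log.Fatal(err)
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		log.Fatal(err)
	}
}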
+func (client BlobContainersClient) SetLegalHold(ctx context.Context, resourceGroupName string, accountName string, containerName string, legalHold LegalHold) (result LegalHold, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.SetLegalHold") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: containerName, + Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: legalHold, + Constraints: []validation.Constraint{{Target: "legalHold.Tags", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.BlobContainersClient", "SetLegalHold", err.Error()) + } + + req, err := client.SetLegalHoldPreparer(ctx, resourceGroupName, accountName, containerName, legalHold) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "SetLegalHold", nil, "Failure preparing request") + return + } + + resp, err := client.SetLegalHoldSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "SetLegalHold", resp, "Failure sending request") + return + } + + result, err = client.SetLegalHoldResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "SetLegalHold", resp, "Failure responding to request") + return + } + + return +} + +// SetLegalHoldPreparer prepares the SetLegalHold request. 
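A short sketch of calling SetLegalHold as defined above. It assumes a BlobContainersClient configured as in the previous sketch, a placeholder tag value, and that Tags is the pointer-to-slice field which the validation block requires to be non-nil.

// addLegalHoldTag appends a legal-hold tag to a container; existing tags on the
// container are preserved, per the append semantics described above.
func addLegalHoldTag(ctx context.Context, client storage.BlobContainersClient, rg, account, container string) error {
	hold := storage.LegalHold{Tags: &[]string{"audit-2021"}} // placeholder tag
	result, err := client.SetLegalHold(ctx, rg, account, container, hold)
	if err != nil {
		return err
	}
	_ = result.HasLegalHold // reports whether any legal hold remains on the container
	return nil
}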
+func (client BlobContainersClient) SetLegalHoldPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, legalHold LegalHold) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "containerName": autorest.Encode("path", containerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + legalHold.HasLegalHold = nil + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/setLegalHold", pathParameters), + autorest.WithJSON(legalHold), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// SetLegalHoldSender sends the SetLegalHold request. The method will close the +// http.Response Body if it receives an error. +func (client BlobContainersClient) SetLegalHoldSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// SetLegalHoldResponder handles the response to the SetLegalHold request. The method always +// closes the http.Response Body. +func (client BlobContainersClient) SetLegalHoldResponder(resp *http.Response) (result LegalHold, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update updates container properties as specified in request body. Properties not mentioned in the request will be +// unchanged. Update fails if the specified container doesn't already exist. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// containerName - the name of the blob container within the specified storage account. Blob container names +// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every +// dash (-) character must be immediately preceded and followed by a letter or number. +// blobContainer - properties to update for the blob container. 
+func (client BlobContainersClient) Update(ctx context.Context, resourceGroupName string, accountName string, containerName string, blobContainer BlobContainer) (result BlobContainer, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: containerName, + Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.BlobContainersClient", "Update", err.Error()) + } + + req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, containerName, blobContainer) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Update", resp, "Failure responding to request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client BlobContainersClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, blobContainer BlobContainer) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "containerName": autorest.Encode("path", containerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}", pathParameters), + autorest.WithJSON(blobContainer), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. 
The method will close the +// http.Response Body if it receives an error. +func (client BlobContainersClient) UpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client BlobContainersClient) UpdateResponder(resp *http.Response) (result BlobContainer, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/blobinventorypolicies.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/blobinventorypolicies.go new file mode 100644 index 000000000000..b37de3a42973 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/blobinventorypolicies.go @@ -0,0 +1,410 @@ +package storage + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// BlobInventoryPoliciesClient is the the Azure Storage Management API. +type BlobInventoryPoliciesClient struct { + BaseClient +} + +// NewBlobInventoryPoliciesClient creates an instance of the BlobInventoryPoliciesClient client. +func NewBlobInventoryPoliciesClient(subscriptionID string) BlobInventoryPoliciesClient { + return NewBlobInventoryPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewBlobInventoryPoliciesClientWithBaseURI creates an instance of the BlobInventoryPoliciesClient client using a +// custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, +// Azure stack). +func NewBlobInventoryPoliciesClientWithBaseURI(baseURI string, subscriptionID string) BlobInventoryPoliciesClient { + return BlobInventoryPoliciesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate sets the blob inventory policy to the specified storage account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// properties - the blob inventory policy set to a storage account. 
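Since Update is a PATCH-style call and unspecified properties are left unchanged, a payload only needs the fields being modified. A minimal sketch, with the helper name and identifiers as placeholders:

// updateContainer patches selected properties on an existing container; everything
// not included in the payload is left as-is by the service.
func updateContainer(ctx context.Context, client storage.BlobContainersClient, rg, account, container string) error {
	payload := storage.BlobContainer{
		// Populate only the properties to change (e.g. metadata or public access);
		// an empty properties struct amounts to a no-op update.
	}
	_, err := client.Update(ctx, rg, account, container, payload)
	return err
}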
+func (client BlobInventoryPoliciesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, properties BlobInventoryPolicy) (result BlobInventoryPolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobInventoryPoliciesClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: properties, + Constraints: []validation.Constraint{{Target: "properties.BlobInventoryPolicyProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "properties.BlobInventoryPolicyProperties.Policy", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "properties.BlobInventoryPolicyProperties.Policy.Enabled", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "properties.BlobInventoryPolicyProperties.Policy.Type", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "properties.BlobInventoryPolicyProperties.Policy.Rules", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}}}}); err != nil { + return result, validation.NewError("storage.BlobInventoryPoliciesClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, properties) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "CreateOrUpdate", resp, "Failure responding to request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
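A rough sketch of driving the CreateOrUpdate operation above. The policy body is left to the caller, because the validation block requires Policy together with its Enabled, Type and Rules fields to be populated; the helper name is a placeholder.

// setInventoryPolicy pushes a blob inventory policy to the account. The caller is
// expected to fill in properties.BlobInventoryPolicyProperties with an enabled
// policy, a type and at least one rule, matching the validation above.
func setInventoryPolicy(ctx context.Context, client storage.BlobInventoryPoliciesClient, rg, account string, properties storage.BlobInventoryPolicy) error {
	policy, err := client.CreateOrUpdate(ctx, rg, account, properties)
	if err != nil {
		return err
	}
	_ = policy // the service echoes back the stored policy
	return nil
}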
+func (client BlobInventoryPoliciesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, properties BlobInventoryPolicy) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "blobInventoryPolicyName": autorest.Encode("path", "default"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies/{blobInventoryPolicyName}", pathParameters), + autorest.WithJSON(properties), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client BlobInventoryPoliciesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client BlobInventoryPoliciesClient) CreateOrUpdateResponder(resp *http.Response) (result BlobInventoryPolicy, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the blob inventory policy associated with the specified storage account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
+func (client BlobInventoryPoliciesClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobInventoryPoliciesClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.BlobInventoryPoliciesClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "Delete", resp, "Failure responding to request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client BlobInventoryPoliciesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "blobInventoryPolicyName": autorest.Encode("path", "default"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies/{blobInventoryPolicyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client BlobInventoryPoliciesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client BlobInventoryPoliciesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the blob inventory policy associated with the specified storage account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +func (client BlobInventoryPoliciesClient) Get(ctx context.Context, resourceGroupName string, accountName string) (result BlobInventoryPolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobInventoryPoliciesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.BlobInventoryPoliciesClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client BlobInventoryPoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "blobInventoryPolicyName": autorest.Encode("path", "default"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies/{blobInventoryPolicyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client BlobInventoryPoliciesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client BlobInventoryPoliciesClient) GetResponder(resp *http.Response) (result BlobInventoryPolicy, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets the blob inventory policy associated with the specified storage account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
+func (client BlobInventoryPoliciesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result ListBlobInventoryPolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobInventoryPoliciesClient.List") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.BlobInventoryPoliciesClient", "List", err.Error()) + } + + req, err := client.ListPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "List", resp, "Failure responding to request") + return + } + + return +} + +// ListPreparer prepares the List request. +func (client BlobInventoryPoliciesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client BlobInventoryPoliciesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
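For completeness, a small sketch of the read side of the same client, pairing Get (which returns the single default policy) with List (which returns the collection wrapper). Assumes a configured BlobInventoryPoliciesClient and placeholder identifiers.

// readInventoryPolicies fetches the account's default inventory policy and the
// collection view of it; both calls target the same underlying policy resource.
func readInventoryPolicies(ctx context.Context, client storage.BlobInventoryPoliciesClient, rg, account string) error {
	if _, err := client.Get(ctx, rg, account); err != nil {
		return err
	}
	if _, err := client.List(ctx, rg, account); err != nil {
		return err
	}
	return nil
}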
+func (client BlobInventoryPoliciesClient) ListResponder(resp *http.Response) (result ListBlobInventoryPolicy, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/blobservices.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/blobservices.go new file mode 100644 index 000000000000..d9553078ea3e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/blobservices.go @@ -0,0 +1,344 @@ +package storage + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// BlobServicesClient is the the Azure Storage Management API. +type BlobServicesClient struct { + BaseClient +} + +// NewBlobServicesClient creates an instance of the BlobServicesClient client. +func NewBlobServicesClient(subscriptionID string) BlobServicesClient { + return NewBlobServicesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewBlobServicesClientWithBaseURI creates an instance of the BlobServicesClient client using a custom endpoint. Use +// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewBlobServicesClientWithBaseURI(baseURI string, subscriptionID string) BlobServicesClient { + return BlobServicesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// GetServiceProperties gets the properties of a storage account’s Blob service, including properties for Storage +// Analytics and CORS (Cross-Origin Resource Sharing) rules. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
+func (client BlobServicesClient) GetServiceProperties(ctx context.Context, resourceGroupName string, accountName string) (result BlobServiceProperties, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobServicesClient.GetServiceProperties") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.BlobServicesClient", "GetServiceProperties", err.Error()) + } + + req, err := client.GetServicePropertiesPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "GetServiceProperties", nil, "Failure preparing request") + return + } + + resp, err := client.GetServicePropertiesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "GetServiceProperties", resp, "Failure sending request") + return + } + + result, err = client.GetServicePropertiesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "GetServiceProperties", resp, "Failure responding to request") + return + } + + return +} + +// GetServicePropertiesPreparer prepares the GetServiceProperties request. +func (client BlobServicesClient) GetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "BlobServicesName": autorest.Encode("path", "default"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/{BlobServicesName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetServicePropertiesSender sends the GetServiceProperties request. The method will close the +// http.Response Body if it receives an error. 
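A hedged sketch of reading blob service properties with GetServiceProperties. The nested field names (BlobServicePropertiesProperties, DeleteRetentionPolicy, Days) are inferred from the validation constraints used by SetServiceProperties below, and the helper name is a placeholder.

// blobSoftDeleteDays returns the configured blob soft delete retention in days,
// or 0 if no retention policy is reported on the account.
func blobSoftDeleteDays(ctx context.Context, client storage.BlobServicesClient, rg, account string) (int32, error) {
	props, err := client.GetServiceProperties(ctx, rg, account)
	if err != nil {
		return 0, err
	}
	if p := props.BlobServicePropertiesProperties; p != nil && p.DeleteRetentionPolicy != nil && p.DeleteRetentionPolicy.Days != nil {
		return *p.DeleteRetentionPolicy.Days, nil
	}
	return 0, nil
}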
+func (client BlobServicesClient) GetServicePropertiesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetServicePropertiesResponder handles the response to the GetServiceProperties request. The method always +// closes the http.Response Body. +func (client BlobServicesClient) GetServicePropertiesResponder(resp *http.Response) (result BlobServiceProperties, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List list blob services of storage account. It returns a collection of one object named default. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +func (client BlobServicesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result BlobServiceItems, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobServicesClient.List") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.BlobServicesClient", "List", err.Error()) + } + + req, err := client.ListPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "List", resp, "Failure responding to request") + return + } + + return +} + +// ListPreparer prepares the List request. 
+func (client BlobServicesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client BlobServicesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client BlobServicesClient) ListResponder(resp *http.Response) (result BlobServiceItems, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// SetServiceProperties sets the properties of a storage account’s Blob service, including properties for Storage +// Analytics and CORS (Cross-Origin Resource Sharing) rules. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// parameters - the properties of a storage account’s Blob service, including properties for Storage Analytics +// and CORS (Cross-Origin Resource Sharing) rules. 
+func (client BlobServicesClient) SetServiceProperties(ctx context.Context, resourceGroupName string, accountName string, parameters BlobServiceProperties) (result BlobServiceProperties, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobServicesClient.SetServiceProperties") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.DeleteRetentionPolicy", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.DeleteRetentionPolicy.Days", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.DeleteRetentionPolicy.Days", Name: validation.InclusiveMaximum, Rule: int64(365), Chain: nil}, + {Target: "parameters.BlobServicePropertiesProperties.DeleteRetentionPolicy.Days", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, + }}, + }}, + {Target: "parameters.BlobServicePropertiesProperties.ChangeFeed", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.ChangeFeed.RetentionInDays", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.ChangeFeed.RetentionInDays", Name: validation.InclusiveMaximum, Rule: int64(146000), Chain: nil}, + {Target: "parameters.BlobServicePropertiesProperties.ChangeFeed.RetentionInDays", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, + }}, + }}, + {Target: "parameters.BlobServicePropertiesProperties.RestorePolicy", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.RestorePolicy.Enabled", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.BlobServicePropertiesProperties.RestorePolicy.Days", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.RestorePolicy.Days", Name: validation.InclusiveMaximum, Rule: int64(365), Chain: nil}, + {Target: "parameters.BlobServicePropertiesProperties.RestorePolicy.Days", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, + }}, + }}, + {Target: "parameters.BlobServicePropertiesProperties.ContainerDeleteRetentionPolicy", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: 
"parameters.BlobServicePropertiesProperties.ContainerDeleteRetentionPolicy.Days", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.ContainerDeleteRetentionPolicy.Days", Name: validation.InclusiveMaximum, Rule: int64(365), Chain: nil}, + {Target: "parameters.BlobServicePropertiesProperties.ContainerDeleteRetentionPolicy.Days", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, + }}, + }}, + {Target: "parameters.BlobServicePropertiesProperties.LastAccessTimeTrackingPolicy", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.LastAccessTimeTrackingPolicy.Enable", Name: validation.Null, Rule: true, Chain: nil}}}, + }}}}}); err != nil { + return result, validation.NewError("storage.BlobServicesClient", "SetServiceProperties", err.Error()) + } + + req, err := client.SetServicePropertiesPreparer(ctx, resourceGroupName, accountName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "SetServiceProperties", nil, "Failure preparing request") + return + } + + resp, err := client.SetServicePropertiesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "SetServiceProperties", resp, "Failure sending request") + return + } + + result, err = client.SetServicePropertiesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "SetServiceProperties", resp, "Failure responding to request") + return + } + + return +} + +// SetServicePropertiesPreparer prepares the SetServiceProperties request. +func (client BlobServicesClient) SetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters BlobServiceProperties) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "BlobServicesName": autorest.Encode("path", "default"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + parameters.Sku = nil + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/{BlobServicesName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// SetServicePropertiesSender sends the SetServiceProperties request. The method will close the +// http.Response Body if it receives an error. +func (client BlobServicesClient) SetServicePropertiesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// SetServicePropertiesResponder handles the response to the SetServiceProperties request. The method always +// closes the http.Response Body. 
+func (client BlobServicesClient) SetServicePropertiesResponder(resp *http.Response) (result BlobServiceProperties, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/client.go similarity index 79% rename from vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/client.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/client.go index caf0310e4830..4d3fa84b6325 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/client.go @@ -1,7 +1,7 @@ -// Package account implements the Azure ARM Account service API version 2016-11-01. +// Package storage implements the Azure ARM Storage service API version 2021-04-01. // -// Creates an Azure Data Lake Store account management client. -package account +// The Azure Storage Management API. +package storage // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. @@ -14,11 +14,11 @@ import ( ) const ( - // DefaultBaseURI is the default URI used for the service Account + // DefaultBaseURI is the default URI used for the service Storage DefaultBaseURI = "https://management.azure.com" ) -// BaseClient is the base client for Account. +// BaseClient is the base client for Storage. type BaseClient struct { autorest.Client BaseURI string diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/deletedaccounts.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/deletedaccounts.go new file mode 100644 index 000000000000..4019996f917a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/deletedaccounts.go @@ -0,0 +1,236 @@ +package storage + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// DeletedAccountsClient is the the Azure Storage Management API. +type DeletedAccountsClient struct { + BaseClient +} + +// NewDeletedAccountsClient creates an instance of the DeletedAccountsClient client. +func NewDeletedAccountsClient(subscriptionID string) DeletedAccountsClient { + return NewDeletedAccountsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewDeletedAccountsClientWithBaseURI creates an instance of the DeletedAccountsClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). 
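To round out the pair, a sketch of enabling blob soft delete through SetServiceProperties. The pointer helpers from github.com/Azure/go-autorest/autorest/to and the DeleteRetentionPolicy model fields are assumptions based on the validation constraints shown earlier; the seven-day window is an arbitrary example value.

// enableSoftDelete turns on blob soft delete with a 7 day retention window.
func enableSoftDelete(ctx context.Context, client storage.BlobServicesClient, rg, account string) error {
	params := storage.BlobServiceProperties{
		BlobServicePropertiesProperties: &storage.BlobServicePropertiesProperties{
			DeleteRetentionPolicy: &storage.DeleteRetentionPolicy{
				Enabled: to.BoolPtr(true),
				Days:    to.Int32Ptr(7), // must fall between 1 and 365 per the validation above
			},
		},
	}
	_, err := client.SetServiceProperties(ctx, rg, account, params)
	return err
}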
+func NewDeletedAccountsClientWithBaseURI(baseURI string, subscriptionID string) DeletedAccountsClient { + return DeletedAccountsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get get properties of specified deleted account resource. +// Parameters: +// deletedAccountName - name of the deleted storage account. +// location - the location of the deleted storage account. +func (client DeletedAccountsClient) Get(ctx context.Context, deletedAccountName string, location string) (result DeletedAccount, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeletedAccountsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: deletedAccountName, + Constraints: []validation.Constraint{{Target: "deletedAccountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "deletedAccountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.DeletedAccountsClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, deletedAccountName, location) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. +func (client DeletedAccountsClient) GetPreparer(ctx context.Context, deletedAccountName string, location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deletedAccountName": autorest.Encode("path", deletedAccountName), + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/deletedAccounts/{deletedAccountName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client DeletedAccountsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client DeletedAccountsClient) GetResponder(resp *http.Response) (result DeletedAccount, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists deleted accounts under the subscription. +func (client DeletedAccountsClient) List(ctx context.Context) (result DeletedAccountListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeletedAccountsClient.List") + defer func() { + sc := -1 + if result.dalr.Response.Response != nil { + sc = result.dalr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.DeletedAccountsClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.dalr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "List", resp, "Failure sending request") + return + } + + result.dalr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "List", resp, "Failure responding to request") + return + } + if result.dalr.hasNextLink() && result.dalr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListPreparer prepares the List request. +func (client DeletedAccountsClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/deletedAccounts", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client DeletedAccountsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client DeletedAccountsClient) ListResponder(resp *http.Response) (result DeletedAccountListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client DeletedAccountsClient) listNextResults(ctx context.Context, lastResults DeletedAccountListResult) (result DeletedAccountListResult, err error) { + req, err := lastResults.deletedAccountListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client DeletedAccountsClient) ListComplete(ctx context.Context) (result DeletedAccountListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeletedAccountsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/encryptionscopes.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/encryptionscopes.go new file mode 100644 index 000000000000..a904eeb7ebec --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/encryptionscopes.go @@ -0,0 +1,469 @@ +package storage + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// EncryptionScopesClient is the the Azure Storage Management API. +type EncryptionScopesClient struct { + BaseClient +} + +// NewEncryptionScopesClient creates an instance of the EncryptionScopesClient client. +func NewEncryptionScopesClient(subscriptionID string) EncryptionScopesClient { + return NewEncryptionScopesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewEncryptionScopesClientWithBaseURI creates an instance of the EncryptionScopesClient client using a custom +// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). +func NewEncryptionScopesClientWithBaseURI(baseURI string, subscriptionID string) EncryptionScopesClient { + return EncryptionScopesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get returns the properties for the specified encryption scope. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. 
Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// encryptionScopeName - the name of the encryption scope within the specified storage account. Encryption +// scope names must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) +// only. Every dash (-) character must be immediately preceded and followed by a letter or number. +func (client EncryptionScopesClient) Get(ctx context.Context, resourceGroupName string, accountName string, encryptionScopeName string) (result EncryptionScope, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/EncryptionScopesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: encryptionScopeName, + Constraints: []validation.Constraint{{Target: "encryptionScopeName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "encryptionScopeName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.EncryptionScopesClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, accountName, encryptionScopeName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client EncryptionScopesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, encryptionScopeName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "encryptionScopeName": autorest.Encode("path", encryptionScopeName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/encryptionScopes/{encryptionScopeName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client EncryptionScopesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client EncryptionScopesClient) GetResponder(resp *http.Response) (result EncryptionScope, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all the encryption scopes available under the specified storage account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
+func (client EncryptionScopesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result EncryptionScopeListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/EncryptionScopesClient.List") + defer func() { + sc := -1 + if result.eslr.Response.Response != nil { + sc = result.eslr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.EncryptionScopesClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.eslr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "List", resp, "Failure sending request") + return + } + + result.eslr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "List", resp, "Failure responding to request") + return + } + if result.eslr.hasNextLink() && result.eslr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListPreparer prepares the List request. +func (client EncryptionScopesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/encryptionScopes", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client EncryptionScopesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client EncryptionScopesClient) ListResponder(resp *http.Response) (result EncryptionScopeListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client EncryptionScopesClient) listNextResults(ctx context.Context, lastResults EncryptionScopeListResult) (result EncryptionScopeListResult, err error) { + req, err := lastResults.encryptionScopeListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client EncryptionScopesClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string) (result EncryptionScopeListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/EncryptionScopesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, accountName) + return +} + +// Patch update encryption scope properties as specified in the request body. Update fails if the specified encryption +// scope does not already exist. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// encryptionScopeName - the name of the encryption scope within the specified storage account. Encryption +// scope names must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) +// only. Every dash (-) character must be immediately preceded and followed by a letter or number. +// encryptionScope - encryption scope properties to be used for the update. 
+func (client EncryptionScopesClient) Patch(ctx context.Context, resourceGroupName string, accountName string, encryptionScopeName string, encryptionScope EncryptionScope) (result EncryptionScope, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/EncryptionScopesClient.Patch") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: encryptionScopeName, + Constraints: []validation.Constraint{{Target: "encryptionScopeName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "encryptionScopeName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.EncryptionScopesClient", "Patch", err.Error()) + } + + req, err := client.PatchPreparer(ctx, resourceGroupName, accountName, encryptionScopeName, encryptionScope) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Patch", nil, "Failure preparing request") + return + } + + resp, err := client.PatchSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Patch", resp, "Failure sending request") + return + } + + result, err = client.PatchResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Patch", resp, "Failure responding to request") + return + } + + return +} + +// PatchPreparer prepares the Patch request. 
+func (client EncryptionScopesClient) PatchPreparer(ctx context.Context, resourceGroupName string, accountName string, encryptionScopeName string, encryptionScope EncryptionScope) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "encryptionScopeName": autorest.Encode("path", encryptionScopeName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/encryptionScopes/{encryptionScopeName}", pathParameters), + autorest.WithJSON(encryptionScope), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// PatchSender sends the Patch request. The method will close the +// http.Response Body if it receives an error. +func (client EncryptionScopesClient) PatchSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// PatchResponder handles the response to the Patch request. The method always +// closes the http.Response Body. +func (client EncryptionScopesClient) PatchResponder(resp *http.Response) (result EncryptionScope, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Put synchronously creates or updates an encryption scope under the specified storage account. If an encryption scope +// is already created and a subsequent request is issued with different properties, the encryption scope properties +// will be updated per the specified request. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// encryptionScopeName - the name of the encryption scope within the specified storage account. Encryption +// scope names must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) +// only. Every dash (-) character must be immediately preceded and followed by a letter or number. +// encryptionScope - encryption scope properties to be used for the create or update. 
+func (client EncryptionScopesClient) Put(ctx context.Context, resourceGroupName string, accountName string, encryptionScopeName string, encryptionScope EncryptionScope) (result EncryptionScope, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/EncryptionScopesClient.Put") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: encryptionScopeName, + Constraints: []validation.Constraint{{Target: "encryptionScopeName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "encryptionScopeName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.EncryptionScopesClient", "Put", err.Error()) + } + + req, err := client.PutPreparer(ctx, resourceGroupName, accountName, encryptionScopeName, encryptionScope) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Put", nil, "Failure preparing request") + return + } + + resp, err := client.PutSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Put", resp, "Failure sending request") + return + } + + result, err = client.PutResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Put", resp, "Failure responding to request") + return + } + + return +} + +// PutPreparer prepares the Put request. +func (client EncryptionScopesClient) PutPreparer(ctx context.Context, resourceGroupName string, accountName string, encryptionScopeName string, encryptionScope EncryptionScope) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "encryptionScopeName": autorest.Encode("path", encryptionScopeName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/encryptionScopes/{encryptionScopeName}", pathParameters), + autorest.WithJSON(encryptionScope), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// PutSender sends the Put request. 
The method will close the +// http.Response Body if it receives an error. +func (client EncryptionScopesClient) PutSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// PutResponder handles the response to the Put request. The method always +// closes the http.Response Body. +func (client EncryptionScopesClient) PutResponder(resp *http.Response) (result EncryptionScope, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/enums.go new file mode 100644 index 000000000000..e46f629d1ee7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/enums.go @@ -0,0 +1,924 @@ +package storage + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// AccessTier enumerates the values for access tier. +type AccessTier string + +const ( + // AccessTierCool ... + AccessTierCool AccessTier = "Cool" + // AccessTierHot ... + AccessTierHot AccessTier = "Hot" +) + +// PossibleAccessTierValues returns an array of possible values for the AccessTier const type. +func PossibleAccessTierValues() []AccessTier { + return []AccessTier{AccessTierCool, AccessTierHot} +} + +// AccountExpand enumerates the values for account expand. +type AccountExpand string + +const ( + // AccountExpandBlobRestoreStatus ... + AccountExpandBlobRestoreStatus AccountExpand = "blobRestoreStatus" + // AccountExpandGeoReplicationStats ... + AccountExpandGeoReplicationStats AccountExpand = "geoReplicationStats" +) + +// PossibleAccountExpandValues returns an array of possible values for the AccountExpand const type. +func PossibleAccountExpandValues() []AccountExpand { + return []AccountExpand{AccountExpandBlobRestoreStatus, AccountExpandGeoReplicationStats} +} + +// AccountStatus enumerates the values for account status. +type AccountStatus string + +const ( + // AccountStatusAvailable ... + AccountStatusAvailable AccountStatus = "available" + // AccountStatusUnavailable ... + AccountStatusUnavailable AccountStatus = "unavailable" +) + +// PossibleAccountStatusValues returns an array of possible values for the AccountStatus const type. +func PossibleAccountStatusValues() []AccountStatus { + return []AccountStatus{AccountStatusAvailable, AccountStatusUnavailable} +} + +// Action enumerates the values for action. +type Action string + +const ( + // ActionAllow ... + ActionAllow Action = "Allow" +) + +// PossibleActionValues returns an array of possible values for the Action const type. +func PossibleActionValues() []Action { + return []Action{ActionAllow} +} + +// Action1 enumerates the values for action 1. +type Action1 string + +const ( + // Action1Acquire ... + Action1Acquire Action1 = "Acquire" + // Action1Break ... + Action1Break Action1 = "Break" + // Action1Change ... + Action1Change Action1 = "Change" + // Action1Release ... + Action1Release Action1 = "Release" + // Action1Renew ... 
+ Action1Renew Action1 = "Renew" +) + +// PossibleAction1Values returns an array of possible values for the Action1 const type. +func PossibleAction1Values() []Action1 { + return []Action1{Action1Acquire, Action1Break, Action1Change, Action1Release, Action1Renew} +} + +// BlobRestoreProgressStatus enumerates the values for blob restore progress status. +type BlobRestoreProgressStatus string + +const ( + // BlobRestoreProgressStatusComplete ... + BlobRestoreProgressStatusComplete BlobRestoreProgressStatus = "Complete" + // BlobRestoreProgressStatusFailed ... + BlobRestoreProgressStatusFailed BlobRestoreProgressStatus = "Failed" + // BlobRestoreProgressStatusInProgress ... + BlobRestoreProgressStatusInProgress BlobRestoreProgressStatus = "InProgress" +) + +// PossibleBlobRestoreProgressStatusValues returns an array of possible values for the BlobRestoreProgressStatus const type. +func PossibleBlobRestoreProgressStatusValues() []BlobRestoreProgressStatus { + return []BlobRestoreProgressStatus{BlobRestoreProgressStatusComplete, BlobRestoreProgressStatusFailed, BlobRestoreProgressStatusInProgress} +} + +// Bypass enumerates the values for bypass. +type Bypass string + +const ( + // BypassAzureServices ... + BypassAzureServices Bypass = "AzureServices" + // BypassLogging ... + BypassLogging Bypass = "Logging" + // BypassMetrics ... + BypassMetrics Bypass = "Metrics" + // BypassNone ... + BypassNone Bypass = "None" +) + +// PossibleBypassValues returns an array of possible values for the Bypass const type. +func PossibleBypassValues() []Bypass { + return []Bypass{BypassAzureServices, BypassLogging, BypassMetrics, BypassNone} +} + +// CreatedByType enumerates the values for created by type. +type CreatedByType string + +const ( + // CreatedByTypeApplication ... + CreatedByTypeApplication CreatedByType = "Application" + // CreatedByTypeKey ... + CreatedByTypeKey CreatedByType = "Key" + // CreatedByTypeManagedIdentity ... + CreatedByTypeManagedIdentity CreatedByType = "ManagedIdentity" + // CreatedByTypeUser ... + CreatedByTypeUser CreatedByType = "User" +) + +// PossibleCreatedByTypeValues returns an array of possible values for the CreatedByType const type. +func PossibleCreatedByTypeValues() []CreatedByType { + return []CreatedByType{CreatedByTypeApplication, CreatedByTypeKey, CreatedByTypeManagedIdentity, CreatedByTypeUser} +} + +// DefaultAction enumerates the values for default action. +type DefaultAction string + +const ( + // DefaultActionAllow ... + DefaultActionAllow DefaultAction = "Allow" + // DefaultActionDeny ... + DefaultActionDeny DefaultAction = "Deny" +) + +// PossibleDefaultActionValues returns an array of possible values for the DefaultAction const type. +func PossibleDefaultActionValues() []DefaultAction { + return []DefaultAction{DefaultActionAllow, DefaultActionDeny} +} + +// DefaultSharePermission enumerates the values for default share permission. +type DefaultSharePermission string + +const ( + // DefaultSharePermissionNone ... + DefaultSharePermissionNone DefaultSharePermission = "None" + // DefaultSharePermissionStorageFileDataSmbShareContributor ... + DefaultSharePermissionStorageFileDataSmbShareContributor DefaultSharePermission = "StorageFileDataSmbShareContributor" + // DefaultSharePermissionStorageFileDataSmbShareElevatedContributor ... + DefaultSharePermissionStorageFileDataSmbShareElevatedContributor DefaultSharePermission = "StorageFileDataSmbShareElevatedContributor" + // DefaultSharePermissionStorageFileDataSmbShareOwner ... 
+ DefaultSharePermissionStorageFileDataSmbShareOwner DefaultSharePermission = "StorageFileDataSmbShareOwner" + // DefaultSharePermissionStorageFileDataSmbShareReader ... + DefaultSharePermissionStorageFileDataSmbShareReader DefaultSharePermission = "StorageFileDataSmbShareReader" +) + +// PossibleDefaultSharePermissionValues returns an array of possible values for the DefaultSharePermission const type. +func PossibleDefaultSharePermissionValues() []DefaultSharePermission { + return []DefaultSharePermission{DefaultSharePermissionNone, DefaultSharePermissionStorageFileDataSmbShareContributor, DefaultSharePermissionStorageFileDataSmbShareElevatedContributor, DefaultSharePermissionStorageFileDataSmbShareOwner, DefaultSharePermissionStorageFileDataSmbShareReader} +} + +// DirectoryServiceOptions enumerates the values for directory service options. +type DirectoryServiceOptions string + +const ( + // DirectoryServiceOptionsAADDS ... + DirectoryServiceOptionsAADDS DirectoryServiceOptions = "AADDS" + // DirectoryServiceOptionsAD ... + DirectoryServiceOptionsAD DirectoryServiceOptions = "AD" + // DirectoryServiceOptionsNone ... + DirectoryServiceOptionsNone DirectoryServiceOptions = "None" +) + +// PossibleDirectoryServiceOptionsValues returns an array of possible values for the DirectoryServiceOptions const type. +func PossibleDirectoryServiceOptionsValues() []DirectoryServiceOptions { + return []DirectoryServiceOptions{DirectoryServiceOptionsAADDS, DirectoryServiceOptionsAD, DirectoryServiceOptionsNone} +} + +// EnabledProtocols enumerates the values for enabled protocols. +type EnabledProtocols string + +const ( + // EnabledProtocolsNFS ... + EnabledProtocolsNFS EnabledProtocols = "NFS" + // EnabledProtocolsSMB ... + EnabledProtocolsSMB EnabledProtocols = "SMB" +) + +// PossibleEnabledProtocolsValues returns an array of possible values for the EnabledProtocols const type. +func PossibleEnabledProtocolsValues() []EnabledProtocols { + return []EnabledProtocols{EnabledProtocolsNFS, EnabledProtocolsSMB} +} + +// EncryptionScopeSource enumerates the values for encryption scope source. +type EncryptionScopeSource string + +const ( + // EncryptionScopeSourceMicrosoftKeyVault ... + EncryptionScopeSourceMicrosoftKeyVault EncryptionScopeSource = "Microsoft.KeyVault" + // EncryptionScopeSourceMicrosoftStorage ... + EncryptionScopeSourceMicrosoftStorage EncryptionScopeSource = "Microsoft.Storage" +) + +// PossibleEncryptionScopeSourceValues returns an array of possible values for the EncryptionScopeSource const type. +func PossibleEncryptionScopeSourceValues() []EncryptionScopeSource { + return []EncryptionScopeSource{EncryptionScopeSourceMicrosoftKeyVault, EncryptionScopeSourceMicrosoftStorage} +} + +// EncryptionScopeState enumerates the values for encryption scope state. +type EncryptionScopeState string + +const ( + // EncryptionScopeStateDisabled ... + EncryptionScopeStateDisabled EncryptionScopeState = "Disabled" + // EncryptionScopeStateEnabled ... + EncryptionScopeStateEnabled EncryptionScopeState = "Enabled" +) + +// PossibleEncryptionScopeStateValues returns an array of possible values for the EncryptionScopeState const type. +func PossibleEncryptionScopeStateValues() []EncryptionScopeState { + return []EncryptionScopeState{EncryptionScopeStateDisabled, EncryptionScopeStateEnabled} +} + +// ExtendedLocationTypes enumerates the values for extended location types. +type ExtendedLocationTypes string + +const ( + // ExtendedLocationTypesEdgeZone ... 
+ ExtendedLocationTypesEdgeZone ExtendedLocationTypes = "EdgeZone" +) + +// PossibleExtendedLocationTypesValues returns an array of possible values for the ExtendedLocationTypes const type. +func PossibleExtendedLocationTypesValues() []ExtendedLocationTypes { + return []ExtendedLocationTypes{ExtendedLocationTypesEdgeZone} +} + +// Format enumerates the values for format. +type Format string + +const ( + // FormatCsv ... + FormatCsv Format = "Csv" + // FormatParquet ... + FormatParquet Format = "Parquet" +) + +// PossibleFormatValues returns an array of possible values for the Format const type. +func PossibleFormatValues() []Format { + return []Format{FormatCsv, FormatParquet} +} + +// GeoReplicationStatus enumerates the values for geo replication status. +type GeoReplicationStatus string + +const ( + // GeoReplicationStatusBootstrap ... + GeoReplicationStatusBootstrap GeoReplicationStatus = "Bootstrap" + // GeoReplicationStatusLive ... + GeoReplicationStatusLive GeoReplicationStatus = "Live" + // GeoReplicationStatusUnavailable ... + GeoReplicationStatusUnavailable GeoReplicationStatus = "Unavailable" +) + +// PossibleGeoReplicationStatusValues returns an array of possible values for the GeoReplicationStatus const type. +func PossibleGeoReplicationStatusValues() []GeoReplicationStatus { + return []GeoReplicationStatus{GeoReplicationStatusBootstrap, GeoReplicationStatusLive, GeoReplicationStatusUnavailable} +} + +// HTTPProtocol enumerates the values for http protocol. +type HTTPProtocol string + +const ( + // HTTPProtocolHTTPS ... + HTTPProtocolHTTPS HTTPProtocol = "https" + // HTTPProtocolHttpshttp ... + HTTPProtocolHttpshttp HTTPProtocol = "https,http" +) + +// PossibleHTTPProtocolValues returns an array of possible values for the HTTPProtocol const type. +func PossibleHTTPProtocolValues() []HTTPProtocol { + return []HTTPProtocol{HTTPProtocolHTTPS, HTTPProtocolHttpshttp} +} + +// IdentityType enumerates the values for identity type. +type IdentityType string + +const ( + // IdentityTypeNone ... + IdentityTypeNone IdentityType = "None" + // IdentityTypeSystemAssigned ... + IdentityTypeSystemAssigned IdentityType = "SystemAssigned" + // IdentityTypeSystemAssignedUserAssigned ... + IdentityTypeSystemAssignedUserAssigned IdentityType = "SystemAssigned,UserAssigned" + // IdentityTypeUserAssigned ... + IdentityTypeUserAssigned IdentityType = "UserAssigned" +) + +// PossibleIdentityTypeValues returns an array of possible values for the IdentityType const type. +func PossibleIdentityTypeValues() []IdentityType { + return []IdentityType{IdentityTypeNone, IdentityTypeSystemAssigned, IdentityTypeSystemAssignedUserAssigned, IdentityTypeUserAssigned} +} + +// ImmutabilityPolicyState enumerates the values for immutability policy state. +type ImmutabilityPolicyState string + +const ( + // ImmutabilityPolicyStateLocked ... + ImmutabilityPolicyStateLocked ImmutabilityPolicyState = "Locked" + // ImmutabilityPolicyStateUnlocked ... + ImmutabilityPolicyStateUnlocked ImmutabilityPolicyState = "Unlocked" +) + +// PossibleImmutabilityPolicyStateValues returns an array of possible values for the ImmutabilityPolicyState const type. +func PossibleImmutabilityPolicyStateValues() []ImmutabilityPolicyState { + return []ImmutabilityPolicyState{ImmutabilityPolicyStateLocked, ImmutabilityPolicyStateUnlocked} +} + +// ImmutabilityPolicyUpdateType enumerates the values for immutability policy update type. +type ImmutabilityPolicyUpdateType string + +const ( + // ImmutabilityPolicyUpdateTypeExtend ... 
+ ImmutabilityPolicyUpdateTypeExtend ImmutabilityPolicyUpdateType = "extend" + // ImmutabilityPolicyUpdateTypeLock ... + ImmutabilityPolicyUpdateTypeLock ImmutabilityPolicyUpdateType = "lock" + // ImmutabilityPolicyUpdateTypePut ... + ImmutabilityPolicyUpdateTypePut ImmutabilityPolicyUpdateType = "put" +) + +// PossibleImmutabilityPolicyUpdateTypeValues returns an array of possible values for the ImmutabilityPolicyUpdateType const type. +func PossibleImmutabilityPolicyUpdateTypeValues() []ImmutabilityPolicyUpdateType { + return []ImmutabilityPolicyUpdateType{ImmutabilityPolicyUpdateTypeExtend, ImmutabilityPolicyUpdateTypeLock, ImmutabilityPolicyUpdateTypePut} +} + +// KeyPermission enumerates the values for key permission. +type KeyPermission string + +const ( + // KeyPermissionFull ... + KeyPermissionFull KeyPermission = "Full" + // KeyPermissionRead ... + KeyPermissionRead KeyPermission = "Read" +) + +// PossibleKeyPermissionValues returns an array of possible values for the KeyPermission const type. +func PossibleKeyPermissionValues() []KeyPermission { + return []KeyPermission{KeyPermissionFull, KeyPermissionRead} +} + +// KeySource enumerates the values for key source. +type KeySource string + +const ( + // KeySourceMicrosoftKeyvault ... + KeySourceMicrosoftKeyvault KeySource = "Microsoft.Keyvault" + // KeySourceMicrosoftStorage ... + KeySourceMicrosoftStorage KeySource = "Microsoft.Storage" +) + +// PossibleKeySourceValues returns an array of possible values for the KeySource const type. +func PossibleKeySourceValues() []KeySource { + return []KeySource{KeySourceMicrosoftKeyvault, KeySourceMicrosoftStorage} +} + +// KeyType enumerates the values for key type. +type KeyType string + +const ( + // KeyTypeAccount ... + KeyTypeAccount KeyType = "Account" + // KeyTypeService ... + KeyTypeService KeyType = "Service" +) + +// PossibleKeyTypeValues returns an array of possible values for the KeyType const type. +func PossibleKeyTypeValues() []KeyType { + return []KeyType{KeyTypeAccount, KeyTypeService} +} + +// Kind enumerates the values for kind. +type Kind string + +const ( + // KindBlobStorage ... + KindBlobStorage Kind = "BlobStorage" + // KindBlockBlobStorage ... + KindBlockBlobStorage Kind = "BlockBlobStorage" + // KindFileStorage ... + KindFileStorage Kind = "FileStorage" + // KindStorage ... + KindStorage Kind = "Storage" + // KindStorageV2 ... + KindStorageV2 Kind = "StorageV2" +) + +// PossibleKindValues returns an array of possible values for the Kind const type. +func PossibleKindValues() []Kind { + return []Kind{KindBlobStorage, KindBlockBlobStorage, KindFileStorage, KindStorage, KindStorageV2} +} + +// LargeFileSharesState enumerates the values for large file shares state. +type LargeFileSharesState string + +const ( + // LargeFileSharesStateDisabled ... + LargeFileSharesStateDisabled LargeFileSharesState = "Disabled" + // LargeFileSharesStateEnabled ... + LargeFileSharesStateEnabled LargeFileSharesState = "Enabled" +) + +// PossibleLargeFileSharesStateValues returns an array of possible values for the LargeFileSharesState const type. +func PossibleLargeFileSharesStateValues() []LargeFileSharesState { + return []LargeFileSharesState{LargeFileSharesStateDisabled, LargeFileSharesStateEnabled} +} + +// LeaseDuration enumerates the values for lease duration. +type LeaseDuration string + +const ( + // LeaseDurationFixed ... + LeaseDurationFixed LeaseDuration = "Fixed" + // LeaseDurationInfinite ... 
+ LeaseDurationInfinite LeaseDuration = "Infinite" +) + +// PossibleLeaseDurationValues returns an array of possible values for the LeaseDuration const type. +func PossibleLeaseDurationValues() []LeaseDuration { + return []LeaseDuration{LeaseDurationFixed, LeaseDurationInfinite} +} + +// LeaseShareAction enumerates the values for lease share action. +type LeaseShareAction string + +const ( + // LeaseShareActionAcquire ... + LeaseShareActionAcquire LeaseShareAction = "Acquire" + // LeaseShareActionBreak ... + LeaseShareActionBreak LeaseShareAction = "Break" + // LeaseShareActionChange ... + LeaseShareActionChange LeaseShareAction = "Change" + // LeaseShareActionRelease ... + LeaseShareActionRelease LeaseShareAction = "Release" + // LeaseShareActionRenew ... + LeaseShareActionRenew LeaseShareAction = "Renew" +) + +// PossibleLeaseShareActionValues returns an array of possible values for the LeaseShareAction const type. +func PossibleLeaseShareActionValues() []LeaseShareAction { + return []LeaseShareAction{LeaseShareActionAcquire, LeaseShareActionBreak, LeaseShareActionChange, LeaseShareActionRelease, LeaseShareActionRenew} +} + +// LeaseState enumerates the values for lease state. +type LeaseState string + +const ( + // LeaseStateAvailable ... + LeaseStateAvailable LeaseState = "Available" + // LeaseStateBreaking ... + LeaseStateBreaking LeaseState = "Breaking" + // LeaseStateBroken ... + LeaseStateBroken LeaseState = "Broken" + // LeaseStateExpired ... + LeaseStateExpired LeaseState = "Expired" + // LeaseStateLeased ... + LeaseStateLeased LeaseState = "Leased" +) + +// PossibleLeaseStateValues returns an array of possible values for the LeaseState const type. +func PossibleLeaseStateValues() []LeaseState { + return []LeaseState{LeaseStateAvailable, LeaseStateBreaking, LeaseStateBroken, LeaseStateExpired, LeaseStateLeased} +} + +// LeaseStatus enumerates the values for lease status. +type LeaseStatus string + +const ( + // LeaseStatusLocked ... + LeaseStatusLocked LeaseStatus = "Locked" + // LeaseStatusUnlocked ... + LeaseStatusUnlocked LeaseStatus = "Unlocked" +) + +// PossibleLeaseStatusValues returns an array of possible values for the LeaseStatus const type. +func PossibleLeaseStatusValues() []LeaseStatus { + return []LeaseStatus{LeaseStatusLocked, LeaseStatusUnlocked} +} + +// ListContainersInclude enumerates the values for list containers include. +type ListContainersInclude string + +const ( + // ListContainersIncludeDeleted ... + ListContainersIncludeDeleted ListContainersInclude = "deleted" +) + +// PossibleListContainersIncludeValues returns an array of possible values for the ListContainersInclude const type. +func PossibleListContainersIncludeValues() []ListContainersInclude { + return []ListContainersInclude{ListContainersIncludeDeleted} +} + +// ListKeyExpand enumerates the values for list key expand. +type ListKeyExpand string + +const ( + // ListKeyExpandKerb ... + ListKeyExpandKerb ListKeyExpand = "kerb" +) + +// PossibleListKeyExpandValues returns an array of possible values for the ListKeyExpand const type. +func PossibleListKeyExpandValues() []ListKeyExpand { + return []ListKeyExpand{ListKeyExpandKerb} +} + +// MigrationState enumerates the values for migration state. +type MigrationState string + +const ( + // MigrationStateCompleted ... + MigrationStateCompleted MigrationState = "Completed" + // MigrationStateInProgress ... 
+ MigrationStateInProgress MigrationState = "InProgress" +) + +// PossibleMigrationStateValues returns an array of possible values for the MigrationState const type. +func PossibleMigrationStateValues() []MigrationState { + return []MigrationState{MigrationStateCompleted, MigrationStateInProgress} +} + +// MinimumTLSVersion enumerates the values for minimum tls version. +type MinimumTLSVersion string + +const ( + // MinimumTLSVersionTLS10 ... + MinimumTLSVersionTLS10 MinimumTLSVersion = "TLS1_0" + // MinimumTLSVersionTLS11 ... + MinimumTLSVersionTLS11 MinimumTLSVersion = "TLS1_1" + // MinimumTLSVersionTLS12 ... + MinimumTLSVersionTLS12 MinimumTLSVersion = "TLS1_2" +) + +// PossibleMinimumTLSVersionValues returns an array of possible values for the MinimumTLSVersion const type. +func PossibleMinimumTLSVersionValues() []MinimumTLSVersion { + return []MinimumTLSVersion{MinimumTLSVersionTLS10, MinimumTLSVersionTLS11, MinimumTLSVersionTLS12} +} + +// Name enumerates the values for name. +type Name string + +const ( + // NameAccessTimeTracking ... + NameAccessTimeTracking Name = "AccessTimeTracking" +) + +// PossibleNameValues returns an array of possible values for the Name const type. +func PossibleNameValues() []Name { + return []Name{NameAccessTimeTracking} +} + +// ObjectType enumerates the values for object type. +type ObjectType string + +const ( + // ObjectTypeBlob ... + ObjectTypeBlob ObjectType = "Blob" + // ObjectTypeContainer ... + ObjectTypeContainer ObjectType = "Container" +) + +// PossibleObjectTypeValues returns an array of possible values for the ObjectType const type. +func PossibleObjectTypeValues() []ObjectType { + return []ObjectType{ObjectTypeBlob, ObjectTypeContainer} +} + +// Permissions enumerates the values for permissions. +type Permissions string + +const ( + // PermissionsA ... + PermissionsA Permissions = "a" + // PermissionsC ... + PermissionsC Permissions = "c" + // PermissionsD ... + PermissionsD Permissions = "d" + // PermissionsL ... + PermissionsL Permissions = "l" + // PermissionsP ... + PermissionsP Permissions = "p" + // PermissionsR ... + PermissionsR Permissions = "r" + // PermissionsU ... + PermissionsU Permissions = "u" + // PermissionsW ... + PermissionsW Permissions = "w" +) + +// PossiblePermissionsValues returns an array of possible values for the Permissions const type. +func PossiblePermissionsValues() []Permissions { + return []Permissions{PermissionsA, PermissionsC, PermissionsD, PermissionsL, PermissionsP, PermissionsR, PermissionsU, PermissionsW} +} + +// PrivateEndpointConnectionProvisioningState enumerates the values for private endpoint connection +// provisioning state. +type PrivateEndpointConnectionProvisioningState string + +const ( + // PrivateEndpointConnectionProvisioningStateCreating ... + PrivateEndpointConnectionProvisioningStateCreating PrivateEndpointConnectionProvisioningState = "Creating" + // PrivateEndpointConnectionProvisioningStateDeleting ... + PrivateEndpointConnectionProvisioningStateDeleting PrivateEndpointConnectionProvisioningState = "Deleting" + // PrivateEndpointConnectionProvisioningStateFailed ... + PrivateEndpointConnectionProvisioningStateFailed PrivateEndpointConnectionProvisioningState = "Failed" + // PrivateEndpointConnectionProvisioningStateSucceeded ... 
+ PrivateEndpointConnectionProvisioningStateSucceeded PrivateEndpointConnectionProvisioningState = "Succeeded" +) + +// PossiblePrivateEndpointConnectionProvisioningStateValues returns an array of possible values for the PrivateEndpointConnectionProvisioningState const type. +func PossiblePrivateEndpointConnectionProvisioningStateValues() []PrivateEndpointConnectionProvisioningState { + return []PrivateEndpointConnectionProvisioningState{PrivateEndpointConnectionProvisioningStateCreating, PrivateEndpointConnectionProvisioningStateDeleting, PrivateEndpointConnectionProvisioningStateFailed, PrivateEndpointConnectionProvisioningStateSucceeded} +} + +// PrivateEndpointServiceConnectionStatus enumerates the values for private endpoint service connection status. +type PrivateEndpointServiceConnectionStatus string + +const ( + // PrivateEndpointServiceConnectionStatusApproved ... + PrivateEndpointServiceConnectionStatusApproved PrivateEndpointServiceConnectionStatus = "Approved" + // PrivateEndpointServiceConnectionStatusPending ... + PrivateEndpointServiceConnectionStatusPending PrivateEndpointServiceConnectionStatus = "Pending" + // PrivateEndpointServiceConnectionStatusRejected ... + PrivateEndpointServiceConnectionStatusRejected PrivateEndpointServiceConnectionStatus = "Rejected" +) + +// PossiblePrivateEndpointServiceConnectionStatusValues returns an array of possible values for the PrivateEndpointServiceConnectionStatus const type. +func PossiblePrivateEndpointServiceConnectionStatusValues() []PrivateEndpointServiceConnectionStatus { + return []PrivateEndpointServiceConnectionStatus{PrivateEndpointServiceConnectionStatusApproved, PrivateEndpointServiceConnectionStatusPending, PrivateEndpointServiceConnectionStatusRejected} +} + +// ProvisioningState enumerates the values for provisioning state. +type ProvisioningState string + +const ( + // ProvisioningStateCreating ... + ProvisioningStateCreating ProvisioningState = "Creating" + // ProvisioningStateResolvingDNS ... + ProvisioningStateResolvingDNS ProvisioningState = "ResolvingDNS" + // ProvisioningStateSucceeded ... + ProvisioningStateSucceeded ProvisioningState = "Succeeded" +) + +// PossibleProvisioningStateValues returns an array of possible values for the ProvisioningState const type. +func PossibleProvisioningStateValues() []ProvisioningState { + return []ProvisioningState{ProvisioningStateCreating, ProvisioningStateResolvingDNS, ProvisioningStateSucceeded} +} + +// PublicAccess enumerates the values for public access. +type PublicAccess string + +const ( + // PublicAccessBlob ... + PublicAccessBlob PublicAccess = "Blob" + // PublicAccessContainer ... + PublicAccessContainer PublicAccess = "Container" + // PublicAccessNone ... + PublicAccessNone PublicAccess = "None" +) + +// PossiblePublicAccessValues returns an array of possible values for the PublicAccess const type. +func PossiblePublicAccessValues() []PublicAccess { + return []PublicAccess{PublicAccessBlob, PublicAccessContainer, PublicAccessNone} +} + +// Reason enumerates the values for reason. +type Reason string + +const ( + // ReasonAccountNameInvalid ... + ReasonAccountNameInvalid Reason = "AccountNameInvalid" + // ReasonAlreadyExists ... + ReasonAlreadyExists Reason = "AlreadyExists" +) + +// PossibleReasonValues returns an array of possible values for the Reason const type. +func PossibleReasonValues() []Reason { + return []Reason{ReasonAccountNameInvalid, ReasonAlreadyExists} +} + +// ReasonCode enumerates the values for reason code. 
+type ReasonCode string + +const ( + // ReasonCodeNotAvailableForSubscription ... + ReasonCodeNotAvailableForSubscription ReasonCode = "NotAvailableForSubscription" + // ReasonCodeQuotaID ... + ReasonCodeQuotaID ReasonCode = "QuotaId" +) + +// PossibleReasonCodeValues returns an array of possible values for the ReasonCode const type. +func PossibleReasonCodeValues() []ReasonCode { + return []ReasonCode{ReasonCodeNotAvailableForSubscription, ReasonCodeQuotaID} +} + +// RootSquashType enumerates the values for root squash type. +type RootSquashType string + +const ( + // RootSquashTypeAllSquash ... + RootSquashTypeAllSquash RootSquashType = "AllSquash" + // RootSquashTypeNoRootSquash ... + RootSquashTypeNoRootSquash RootSquashType = "NoRootSquash" + // RootSquashTypeRootSquash ... + RootSquashTypeRootSquash RootSquashType = "RootSquash" +) + +// PossibleRootSquashTypeValues returns an array of possible values for the RootSquashType const type. +func PossibleRootSquashTypeValues() []RootSquashType { + return []RootSquashType{RootSquashTypeAllSquash, RootSquashTypeNoRootSquash, RootSquashTypeRootSquash} +} + +// RoutingChoice enumerates the values for routing choice. +type RoutingChoice string + +const ( + // RoutingChoiceInternetRouting ... + RoutingChoiceInternetRouting RoutingChoice = "InternetRouting" + // RoutingChoiceMicrosoftRouting ... + RoutingChoiceMicrosoftRouting RoutingChoice = "MicrosoftRouting" +) + +// PossibleRoutingChoiceValues returns an array of possible values for the RoutingChoice const type. +func PossibleRoutingChoiceValues() []RoutingChoice { + return []RoutingChoice{RoutingChoiceInternetRouting, RoutingChoiceMicrosoftRouting} +} + +// Schedule enumerates the values for schedule. +type Schedule string + +const ( + // ScheduleDaily ... + ScheduleDaily Schedule = "Daily" + // ScheduleWeekly ... + ScheduleWeekly Schedule = "Weekly" +) + +// PossibleScheduleValues returns an array of possible values for the Schedule const type. +func PossibleScheduleValues() []Schedule { + return []Schedule{ScheduleDaily, ScheduleWeekly} +} + +// Services enumerates the values for services. +type Services string + +const ( + // ServicesB ... + ServicesB Services = "b" + // ServicesF ... + ServicesF Services = "f" + // ServicesQ ... + ServicesQ Services = "q" + // ServicesT ... + ServicesT Services = "t" +) + +// PossibleServicesValues returns an array of possible values for the Services const type. +func PossibleServicesValues() []Services { + return []Services{ServicesB, ServicesF, ServicesQ, ServicesT} +} + +// ShareAccessTier enumerates the values for share access tier. +type ShareAccessTier string + +const ( + // ShareAccessTierCool ... + ShareAccessTierCool ShareAccessTier = "Cool" + // ShareAccessTierHot ... + ShareAccessTierHot ShareAccessTier = "Hot" + // ShareAccessTierPremium ... + ShareAccessTierPremium ShareAccessTier = "Premium" + // ShareAccessTierTransactionOptimized ... + ShareAccessTierTransactionOptimized ShareAccessTier = "TransactionOptimized" +) + +// PossibleShareAccessTierValues returns an array of possible values for the ShareAccessTier const type. +func PossibleShareAccessTierValues() []ShareAccessTier { + return []ShareAccessTier{ShareAccessTierCool, ShareAccessTierHot, ShareAccessTierPremium, ShareAccessTierTransactionOptimized} +} + +// SignedResource enumerates the values for signed resource. +type SignedResource string + +const ( + // SignedResourceB ... + SignedResourceB SignedResource = "b" + // SignedResourceC ... 
+ SignedResourceC SignedResource = "c" + // SignedResourceF ... + SignedResourceF SignedResource = "f" + // SignedResourceS ... + SignedResourceS SignedResource = "s" +) + +// PossibleSignedResourceValues returns an array of possible values for the SignedResource const type. +func PossibleSignedResourceValues() []SignedResource { + return []SignedResource{SignedResourceB, SignedResourceC, SignedResourceF, SignedResourceS} +} + +// SignedResourceTypes enumerates the values for signed resource types. +type SignedResourceTypes string + +const ( + // SignedResourceTypesC ... + SignedResourceTypesC SignedResourceTypes = "c" + // SignedResourceTypesO ... + SignedResourceTypesO SignedResourceTypes = "o" + // SignedResourceTypesS ... + SignedResourceTypesS SignedResourceTypes = "s" +) + +// PossibleSignedResourceTypesValues returns an array of possible values for the SignedResourceTypes const type. +func PossibleSignedResourceTypesValues() []SignedResourceTypes { + return []SignedResourceTypes{SignedResourceTypesC, SignedResourceTypesO, SignedResourceTypesS} +} + +// SkuName enumerates the values for sku name. +type SkuName string + +const ( + // SkuNamePremiumLRS ... + SkuNamePremiumLRS SkuName = "Premium_LRS" + // SkuNamePremiumZRS ... + SkuNamePremiumZRS SkuName = "Premium_ZRS" + // SkuNameStandardGRS ... + SkuNameStandardGRS SkuName = "Standard_GRS" + // SkuNameStandardGZRS ... + SkuNameStandardGZRS SkuName = "Standard_GZRS" + // SkuNameStandardLRS ... + SkuNameStandardLRS SkuName = "Standard_LRS" + // SkuNameStandardRAGRS ... + SkuNameStandardRAGRS SkuName = "Standard_RAGRS" + // SkuNameStandardRAGZRS ... + SkuNameStandardRAGZRS SkuName = "Standard_RAGZRS" + // SkuNameStandardZRS ... + SkuNameStandardZRS SkuName = "Standard_ZRS" +) + +// PossibleSkuNameValues returns an array of possible values for the SkuName const type. +func PossibleSkuNameValues() []SkuName { + return []SkuName{SkuNamePremiumLRS, SkuNamePremiumZRS, SkuNameStandardGRS, SkuNameStandardGZRS, SkuNameStandardLRS, SkuNameStandardRAGRS, SkuNameStandardRAGZRS, SkuNameStandardZRS} +} + +// SkuTier enumerates the values for sku tier. +type SkuTier string + +const ( + // SkuTierPremium ... + SkuTierPremium SkuTier = "Premium" + // SkuTierStandard ... + SkuTierStandard SkuTier = "Standard" +) + +// PossibleSkuTierValues returns an array of possible values for the SkuTier const type. +func PossibleSkuTierValues() []SkuTier { + return []SkuTier{SkuTierPremium, SkuTierStandard} +} + +// State enumerates the values for state. +type State string + +const ( + // StateDeprovisioning ... + StateDeprovisioning State = "Deprovisioning" + // StateFailed ... + StateFailed State = "Failed" + // StateNetworkSourceDeleted ... + StateNetworkSourceDeleted State = "NetworkSourceDeleted" + // StateProvisioning ... + StateProvisioning State = "Provisioning" + // StateSucceeded ... + StateSucceeded State = "Succeeded" +) + +// PossibleStateValues returns an array of possible values for the State const type. +func PossibleStateValues() []State { + return []State{StateDeprovisioning, StateFailed, StateNetworkSourceDeleted, StateProvisioning, StateSucceeded} +} + +// UsageUnit enumerates the values for usage unit. +type UsageUnit string + +const ( + // UsageUnitBytes ... + UsageUnitBytes UsageUnit = "Bytes" + // UsageUnitBytesPerSecond ... + UsageUnitBytesPerSecond UsageUnit = "BytesPerSecond" + // UsageUnitCount ... + UsageUnitCount UsageUnit = "Count" + // UsageUnitCountsPerSecond ... 
+ UsageUnitCountsPerSecond UsageUnit = "CountsPerSecond" + // UsageUnitPercent ... + UsageUnitPercent UsageUnit = "Percent" + // UsageUnitSeconds ... + UsageUnitSeconds UsageUnit = "Seconds" +) + +// PossibleUsageUnitValues returns an array of possible values for the UsageUnit const type. +func PossibleUsageUnitValues() []UsageUnit { + return []UsageUnit{UsageUnitBytes, UsageUnitBytesPerSecond, UsageUnitCount, UsageUnitCountsPerSecond, UsageUnitPercent, UsageUnitSeconds} +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/fileservices.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/fileservices.go new file mode 100644 index 000000000000..e626313de1a4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/fileservices.go @@ -0,0 +1,323 @@ +package storage + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// FileServicesClient is the the Azure Storage Management API. +type FileServicesClient struct { + BaseClient +} + +// NewFileServicesClient creates an instance of the FileServicesClient client. +func NewFileServicesClient(subscriptionID string) FileServicesClient { + return NewFileServicesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewFileServicesClientWithBaseURI creates an instance of the FileServicesClient client using a custom endpoint. Use +// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewFileServicesClientWithBaseURI(baseURI string, subscriptionID string) FileServicesClient { + return FileServicesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// GetServiceProperties gets the properties of file services in storage accounts, including CORS (Cross-Origin Resource +// Sharing) rules. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
+func (client FileServicesClient) GetServiceProperties(ctx context.Context, resourceGroupName string, accountName string) (result FileServiceProperties, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileServicesClient.GetServiceProperties") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.FileServicesClient", "GetServiceProperties", err.Error()) + } + + req, err := client.GetServicePropertiesPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "GetServiceProperties", nil, "Failure preparing request") + return + } + + resp, err := client.GetServicePropertiesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "GetServiceProperties", resp, "Failure sending request") + return + } + + result, err = client.GetServicePropertiesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "GetServiceProperties", resp, "Failure responding to request") + return + } + + return +} + +// GetServicePropertiesPreparer prepares the GetServiceProperties request. +func (client FileServicesClient) GetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "FileServicesName": autorest.Encode("path", "default"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetServicePropertiesSender sends the GetServiceProperties request. The method will close the +// http.Response Body if it receives an error. 
+func (client FileServicesClient) GetServicePropertiesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetServicePropertiesResponder handles the response to the GetServiceProperties request. The method always +// closes the http.Response Body. +func (client FileServicesClient) GetServicePropertiesResponder(resp *http.Response) (result FileServiceProperties, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List list all file services in storage accounts +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +func (client FileServicesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result FileServiceItems, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileServicesClient.List") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.FileServicesClient", "List", err.Error()) + } + + req, err := client.ListPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "List", resp, "Failure responding to request") + return + } + + return +} + +// ListPreparer prepares the List request. 
+func (client FileServicesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client FileServicesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client FileServicesClient) ListResponder(resp *http.Response) (result FileServiceItems, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// SetServiceProperties sets the properties of file services in storage accounts, including CORS (Cross-Origin Resource +// Sharing) rules. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// parameters - the properties of file services in storage accounts, including CORS (Cross-Origin Resource +// Sharing) rules. 
+func (client FileServicesClient) SetServiceProperties(ctx context.Context, resourceGroupName string, accountName string, parameters FileServiceProperties) (result FileServiceProperties, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileServicesClient.SetServiceProperties") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.FileServicePropertiesProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.FileServicePropertiesProperties.ShareDeleteRetentionPolicy", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.FileServicePropertiesProperties.ShareDeleteRetentionPolicy.Days", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.FileServicePropertiesProperties.ShareDeleteRetentionPolicy.Days", Name: validation.InclusiveMaximum, Rule: int64(365), Chain: nil}, + {Target: "parameters.FileServicePropertiesProperties.ShareDeleteRetentionPolicy.Days", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, + }}, + }}, + }}}}}); err != nil { + return result, validation.NewError("storage.FileServicesClient", "SetServiceProperties", err.Error()) + } + + req, err := client.SetServicePropertiesPreparer(ctx, resourceGroupName, accountName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "SetServiceProperties", nil, "Failure preparing request") + return + } + + resp, err := client.SetServicePropertiesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "SetServiceProperties", resp, "Failure sending request") + return + } + + result, err = client.SetServicePropertiesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "SetServiceProperties", resp, "Failure responding to request") + return + } + + return +} + +// SetServicePropertiesPreparer prepares the SetServiceProperties request. 
+func (client FileServicesClient) SetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters FileServiceProperties) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "FileServicesName": autorest.Encode("path", "default"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + parameters.Sku = nil + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// SetServicePropertiesSender sends the SetServiceProperties request. The method will close the +// http.Response Body if it receives an error. +func (client FileServicesClient) SetServicePropertiesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// SetServicePropertiesResponder handles the response to the SetServiceProperties request. The method always +// closes the http.Response Body. +func (client FileServicesClient) SetServicePropertiesResponder(resp *http.Response) (result FileServiceProperties, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/fileshares.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/fileshares.go new file mode 100644 index 000000000000..8c6d637d9f6b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/fileshares.go @@ -0,0 +1,824 @@ +package storage + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// FileSharesClient is the the Azure Storage Management API. +type FileSharesClient struct { + BaseClient +} + +// NewFileSharesClient creates an instance of the FileSharesClient client. +func NewFileSharesClient(subscriptionID string) FileSharesClient { + return NewFileSharesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewFileSharesClientWithBaseURI creates an instance of the FileSharesClient client using a custom endpoint. Use this +// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). 
+func NewFileSharesClientWithBaseURI(baseURI string, subscriptionID string) FileSharesClient { + return FileSharesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create creates a new share under the specified account as described by request body. The share resource includes +// metadata and properties for that share. It does not include a list of the files contained by the share. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// shareName - the name of the file share within the specified storage account. File share names must be +// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-) +// character must be immediately preceded and followed by a letter or number. +// fileShare - properties of the file share to create. +// expand - optional, used to expand the properties within share's properties. Valid values are: snapshots. +// Should be passed as a string with delimiter ',' +func (client FileSharesClient) Create(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare, expand string) (result FileShare, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Create") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: shareName, + Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: fileShare, + Constraints: []validation.Constraint{{Target: "fileShare.FileShareProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "fileShare.FileShareProperties.ShareQuota", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "fileShare.FileShareProperties.ShareQuota", Name: validation.InclusiveMaximum, Rule: int64(102400), Chain: nil}, + {Target: "fileShare.FileShareProperties.ShareQuota", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, + }}, + }}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.FileSharesClient", "Create", err.Error()) + } + + req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, shareName, fileShare, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Create", nil, "Failure 
preparing request") + return + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Create", resp, "Failure sending request") + return + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Create", resp, "Failure responding to request") + return + } + + return +} + +// CreatePreparer prepares the Create request. +func (client FileSharesClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "shareName": autorest.Encode("path", shareName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters), + autorest.WithJSON(fileShare), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client FileSharesClient) CreateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client FileSharesClient) CreateResponder(resp *http.Response) (result FileShare, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes specified share under its account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// shareName - the name of the file share within the specified storage account. File share names must be +// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-) +// character must be immediately preceded and followed by a letter or number. +// xMsSnapshot - optional, used to delete a snapshot. +// include - optional. Valid values are: snapshots, leased-snapshots, none. The default value is snapshots. For +// 'snapshots', the file share is deleted including all of its file share snapshots. If the file share contains +// leased-snapshots, the deletion fails. 
For 'leased-snapshots', the file share is deleted included all of its +// file share snapshots (leased/unleased). For 'none', the file share is deleted if it has no share snapshots. +// If the file share contains any snapshots (leased or unleased), the deletion fails. +func (client FileSharesClient) Delete(ctx context.Context, resourceGroupName string, accountName string, shareName string, xMsSnapshot string, include string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: shareName, + Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.FileSharesClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, shareName, xMsSnapshot, include) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Delete", resp, "Failure responding to request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client FileSharesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string, xMsSnapshot string, include string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "shareName": autorest.Encode("path", shareName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(include) > 0 { + queryParameters["$include"] = autorest.Encode("query", include) + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if len(xMsSnapshot) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("x-ms-snapshot", autorest.String(xMsSnapshot))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client FileSharesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client FileSharesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets properties of a specified share. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// shareName - the name of the file share within the specified storage account. File share names must be +// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-) +// character must be immediately preceded and followed by a letter or number. +// expand - optional, used to expand the properties within share's properties. Valid values are: stats. Should +// be passed as a string with delimiter ','. +// xMsSnapshot - optional, used to retrieve properties of a snapshot. 
+func (client FileSharesClient) Get(ctx context.Context, resourceGroupName string, accountName string, shareName string, expand string, xMsSnapshot string) (result FileShare, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: shareName, + Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.FileSharesClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, accountName, shareName, expand, xMsSnapshot) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. +func (client FileSharesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string, expand string, xMsSnapshot string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "shareName": autorest.Encode("path", shareName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if len(xMsSnapshot) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("x-ms-snapshot", autorest.String(xMsSnapshot))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. 
The method will close the +// http.Response Body if it receives an error. +func (client FileSharesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client FileSharesClient) GetResponder(resp *http.Response) (result FileShare, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Lease the Lease Share operation establishes and manages a lock on a share for delete operations. The lock duration +// can be 15 to 60 seconds, or can be infinite. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// shareName - the name of the file share within the specified storage account. File share names must be +// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-) +// character must be immediately preceded and followed by a letter or number. +// parameters - lease Share request body. +// xMsSnapshot - optional. Specify the snapshot time to lease a snapshot. +func (client FileSharesClient) Lease(ctx context.Context, resourceGroupName string, accountName string, shareName string, parameters *LeaseShareRequest, xMsSnapshot string) (result LeaseShareResponse, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Lease") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: shareName, + Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.FileSharesClient", "Lease", err.Error()) + } + + req, err := client.LeasePreparer(ctx, resourceGroupName, accountName, shareName, parameters, xMsSnapshot) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Lease", nil, "Failure preparing request") + return + } + + resp, err := client.LeaseSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, 
"storage.FileSharesClient", "Lease", resp, "Failure sending request") + return + } + + result, err = client.LeaseResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Lease", resp, "Failure responding to request") + return + } + + return +} + +// LeasePreparer prepares the Lease request. +func (client FileSharesClient) LeasePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string, parameters *LeaseShareRequest, xMsSnapshot string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "shareName": autorest.Encode("path", shareName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}/lease", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if parameters != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(parameters)) + } + if len(xMsSnapshot) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("x-ms-snapshot", autorest.String(xMsSnapshot))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// LeaseSender sends the Lease request. The method will close the +// http.Response Body if it receives an error. +func (client FileSharesClient) LeaseSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// LeaseResponder handles the response to the Lease request. The method always +// closes the http.Response Body. +func (client FileSharesClient) LeaseResponder(resp *http.Response) (result LeaseShareResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all shares. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// maxpagesize - optional. Specified maximum number of shares that can be included in the list. +// filter - optional. When specified, only share names starting with the filter will be listed. +// expand - optional, used to expand the properties within share's properties. Valid values are: deleted, +// snapshots. 
Should be passed as a string with delimiter ',' +func (client FileSharesClient) List(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string, expand string) (result FileShareItemsPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.List") + defer func() { + sc := -1 + if result.fsi.Response.Response != nil { + sc = result.fsi.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.FileSharesClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, accountName, maxpagesize, filter, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.fsi.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "List", resp, "Failure sending request") + return + } + + result.fsi, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "List", resp, "Failure responding to request") + return + } + if result.fsi.hasNextLink() && result.fsi.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListPreparer prepares the List request. +func (client FileSharesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(maxpagesize) > 0 { + queryParameters["$maxpagesize"] = autorest.Encode("query", maxpagesize) + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. 
The method will close the +// http.Response Body if it receives an error. +func (client FileSharesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client FileSharesClient) ListResponder(resp *http.Response) (result FileShareItems, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client FileSharesClient) listNextResults(ctx context.Context, lastResults FileShareItems) (result FileShareItems, err error) { + req, err := lastResults.fileShareItemsPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage.FileSharesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage.FileSharesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client FileSharesClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string, expand string) (result FileShareItemsIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, accountName, maxpagesize, filter, expand) + return +} + +// Restore restore a file share within a valid retention days if share soft delete is enabled +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// shareName - the name of the file share within the specified storage account. File share names must be +// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-) +// character must be immediately preceded and followed by a letter or number. 
+func (client FileSharesClient) Restore(ctx context.Context, resourceGroupName string, accountName string, shareName string, deletedShare DeletedShare) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Restore") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: shareName, + Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: deletedShare, + Constraints: []validation.Constraint{{Target: "deletedShare.DeletedShareName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "deletedShare.DeletedShareVersion", Name: validation.Null, Rule: true, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.FileSharesClient", "Restore", err.Error()) + } + + req, err := client.RestorePreparer(ctx, resourceGroupName, accountName, shareName, deletedShare) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Restore", nil, "Failure preparing request") + return + } + + resp, err := client.RestoreSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Restore", resp, "Failure sending request") + return + } + + result, err = client.RestoreResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Restore", resp, "Failure responding to request") + return + } + + return +} + +// RestorePreparer prepares the Restore request. 
+func (client FileSharesClient) RestorePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string, deletedShare DeletedShare) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "shareName": autorest.Encode("path", shareName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}/restore", pathParameters), + autorest.WithJSON(deletedShare), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RestoreSender sends the Restore request. The method will close the +// http.Response Body if it receives an error. +func (client FileSharesClient) RestoreSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// RestoreResponder handles the response to the Restore request. The method always +// closes the http.Response Body. +func (client FileSharesClient) RestoreResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Update updates share properties as specified in request body. Properties not mentioned in the request will not be +// changed. Update fails if the specified share does not already exist. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// shareName - the name of the file share within the specified storage account. File share names must be +// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-) +// character must be immediately preceded and followed by a letter or number. +// fileShare - properties to update for the file share. 
+func (client FileSharesClient) Update(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare) (result FileShare, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: shareName, + Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.FileSharesClient", "Update", err.Error()) + } + + req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, shareName, fileShare) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Update", resp, "Failure responding to request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client FileSharesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "shareName": autorest.Encode("path", shareName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters), + autorest.WithJSON(fileShare), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. 
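// Illustrative sketch (editorial note, not part of the generated SDK): Update issues a PATCH,
// so only the fields set on the FileShare payload are changed. FileShareProperties and its
// ShareQuota field are assumed from the rest of this package (they are not shown in this
// excerpt), and the resource names are hypothetical.
//
//	share := FileShare{
//		FileShareProperties: &FileShareProperties{
//			ShareQuota: to.Int32Ptr(200), // resize the share's quota to 200 GiB
//		},
//	}
//	updated, err := client.Update(ctx, "example-resources", "examplestorageacct", "share1", share)
//	_, _ = updated, err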
+func (client FileSharesClient) UpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client FileSharesClient) UpdateResponder(resp *http.Response) (result FileShare, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/managementpolicies.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/managementpolicies.go new file mode 100644 index 000000000000..8181c9be7531 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/managementpolicies.go @@ -0,0 +1,316 @@ +package storage + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// ManagementPoliciesClient is the the Azure Storage Management API. +type ManagementPoliciesClient struct { + BaseClient +} + +// NewManagementPoliciesClient creates an instance of the ManagementPoliciesClient client. +func NewManagementPoliciesClient(subscriptionID string) ManagementPoliciesClient { + return NewManagementPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewManagementPoliciesClientWithBaseURI creates an instance of the ManagementPoliciesClient client using a custom +// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). +func NewManagementPoliciesClientWithBaseURI(baseURI string, subscriptionID string) ManagementPoliciesClient { + return ManagementPoliciesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate sets the managementpolicy to the specified storage account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// properties - the ManagementPolicy set to a storage account. 
+func (client ManagementPoliciesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, properties ManagementPolicy) (result ManagementPolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManagementPoliciesClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: properties, + Constraints: []validation.Constraint{{Target: "properties.ManagementPolicyProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "properties.ManagementPolicyProperties.Policy", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "properties.ManagementPolicyProperties.Policy.Rules", Name: validation.Null, Rule: true, Chain: nil}}}, + }}}}}); err != nil { + return result, validation.NewError("storage.ManagementPoliciesClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, properties) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "CreateOrUpdate", resp, "Failure responding to request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client ManagementPoliciesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, properties ManagementPolicy) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "managementPolicyName": autorest.Encode("path", "default"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}", pathParameters), + autorest.WithJSON(properties), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementPoliciesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ManagementPoliciesClient) CreateOrUpdateResponder(resp *http.Response) (result ManagementPolicy, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the managementpolicy associated with the specified storage account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
+func (client ManagementPoliciesClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManagementPoliciesClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.ManagementPoliciesClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Delete", resp, "Failure responding to request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client ManagementPoliciesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "managementPolicyName": autorest.Encode("path", "default"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementPoliciesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client ManagementPoliciesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the managementpolicy associated with the specified storage account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +func (client ManagementPoliciesClient) Get(ctx context.Context, resourceGroupName string, accountName string) (result ManagementPolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManagementPoliciesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.ManagementPoliciesClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. 
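// Illustrative sketch (editorial note, not part of the generated SDK): reading back an account's
// lifecycle management policy with the Get method above. NewManagementPoliciesClient is defined
// earlier in this file; the subscription ID and resource names are hypothetical, and authorizer
// setup is omitted.
//
//	mpClient := NewManagementPoliciesClient("00000000-0000-0000-0000-000000000000")
//	// mpClient.Authorizer would normally be configured before issuing requests
//	policy, err := mpClient.Get(ctx, "example-resources", "examplestorageacct")
//	if err != nil {
//		return err
//	}
//	_ = policy.ManagementPolicyProperties // holds the policy rules when the account has one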
+func (client ManagementPoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "managementPolicyName": autorest.Encode("path", "default"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementPoliciesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ManagementPoliciesClient) GetResponder(resp *http.Response) (result ManagementPolicy, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/models.go new file mode 100644 index 000000000000..61695714c537 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/models.go @@ -0,0 +1,5273 @@ +package storage + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "encoding/json" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// The package's fully qualified name. +const fqdn = "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage" + +// AccessPolicy ... +type AccessPolicy struct { + // Start - Start time of the access policy + Start *date.Time `json:"start,omitempty"` + // Expiry - Expiry time of the access policy + Expiry *date.Time `json:"expiry,omitempty"` + // Permission - List of abbreviated permissions. + Permission *string `json:"permission,omitempty"` +} + +// Account the storage account. +type Account struct { + autorest.Response `json:"-"` + // Sku - READ-ONLY; Gets the SKU. + Sku *Sku `json:"sku,omitempty"` + // Kind - READ-ONLY; Gets the Kind. 
Possible values include: 'KindStorage', 'KindStorageV2', 'KindBlobStorage', 'KindFileStorage', 'KindBlockBlobStorage' + Kind Kind `json:"kind,omitempty"` + // Identity - The identity of the resource. + Identity *Identity `json:"identity,omitempty"` + // ExtendedLocation - The extendedLocation of the resource. + ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"` + // AccountProperties - Properties of the storage account. + *AccountProperties `json:"properties,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` + // Location - The geo-location where the resource lives + Location *string `json:"location,omitempty"` + // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for Account. +func (a Account) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if a.Identity != nil { + objectMap["identity"] = a.Identity + } + if a.ExtendedLocation != nil { + objectMap["extendedLocation"] = a.ExtendedLocation + } + if a.AccountProperties != nil { + objectMap["properties"] = a.AccountProperties + } + if a.Tags != nil { + objectMap["tags"] = a.Tags + } + if a.Location != nil { + objectMap["location"] = a.Location + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Account struct. 
+func (a *Account) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "sku": + if v != nil { + var sku Sku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + a.Sku = &sku + } + case "kind": + if v != nil { + var kind Kind + err = json.Unmarshal(*v, &kind) + if err != nil { + return err + } + a.Kind = kind + } + case "identity": + if v != nil { + var identity Identity + err = json.Unmarshal(*v, &identity) + if err != nil { + return err + } + a.Identity = &identity + } + case "extendedLocation": + if v != nil { + var extendedLocation ExtendedLocation + err = json.Unmarshal(*v, &extendedLocation) + if err != nil { + return err + } + a.ExtendedLocation = &extendedLocation + } + case "properties": + if v != nil { + var accountProperties AccountProperties + err = json.Unmarshal(*v, &accountProperties) + if err != nil { + return err + } + a.AccountProperties = &accountProperties + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + a.Tags = tags + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + a.Location = &location + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + a.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + a.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + a.Type = &typeVar + } + } + } + + return nil +} + +// AccountCheckNameAvailabilityParameters the parameters used to check the availability of the storage +// account name. +type AccountCheckNameAvailabilityParameters struct { + // Name - The storage account name. + Name *string `json:"name,omitempty"` + // Type - The type of resource, Microsoft.Storage/storageAccounts + Type *string `json:"type,omitempty"` +} + +// AccountCreateParameters the parameters used when creating a storage account. +type AccountCreateParameters struct { + // Sku - Required. Gets or sets the SKU name. + Sku *Sku `json:"sku,omitempty"` + // Kind - Required. Indicates the type of storage account. Possible values include: 'KindStorage', 'KindStorageV2', 'KindBlobStorage', 'KindFileStorage', 'KindBlockBlobStorage' + Kind Kind `json:"kind,omitempty"` + // Location - Required. Gets or sets the location of the resource. This will be one of the supported and registered Azure Geo Regions (e.g. West US, East US, Southeast Asia, etc.). The geo region of a resource cannot be changed once it is created, but if an identical geo region is specified on update, the request will succeed. + Location *string `json:"location,omitempty"` + // ExtendedLocation - Optional. Set the extended location of the resource. If not set, the storage account will be created in Azure main region. Otherwise it will be created in the specified extended location + ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"` + // Tags - Gets or sets a list of key value pairs that describe the resource. These tags can be used for viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. 
Each tag must have a key with a length no greater than 128 characters and a value with a length no greater than 256 characters. + Tags map[string]*string `json:"tags"` + // Identity - The identity of the resource. + Identity *Identity `json:"identity,omitempty"` + // AccountPropertiesCreateParameters - The parameters used to create the storage account. + *AccountPropertiesCreateParameters `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for AccountCreateParameters. +func (acp AccountCreateParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if acp.Sku != nil { + objectMap["sku"] = acp.Sku + } + if acp.Kind != "" { + objectMap["kind"] = acp.Kind + } + if acp.Location != nil { + objectMap["location"] = acp.Location + } + if acp.ExtendedLocation != nil { + objectMap["extendedLocation"] = acp.ExtendedLocation + } + if acp.Tags != nil { + objectMap["tags"] = acp.Tags + } + if acp.Identity != nil { + objectMap["identity"] = acp.Identity + } + if acp.AccountPropertiesCreateParameters != nil { + objectMap["properties"] = acp.AccountPropertiesCreateParameters + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for AccountCreateParameters struct. +func (acp *AccountCreateParameters) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "sku": + if v != nil { + var sku Sku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + acp.Sku = &sku + } + case "kind": + if v != nil { + var kind Kind + err = json.Unmarshal(*v, &kind) + if err != nil { + return err + } + acp.Kind = kind + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + acp.Location = &location + } + case "extendedLocation": + if v != nil { + var extendedLocation ExtendedLocation + err = json.Unmarshal(*v, &extendedLocation) + if err != nil { + return err + } + acp.ExtendedLocation = &extendedLocation + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + acp.Tags = tags + } + case "identity": + if v != nil { + var identity Identity + err = json.Unmarshal(*v, &identity) + if err != nil { + return err + } + acp.Identity = &identity + } + case "properties": + if v != nil { + var accountPropertiesCreateParameters AccountPropertiesCreateParameters + err = json.Unmarshal(*v, &accountPropertiesCreateParameters) + if err != nil { + return err + } + acp.AccountPropertiesCreateParameters = &accountPropertiesCreateParameters + } + } + } + + return nil +} + +// AccountInternetEndpoints the URIs that are used to perform a retrieval of a public blob, file, web or +// dfs object via a internet routing endpoint. +type AccountInternetEndpoints struct { + // Blob - READ-ONLY; Gets the blob endpoint. + Blob *string `json:"blob,omitempty"` + // File - READ-ONLY; Gets the file endpoint. + File *string `json:"file,omitempty"` + // Web - READ-ONLY; Gets the web endpoint. + Web *string `json:"web,omitempty"` + // Dfs - READ-ONLY; Gets the dfs endpoint. + Dfs *string `json:"dfs,omitempty"` +} + +// MarshalJSON is the custom marshaler for AccountInternetEndpoints. +func (aie AccountInternetEndpoints) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// AccountKey an access key for the storage account. 
+type AccountKey struct { + // KeyName - READ-ONLY; Name of the key. + KeyName *string `json:"keyName,omitempty"` + // Value - READ-ONLY; Base 64-encoded value of the key. + Value *string `json:"value,omitempty"` + // Permissions - READ-ONLY; Permissions for the key -- read-only or full permissions. Possible values include: 'KeyPermissionRead', 'KeyPermissionFull' + Permissions KeyPermission `json:"permissions,omitempty"` + // CreationTime - READ-ONLY; Creation time of the key, in round trip date format. + CreationTime *date.Time `json:"creationTime,omitempty"` +} + +// MarshalJSON is the custom marshaler for AccountKey. +func (ak AccountKey) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// AccountListKeysResult the response from the ListKeys operation. +type AccountListKeysResult struct { + autorest.Response `json:"-"` + // Keys - READ-ONLY; Gets the list of storage account keys and their properties for the specified storage account. + Keys *[]AccountKey `json:"keys,omitempty"` +} + +// MarshalJSON is the custom marshaler for AccountListKeysResult. +func (alkr AccountListKeysResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// AccountListResult the response from the List Storage Accounts operation. +type AccountListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; Gets the list of storage accounts and their properties. + Value *[]Account `json:"value,omitempty"` + // NextLink - READ-ONLY; Request URL that can be used to query next page of storage accounts. Returned when total number of requested storage accounts exceed maximum page size. + NextLink *string `json:"nextLink,omitempty"` +} + +// MarshalJSON is the custom marshaler for AccountListResult. +func (alr AccountListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// AccountListResultIterator provides access to a complete listing of Account values. +type AccountListResultIterator struct { + i int + page AccountListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *AccountListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *AccountListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter AccountListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. 
+func (iter AccountListResultIterator) Response() AccountListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter AccountListResultIterator) Value() Account { + if !iter.page.NotDone() { + return Account{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the AccountListResultIterator type. +func NewAccountListResultIterator(page AccountListResultPage) AccountListResultIterator { + return AccountListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (alr AccountListResult) IsEmpty() bool { + return alr.Value == nil || len(*alr.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (alr AccountListResult) hasNextLink() bool { + return alr.NextLink != nil && len(*alr.NextLink) != 0 +} + +// accountListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (alr AccountListResult) accountListResultPreparer(ctx context.Context) (*http.Request, error) { + if !alr.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(alr.NextLink))) +} + +// AccountListResultPage contains a page of Account values. +type AccountListResultPage struct { + fn func(context.Context, AccountListResult) (AccountListResult, error) + alr AccountListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *AccountListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.alr) + if err != nil { + return err + } + page.alr = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *AccountListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page AccountListResultPage) NotDone() bool { + return !page.alr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page AccountListResultPage) Response() AccountListResult { + return page.alr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page AccountListResultPage) Values() []Account { + if page.alr.IsEmpty() { + return nil + } + return *page.alr.Value +} + +// Creates a new instance of the AccountListResultPage type. 
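// Illustrative sketch (editorial note, not part of the generated SDK): the iterator/page pair
// defined here follows the usual AutoRest track-1 pagination pattern. The iterator is assumed to
// come from a list operation on this package's AccountsClient (not shown in this excerpt); the
// loop itself only uses the NotDone, Value and NextWithContext methods defined above.
//
//	for iter.NotDone() {
//		acct := iter.Value()
//		fmt.Printf("storage account: %s\n", to.String(acct.Name))
//		if err := iter.NextWithContext(ctx); err != nil {
//			return err
//		}
//	}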
+func NewAccountListResultPage(cur AccountListResult, getNextPage func(context.Context, AccountListResult) (AccountListResult, error)) AccountListResultPage { + return AccountListResultPage{ + fn: getNextPage, + alr: cur, + } +} + +// AccountMicrosoftEndpoints the URIs that are used to perform a retrieval of a public blob, queue, table, +// web or dfs object via a microsoft routing endpoint. +type AccountMicrosoftEndpoints struct { + // Blob - READ-ONLY; Gets the blob endpoint. + Blob *string `json:"blob,omitempty"` + // Queue - READ-ONLY; Gets the queue endpoint. + Queue *string `json:"queue,omitempty"` + // Table - READ-ONLY; Gets the table endpoint. + Table *string `json:"table,omitempty"` + // File - READ-ONLY; Gets the file endpoint. + File *string `json:"file,omitempty"` + // Web - READ-ONLY; Gets the web endpoint. + Web *string `json:"web,omitempty"` + // Dfs - READ-ONLY; Gets the dfs endpoint. + Dfs *string `json:"dfs,omitempty"` +} + +// MarshalJSON is the custom marshaler for AccountMicrosoftEndpoints. +func (ame AccountMicrosoftEndpoints) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// AccountProperties properties of the storage account. +type AccountProperties struct { + // ProvisioningState - READ-ONLY; Gets the status of the storage account at the time the operation was called. Possible values include: 'ProvisioningStateCreating', 'ProvisioningStateResolvingDNS', 'ProvisioningStateSucceeded' + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` + // PrimaryEndpoints - READ-ONLY; Gets the URLs that are used to perform a retrieval of a public blob, queue, or table object. Note that Standard_ZRS and Premium_LRS accounts only return the blob endpoint. + PrimaryEndpoints *Endpoints `json:"primaryEndpoints,omitempty"` + // PrimaryLocation - READ-ONLY; Gets the location of the primary data center for the storage account. + PrimaryLocation *string `json:"primaryLocation,omitempty"` + // StatusOfPrimary - READ-ONLY; Gets the status indicating whether the primary location of the storage account is available or unavailable. Possible values include: 'AccountStatusAvailable', 'AccountStatusUnavailable' + StatusOfPrimary AccountStatus `json:"statusOfPrimary,omitempty"` + // LastGeoFailoverTime - READ-ONLY; Gets the timestamp of the most recent instance of a failover to the secondary location. Only the most recent timestamp is retained. This element is not returned if there has never been a failover instance. Only available if the accountType is Standard_GRS or Standard_RAGRS. + LastGeoFailoverTime *date.Time `json:"lastGeoFailoverTime,omitempty"` + // SecondaryLocation - READ-ONLY; Gets the location of the geo-replicated secondary for the storage account. Only available if the accountType is Standard_GRS or Standard_RAGRS. + SecondaryLocation *string `json:"secondaryLocation,omitempty"` + // StatusOfSecondary - READ-ONLY; Gets the status indicating whether the secondary location of the storage account is available or unavailable. Only available if the SKU name is Standard_GRS or Standard_RAGRS. Possible values include: 'AccountStatusAvailable', 'AccountStatusUnavailable' + StatusOfSecondary AccountStatus `json:"statusOfSecondary,omitempty"` + // CreationTime - READ-ONLY; Gets the creation date and time of the storage account in UTC. + CreationTime *date.Time `json:"creationTime,omitempty"` + // CustomDomain - READ-ONLY; Gets the custom domain the user assigned to this storage account. 
+ CustomDomain *CustomDomain `json:"customDomain,omitempty"` + // SasPolicy - READ-ONLY; SasPolicy assigned to the storage account. + SasPolicy *SasPolicy `json:"sasPolicy,omitempty"` + // KeyPolicy - READ-ONLY; KeyPolicy assigned to the storage account. + KeyPolicy *KeyPolicy `json:"keyPolicy,omitempty"` + // KeyCreationTime - READ-ONLY; Storage account keys creation time. + KeyCreationTime *KeyCreationTime `json:"keyCreationTime,omitempty"` + // SecondaryEndpoints - READ-ONLY; Gets the URLs that are used to perform a retrieval of a public blob, queue, or table object from the secondary location of the storage account. Only available if the SKU name is Standard_RAGRS. + SecondaryEndpoints *Endpoints `json:"secondaryEndpoints,omitempty"` + // Encryption - READ-ONLY; Gets the encryption settings on the account. If unspecified, the account is unencrypted. + Encryption *Encryption `json:"encryption,omitempty"` + // AccessTier - READ-ONLY; Required for storage accounts where kind = BlobStorage. The access tier used for billing. Possible values include: 'AccessTierHot', 'AccessTierCool' + AccessTier AccessTier `json:"accessTier,omitempty"` + // AzureFilesIdentityBasedAuthentication - Provides the identity based authentication settings for Azure Files. + AzureFilesIdentityBasedAuthentication *AzureFilesIdentityBasedAuthentication `json:"azureFilesIdentityBasedAuthentication,omitempty"` + // EnableHTTPSTrafficOnly - Allows https traffic only to storage service if sets to true. + EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` + // NetworkRuleSet - READ-ONLY; Network rule set + NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"` + // IsHnsEnabled - Account HierarchicalNamespace enabled if sets to true. + IsHnsEnabled *bool `json:"isHnsEnabled,omitempty"` + // GeoReplicationStats - READ-ONLY; Geo Replication Stats + GeoReplicationStats *GeoReplicationStats `json:"geoReplicationStats,omitempty"` + // FailoverInProgress - READ-ONLY; If the failover is in progress, the value will be true, otherwise, it will be null. + FailoverInProgress *bool `json:"failoverInProgress,omitempty"` + // LargeFileSharesState - Allow large file shares if sets to Enabled. It cannot be disabled once it is enabled. Possible values include: 'LargeFileSharesStateDisabled', 'LargeFileSharesStateEnabled' + LargeFileSharesState LargeFileSharesState `json:"largeFileSharesState,omitempty"` + // PrivateEndpointConnections - READ-ONLY; List of private endpoint connection associated with the specified storage account + PrivateEndpointConnections *[]PrivateEndpointConnection `json:"privateEndpointConnections,omitempty"` + // RoutingPreference - Maintains information about the network routing choice opted by the user for data transfer + RoutingPreference *RoutingPreference `json:"routingPreference,omitempty"` + // BlobRestoreStatus - READ-ONLY; Blob restore status + BlobRestoreStatus *BlobRestoreStatus `json:"blobRestoreStatus,omitempty"` + // AllowBlobPublicAccess - Allow or disallow public access to all blobs or containers in the storage account. The default interpretation is true for this property. + AllowBlobPublicAccess *bool `json:"allowBlobPublicAccess,omitempty"` + // MinimumTLSVersion - Set the minimum TLS version to be permitted on requests to storage. The default interpretation is TLS 1.0 for this property. 
Possible values include: 'MinimumTLSVersionTLS10', 'MinimumTLSVersionTLS11', 'MinimumTLSVersionTLS12' + MinimumTLSVersion MinimumTLSVersion `json:"minimumTlsVersion,omitempty"` + // AllowSharedKeyAccess - Indicates whether the storage account permits requests to be authorized with the account access key via Shared Key. If false, then all requests, including shared access signatures, must be authorized with Azure Active Directory (Azure AD). The default value is null, which is equivalent to true. + AllowSharedKeyAccess *bool `json:"allowSharedKeyAccess,omitempty"` + // EnableNfsV3 - NFS 3.0 protocol support enabled if set to true. + EnableNfsV3 *bool `json:"isNfsV3Enabled,omitempty"` + // AllowCrossTenantReplication - Allow or disallow cross AAD tenant object replication. The default interpretation is true for this property. + AllowCrossTenantReplication *bool `json:"allowCrossTenantReplication,omitempty"` +} + +// MarshalJSON is the custom marshaler for AccountProperties. +func (ap AccountProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ap.AzureFilesIdentityBasedAuthentication != nil { + objectMap["azureFilesIdentityBasedAuthentication"] = ap.AzureFilesIdentityBasedAuthentication + } + if ap.EnableHTTPSTrafficOnly != nil { + objectMap["supportsHttpsTrafficOnly"] = ap.EnableHTTPSTrafficOnly + } + if ap.IsHnsEnabled != nil { + objectMap["isHnsEnabled"] = ap.IsHnsEnabled + } + if ap.LargeFileSharesState != "" { + objectMap["largeFileSharesState"] = ap.LargeFileSharesState + } + if ap.RoutingPreference != nil { + objectMap["routingPreference"] = ap.RoutingPreference + } + if ap.AllowBlobPublicAccess != nil { + objectMap["allowBlobPublicAccess"] = ap.AllowBlobPublicAccess + } + if ap.MinimumTLSVersion != "" { + objectMap["minimumTlsVersion"] = ap.MinimumTLSVersion + } + if ap.AllowSharedKeyAccess != nil { + objectMap["allowSharedKeyAccess"] = ap.AllowSharedKeyAccess + } + if ap.EnableNfsV3 != nil { + objectMap["isNfsV3Enabled"] = ap.EnableNfsV3 + } + if ap.AllowCrossTenantReplication != nil { + objectMap["allowCrossTenantReplication"] = ap.AllowCrossTenantReplication + } + return json.Marshal(objectMap) +} + +// AccountPropertiesCreateParameters the parameters used to create the storage account. +type AccountPropertiesCreateParameters struct { + // SasPolicy - SasPolicy assigned to the storage account. + SasPolicy *SasPolicy `json:"sasPolicy,omitempty"` + // KeyPolicy - KeyPolicy assigned to the storage account. + KeyPolicy *KeyPolicy `json:"keyPolicy,omitempty"` + // CustomDomain - User domain assigned to the storage account. Name is the CNAME source. Only one custom domain is supported per storage account at this time. To clear the existing custom domain, use an empty string for the custom domain name property. + CustomDomain *CustomDomain `json:"customDomain,omitempty"` + // Encryption - Not applicable. Azure Storage encryption is enabled for all storage accounts and cannot be disabled. + Encryption *Encryption `json:"encryption,omitempty"` + // NetworkRuleSet - Network rule set + NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"` + // AccessTier - Required for storage accounts where kind = BlobStorage. The access tier used for billing. Possible values include: 'AccessTierHot', 'AccessTierCool' + AccessTier AccessTier `json:"accessTier,omitempty"` + // AzureFilesIdentityBasedAuthentication - Provides the identity based authentication settings for Azure Files. 
+ AzureFilesIdentityBasedAuthentication *AzureFilesIdentityBasedAuthentication `json:"azureFilesIdentityBasedAuthentication,omitempty"` + // EnableHTTPSTrafficOnly - Allows https traffic only to storage service if sets to true. The default value is true since API version 2019-04-01. + EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` + // IsHnsEnabled - Account HierarchicalNamespace enabled if sets to true. + IsHnsEnabled *bool `json:"isHnsEnabled,omitempty"` + // LargeFileSharesState - Allow large file shares if sets to Enabled. It cannot be disabled once it is enabled. Possible values include: 'LargeFileSharesStateDisabled', 'LargeFileSharesStateEnabled' + LargeFileSharesState LargeFileSharesState `json:"largeFileSharesState,omitempty"` + // RoutingPreference - Maintains information about the network routing choice opted by the user for data transfer + RoutingPreference *RoutingPreference `json:"routingPreference,omitempty"` + // AllowBlobPublicAccess - Allow or disallow public access to all blobs or containers in the storage account. The default interpretation is true for this property. + AllowBlobPublicAccess *bool `json:"allowBlobPublicAccess,omitempty"` + // MinimumTLSVersion - Set the minimum TLS version to be permitted on requests to storage. The default interpretation is TLS 1.0 for this property. Possible values include: 'MinimumTLSVersionTLS10', 'MinimumTLSVersionTLS11', 'MinimumTLSVersionTLS12' + MinimumTLSVersion MinimumTLSVersion `json:"minimumTlsVersion,omitempty"` + // AllowSharedKeyAccess - Indicates whether the storage account permits requests to be authorized with the account access key via Shared Key. If false, then all requests, including shared access signatures, must be authorized with Azure Active Directory (Azure AD). The default value is null, which is equivalent to true. + AllowSharedKeyAccess *bool `json:"allowSharedKeyAccess,omitempty"` + // EnableNfsV3 - NFS 3.0 protocol support enabled if set to true. + EnableNfsV3 *bool `json:"isNfsV3Enabled,omitempty"` + // AllowCrossTenantReplication - Allow or disallow cross AAD tenant object replication. The default interpretation is true for this property. + AllowCrossTenantReplication *bool `json:"allowCrossTenantReplication,omitempty"` +} + +// AccountPropertiesUpdateParameters the parameters used when updating a storage account. +type AccountPropertiesUpdateParameters struct { + // CustomDomain - Custom domain assigned to the storage account by the user. Name is the CNAME source. Only one custom domain is supported per storage account at this time. To clear the existing custom domain, use an empty string for the custom domain name property. + CustomDomain *CustomDomain `json:"customDomain,omitempty"` + // Encryption - Provides the encryption settings on the account. The default setting is unencrypted. + Encryption *Encryption `json:"encryption,omitempty"` + // SasPolicy - SasPolicy assigned to the storage account. + SasPolicy *SasPolicy `json:"sasPolicy,omitempty"` + // KeyPolicy - KeyPolicy assigned to the storage account. + KeyPolicy *KeyPolicy `json:"keyPolicy,omitempty"` + // AccessTier - Required for storage accounts where kind = BlobStorage. The access tier used for billing. Possible values include: 'AccessTierHot', 'AccessTierCool' + AccessTier AccessTier `json:"accessTier,omitempty"` + // AzureFilesIdentityBasedAuthentication - Provides the identity based authentication settings for Azure Files. 
+ AzureFilesIdentityBasedAuthentication *AzureFilesIdentityBasedAuthentication `json:"azureFilesIdentityBasedAuthentication,omitempty"` + // EnableHTTPSTrafficOnly - Allows https traffic only to storage service if sets to true. + EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` + // NetworkRuleSet - Network rule set + NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"` + // LargeFileSharesState - Allow large file shares if sets to Enabled. It cannot be disabled once it is enabled. Possible values include: 'LargeFileSharesStateDisabled', 'LargeFileSharesStateEnabled' + LargeFileSharesState LargeFileSharesState `json:"largeFileSharesState,omitempty"` + // RoutingPreference - Maintains information about the network routing choice opted by the user for data transfer + RoutingPreference *RoutingPreference `json:"routingPreference,omitempty"` + // AllowBlobPublicAccess - Allow or disallow public access to all blobs or containers in the storage account. The default interpretation is true for this property. + AllowBlobPublicAccess *bool `json:"allowBlobPublicAccess,omitempty"` + // MinimumTLSVersion - Set the minimum TLS version to be permitted on requests to storage. The default interpretation is TLS 1.0 for this property. Possible values include: 'MinimumTLSVersionTLS10', 'MinimumTLSVersionTLS11', 'MinimumTLSVersionTLS12' + MinimumTLSVersion MinimumTLSVersion `json:"minimumTlsVersion,omitempty"` + // AllowSharedKeyAccess - Indicates whether the storage account permits requests to be authorized with the account access key via Shared Key. If false, then all requests, including shared access signatures, must be authorized with Azure Active Directory (Azure AD). The default value is null, which is equivalent to true. + AllowSharedKeyAccess *bool `json:"allowSharedKeyAccess,omitempty"` + // AllowCrossTenantReplication - Allow or disallow cross AAD tenant object replication. The default interpretation is true for this property. + AllowCrossTenantReplication *bool `json:"allowCrossTenantReplication,omitempty"` +} + +// AccountRegenerateKeyParameters the parameters used to regenerate the storage account key. +type AccountRegenerateKeyParameters struct { + // KeyName - The name of storage keys that want to be regenerated, possible values are key1, key2, kerb1, kerb2. + KeyName *string `json:"keyName,omitempty"` +} + +// AccountSasParameters the parameters to list SAS credentials of a storage account. +type AccountSasParameters struct { + // Services - The signed services accessible with the account SAS. Possible values include: Blob (b), Queue (q), Table (t), File (f). Possible values include: 'ServicesB', 'ServicesQ', 'ServicesT', 'ServicesF' + Services Services `json:"signedServices,omitempty"` + // ResourceTypes - The signed resource types that are accessible with the account SAS. Service (s): Access to service-level APIs; Container (c): Access to container-level APIs; Object (o): Access to object-level APIs for blobs, queue messages, table entities, and files. Possible values include: 'SignedResourceTypesS', 'SignedResourceTypesC', 'SignedResourceTypesO' + ResourceTypes SignedResourceTypes `json:"signedResourceTypes,omitempty"` + // Permissions - The signed permissions for the account SAS. Possible values include: Read (r), Write (w), Delete (d), List (l), Add (a), Create (c), Update (u) and Process (p). 
Possible values include: 'PermissionsR', 'PermissionsD', 'PermissionsW', 'PermissionsL', 'PermissionsA', 'PermissionsC', 'PermissionsU', 'PermissionsP' + Permissions Permissions `json:"signedPermission,omitempty"` + // IPAddressOrRange - An IP address or a range of IP addresses from which to accept requests. + IPAddressOrRange *string `json:"signedIp,omitempty"` + // Protocols - The protocol permitted for a request made with the account SAS. Possible values include: 'HTTPProtocolHttpshttp', 'HTTPProtocolHTTPS' + Protocols HTTPProtocol `json:"signedProtocol,omitempty"` + // SharedAccessStartTime - The time at which the SAS becomes valid. + SharedAccessStartTime *date.Time `json:"signedStart,omitempty"` + // SharedAccessExpiryTime - The time at which the shared access signature becomes invalid. + SharedAccessExpiryTime *date.Time `json:"signedExpiry,omitempty"` + // KeyToSign - The key to sign the account SAS token with. + KeyToSign *string `json:"keyToSign,omitempty"` +} + +// AccountsCreateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type AccountsCreateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(AccountsClient) (Account, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *AccountsCreateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for AccountsCreateFuture.Result. +func (future *AccountsCreateFuture) result(client AccountsClient) (a Account, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsCreateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + a.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("storage.AccountsCreateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if a.Response.Response, err = future.GetResult(sender); err == nil && a.Response.Response.StatusCode != http.StatusNoContent { + a, err = client.CreateResponder(a.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsCreateFuture", "Result", a.Response.Response, "Failure responding to request") + } + } + return +} + +// AccountsFailoverFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type AccountsFailoverFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(AccountsClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *AccountsFailoverFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for AccountsFailoverFuture.Result. 
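// Illustrative sketch (editorial note, not part of the generated SDK): the *Future types above
// follow the standard go-autorest long-running-operation pattern. The Create call that returns
// AccountsCreateFuture is assumed to live on this package's AccountsClient (not shown in this
// excerpt); WaitForCompletionRef comes from the embedded azure.FutureAPI, and params stands in
// for an AccountCreateParameters value.
//
//	future, err := accountsClient.Create(ctx, "example-resources", "examplestorageacct", params)
//	if err != nil {
//		return err
//	}
//	if err := future.WaitForCompletionRef(ctx, accountsClient.Client); err != nil { // block until the LRO finishes
//		return err
//	}
//	acct, err := future.Result(accountsClient) // decode the final Account from the terminal response
//	_, _ = acct, err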
+func (future *AccountsFailoverFuture) result(client AccountsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsFailoverFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("storage.AccountsFailoverFuture") + return + } + ar.Response = future.Response() + return +} + +// AccountsRestoreBlobRangesFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type AccountsRestoreBlobRangesFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(AccountsClient) (BlobRestoreStatus, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *AccountsRestoreBlobRangesFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for AccountsRestoreBlobRangesFuture.Result. +func (future *AccountsRestoreBlobRangesFuture) result(client AccountsClient) (brs BlobRestoreStatus, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsRestoreBlobRangesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + brs.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("storage.AccountsRestoreBlobRangesFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if brs.Response.Response, err = future.GetResult(sender); err == nil && brs.Response.Response.StatusCode != http.StatusNoContent { + brs, err = client.RestoreBlobRangesResponder(brs.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsRestoreBlobRangesFuture", "Result", brs.Response.Response, "Failure responding to request") + } + } + return +} + +// AccountUpdateParameters the parameters that can be provided when updating the storage account +// properties. +type AccountUpdateParameters struct { + // Sku - Gets or sets the SKU name. Note that the SKU name cannot be updated to Standard_ZRS, Premium_LRS or Premium_ZRS, nor can accounts of those SKU names be updated to any other value. + Sku *Sku `json:"sku,omitempty"` + // Tags - Gets or sets a list of key value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater in length than 128 characters and a value no greater in length than 256 characters. + Tags map[string]*string `json:"tags"` + // Identity - The identity of the resource. + Identity *Identity `json:"identity,omitempty"` + // AccountPropertiesUpdateParameters - The parameters used when updating a storage account. + *AccountPropertiesUpdateParameters `json:"properties,omitempty"` + // Kind - Optional. Indicates the type of storage account. Currently only StorageV2 value supported by server. 
Possible values include: 'KindStorage', 'KindStorageV2', 'KindBlobStorage', 'KindFileStorage', 'KindBlockBlobStorage' + Kind Kind `json:"kind,omitempty"` +} + +// MarshalJSON is the custom marshaler for AccountUpdateParameters. +func (aup AccountUpdateParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if aup.Sku != nil { + objectMap["sku"] = aup.Sku + } + if aup.Tags != nil { + objectMap["tags"] = aup.Tags + } + if aup.Identity != nil { + objectMap["identity"] = aup.Identity + } + if aup.AccountPropertiesUpdateParameters != nil { + objectMap["properties"] = aup.AccountPropertiesUpdateParameters + } + if aup.Kind != "" { + objectMap["kind"] = aup.Kind + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for AccountUpdateParameters struct. +func (aup *AccountUpdateParameters) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "sku": + if v != nil { + var sku Sku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + aup.Sku = &sku + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + aup.Tags = tags + } + case "identity": + if v != nil { + var identity Identity + err = json.Unmarshal(*v, &identity) + if err != nil { + return err + } + aup.Identity = &identity + } + case "properties": + if v != nil { + var accountPropertiesUpdateParameters AccountPropertiesUpdateParameters + err = json.Unmarshal(*v, &accountPropertiesUpdateParameters) + if err != nil { + return err + } + aup.AccountPropertiesUpdateParameters = &accountPropertiesUpdateParameters + } + case "kind": + if v != nil { + var kind Kind + err = json.Unmarshal(*v, &kind) + if err != nil { + return err + } + aup.Kind = kind + } + } + } + + return nil +} + +// ActiveDirectoryProperties settings properties for Active Directory (AD). +type ActiveDirectoryProperties struct { + // DomainName - Specifies the primary domain that the AD DNS server is authoritative for. + DomainName *string `json:"domainName,omitempty"` + // NetBiosDomainName - Specifies the NetBIOS domain name. + NetBiosDomainName *string `json:"netBiosDomainName,omitempty"` + // ForestName - Specifies the Active Directory forest to get. + ForestName *string `json:"forestName,omitempty"` + // DomainGUID - Specifies the domain GUID. + DomainGUID *string `json:"domainGuid,omitempty"` + // DomainSid - Specifies the security identifier (SID). + DomainSid *string `json:"domainSid,omitempty"` + // AzureStorageSid - Specifies the security identifier (SID) for Azure Storage. + AzureStorageSid *string `json:"azureStorageSid,omitempty"` +} + +// AzureEntityResource the resource model definition for an Azure Resource Manager resource with an etag. +type AzureEntityResource struct { + // Etag - READ-ONLY; Resource Etag. + Etag *string `json:"etag,omitempty"` + // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. E.g. 
"Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for AzureEntityResource. +func (aer AzureEntityResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// AzureFilesIdentityBasedAuthentication settings for Azure Files identity based authentication. +type AzureFilesIdentityBasedAuthentication struct { + // DirectoryServiceOptions - Indicates the directory service used. Possible values include: 'DirectoryServiceOptionsNone', 'DirectoryServiceOptionsAADDS', 'DirectoryServiceOptionsAD' + DirectoryServiceOptions DirectoryServiceOptions `json:"directoryServiceOptions,omitempty"` + // ActiveDirectoryProperties - Required if choose AD. + ActiveDirectoryProperties *ActiveDirectoryProperties `json:"activeDirectoryProperties,omitempty"` + // DefaultSharePermission - Default share permission for users using Kerberos authentication if RBAC role is not assigned. Possible values include: 'DefaultSharePermissionNone', 'DefaultSharePermissionStorageFileDataSmbShareReader', 'DefaultSharePermissionStorageFileDataSmbShareContributor', 'DefaultSharePermissionStorageFileDataSmbShareElevatedContributor', 'DefaultSharePermissionStorageFileDataSmbShareOwner' + DefaultSharePermission DefaultSharePermission `json:"defaultSharePermission,omitempty"` +} + +// BlobContainer properties of the blob container, including Id, resource name, resource type, Etag. +type BlobContainer struct { + autorest.Response `json:"-"` + // ContainerProperties - Properties of the blob container. + *ContainerProperties `json:"properties,omitempty"` + // Etag - READ-ONLY; Resource Etag. + Etag *string `json:"etag,omitempty"` + // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for BlobContainer. +func (bc BlobContainer) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if bc.ContainerProperties != nil { + objectMap["properties"] = bc.ContainerProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for BlobContainer struct. 
+func (bc *BlobContainer) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var containerProperties ContainerProperties + err = json.Unmarshal(*v, &containerProperties) + if err != nil { + return err + } + bc.ContainerProperties = &containerProperties + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + bc.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + bc.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + bc.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + bc.Type = &typeVar + } + } + } + + return nil +} + +// BlobContainersObjectLevelWormFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type BlobContainersObjectLevelWormFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(BlobContainersClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *BlobContainersObjectLevelWormFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for BlobContainersObjectLevelWormFuture.Result. +func (future *BlobContainersObjectLevelWormFuture) result(client BlobContainersClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersObjectLevelWormFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("storage.BlobContainersObjectLevelWormFuture") + return + } + ar.Response = future.Response() + return +} + +// BlobInventoryPolicy the storage account blob inventory policy. +type BlobInventoryPolicy struct { + autorest.Response `json:"-"` + // BlobInventoryPolicyProperties - Returns the storage account blob inventory policy rules. + *BlobInventoryPolicyProperties `json:"properties,omitempty"` + SystemData *SystemData `json:"systemData,omitempty"` + // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for BlobInventoryPolicy. 
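BlobContainer (like the other tracked-resource types in this file) embeds its properties struct and relies on custom MarshalJSON/UnmarshalJSON to flatten it under a top-level "properties" key while dropping the READ-ONLY Etag/ID/Name/Type fields. A rough caller-side sketch, assuming encoding/json and the go-autorest to helpers; values are placeholders:

bc := storage.BlobContainer{
	ContainerProperties: &storage.ContainerProperties{
		PublicAccess: storage.PublicAccessNone,
		Metadata:     map[string]*string{"env": to.StringPtr("test")},
	},
}
payload, _ := json.Marshal(bc)
// payload is roughly {"properties":{"metadata":{"env":"test"},"publicAccess":"None"}};
// the READ-ONLY resource fields never round-trip back to the service.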
+func (bip BlobInventoryPolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if bip.BlobInventoryPolicyProperties != nil { + objectMap["properties"] = bip.BlobInventoryPolicyProperties + } + if bip.SystemData != nil { + objectMap["systemData"] = bip.SystemData + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for BlobInventoryPolicy struct. +func (bip *BlobInventoryPolicy) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var blobInventoryPolicyProperties BlobInventoryPolicyProperties + err = json.Unmarshal(*v, &blobInventoryPolicyProperties) + if err != nil { + return err + } + bip.BlobInventoryPolicyProperties = &blobInventoryPolicyProperties + } + case "systemData": + if v != nil { + var systemData SystemData + err = json.Unmarshal(*v, &systemData) + if err != nil { + return err + } + bip.SystemData = &systemData + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + bip.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + bip.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + bip.Type = &typeVar + } + } + } + + return nil +} + +// BlobInventoryPolicyDefinition an object that defines the blob inventory rule. +type BlobInventoryPolicyDefinition struct { + // Filters - An object that defines the filter set. + Filters *BlobInventoryPolicyFilter `json:"filters,omitempty"` + // Format - This is a required field, it specifies the format for the inventory files. Possible values include: 'FormatCsv', 'FormatParquet' + Format Format `json:"format,omitempty"` + // Schedule - This is a required field. This field is used to schedule an inventory formation. Possible values include: 'ScheduleDaily', 'ScheduleWeekly' + Schedule Schedule `json:"schedule,omitempty"` + // ObjectType - This is a required field. This field specifies the scope of the inventory created either at the blob or container level. Possible values include: 'ObjectTypeBlob', 'ObjectTypeContainer' + ObjectType ObjectType `json:"objectType,omitempty"` + // SchemaFields - This is a required field. This field specifies the fields and properties of the object to be included in the inventory. The Schema field value 'Name' is always required. The valid values for this field for the 'Blob' definition.objectType include 'Name, Creation-Time, Last-Modified, Content-Length, Content-MD5, BlobType, AccessTier, AccessTierChangeTime, Expiry-Time, hdi_isfolder, Owner, Group, Permissions, Acl, Snapshot, VersionId, IsCurrentVersion, Metadata, LastAccessTime'. The valid values for 'Container' definition.objectType include 'Name, Last-Modified, Metadata, LeaseStatus, LeaseState, LeaseDuration, PublicAccess, HasImmutabilityPolicy, HasLegalHold'. Schema field values 'Expiry-Time, hdi_isfolder, Owner, Group, Permissions, Acl' are valid only for Hns enabled accounts. + SchemaFields *[]string `json:"schemaFields,omitempty"` +} + +// BlobInventoryPolicyFilter an object that defines the blob inventory rule filter conditions. For 'Blob' +// definition.objectType all filter properties are applicable, 'blobTypes' is required and others are +// optional. 
For 'Container' definition.objectType only prefixMatch is applicable and is optional. +type BlobInventoryPolicyFilter struct { + // PrefixMatch - An array of strings for blob prefixes to be matched. + PrefixMatch *[]string `json:"prefixMatch,omitempty"` + // BlobTypes - An array of predefined enum values. Valid values include blockBlob, appendBlob, pageBlob. Hns accounts does not support pageBlobs. This field is required when definition.objectType property is set to 'Blob'. + BlobTypes *[]string `json:"blobTypes,omitempty"` + // IncludeBlobVersions - Includes blob versions in blob inventory when value is set to true. The definition.schemaFields values 'VersionId and IsCurrentVersion' are required if this property is set to true, else they must be excluded. + IncludeBlobVersions *bool `json:"includeBlobVersions,omitempty"` + // IncludeSnapshots - Includes blob snapshots in blob inventory when value is set to true. The definition.schemaFields value 'Snapshot' is required if this property is set to true, else it must be excluded. + IncludeSnapshots *bool `json:"includeSnapshots,omitempty"` +} + +// BlobInventoryPolicyProperties the storage account blob inventory policy properties. +type BlobInventoryPolicyProperties struct { + // LastModifiedTime - READ-ONLY; Returns the last modified date and time of the blob inventory policy. + LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"` + // Policy - The storage account blob inventory policy object. It is composed of policy rules. + Policy *BlobInventoryPolicySchema `json:"policy,omitempty"` +} + +// MarshalJSON is the custom marshaler for BlobInventoryPolicyProperties. +func (bipp BlobInventoryPolicyProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if bipp.Policy != nil { + objectMap["policy"] = bipp.Policy + } + return json.Marshal(objectMap) +} + +// BlobInventoryPolicyRule an object that wraps the blob inventory rule. Each rule is uniquely defined by +// name. +type BlobInventoryPolicyRule struct { + // Enabled - Rule is enabled when set to true. + Enabled *bool `json:"enabled,omitempty"` + // Name - A rule name can contain any combination of alpha numeric characters. Rule name is case-sensitive. It must be unique within a policy. + Name *string `json:"name,omitempty"` + // Destination - Container name where blob inventory files are stored. Must be pre-created. + Destination *string `json:"destination,omitempty"` + // Definition - An object that defines the blob inventory policy rule. + Definition *BlobInventoryPolicyDefinition `json:"definition,omitempty"` +} + +// BlobInventoryPolicySchema the storage account blob inventory policy rules. +type BlobInventoryPolicySchema struct { + // Enabled - Policy is enabled if set to true. + Enabled *bool `json:"enabled,omitempty"` + // Type - The valid value is Inventory + Type *string `json:"type,omitempty"` + // Rules - The storage account blob inventory policy rules. The rule is applied when it is enabled. + Rules *[]BlobInventoryPolicyRule `json:"rules,omitempty"` +} + +// BlobRestoreParameters blob restore parameters +type BlobRestoreParameters struct { + // TimeToRestore - Restore blob to the specified time. + TimeToRestore *date.Time `json:"timeToRestore,omitempty"` + // BlobRanges - Blob ranges to restore. + BlobRanges *[]BlobRestoreRange `json:"blobRanges,omitempty"` +} + +// BlobRestoreRange blob range +type BlobRestoreRange struct { + // StartRange - Blob start range. This is inclusive. Empty means account start. 
+ StartRange *string `json:"startRange,omitempty"` + // EndRange - Blob end range. This is exclusive. Empty means account end. + EndRange *string `json:"endRange,omitempty"` +} + +// BlobRestoreStatus blob restore status. +type BlobRestoreStatus struct { + autorest.Response `json:"-"` + // Status - READ-ONLY; The status of blob restore progress. Possible values are: - InProgress: Indicates that blob restore is ongoing. - Complete: Indicates that blob restore has been completed successfully. - Failed: Indicates that blob restore is failed. Possible values include: 'BlobRestoreProgressStatusInProgress', 'BlobRestoreProgressStatusComplete', 'BlobRestoreProgressStatusFailed' + Status BlobRestoreProgressStatus `json:"status,omitempty"` + // FailureReason - READ-ONLY; Failure reason when blob restore is failed. + FailureReason *string `json:"failureReason,omitempty"` + // RestoreID - READ-ONLY; Id for tracking blob restore request. + RestoreID *string `json:"restoreId,omitempty"` + // Parameters - READ-ONLY; Blob restore request parameters. + Parameters *BlobRestoreParameters `json:"parameters,omitempty"` +} + +// MarshalJSON is the custom marshaler for BlobRestoreStatus. +func (brs BlobRestoreStatus) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// BlobServiceItems ... +type BlobServiceItems struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; List of blob services returned. + Value *[]BlobServiceProperties `json:"value,omitempty"` +} + +// MarshalJSON is the custom marshaler for BlobServiceItems. +func (bsi BlobServiceItems) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// BlobServiceProperties the properties of a storage account’s Blob service. +type BlobServiceProperties struct { + autorest.Response `json:"-"` + // BlobServicePropertiesProperties - The properties of a storage account’s Blob service. + *BlobServicePropertiesProperties `json:"properties,omitempty"` + // Sku - READ-ONLY; Sku name and tier. + Sku *Sku `json:"sku,omitempty"` + // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for BlobServiceProperties. +func (bsp BlobServiceProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if bsp.BlobServicePropertiesProperties != nil { + objectMap["properties"] = bsp.BlobServicePropertiesProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for BlobServiceProperties struct. 
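BlobRestoreParameters and BlobRestoreRange above feed the point-in-time restore operation tracked by AccountsRestoreBlobRangesFuture earlier in this file. A small illustrative sketch, assuming the go-autorest date and to helper packages and placeholder values:

restoreParams := storage.BlobRestoreParameters{
	// Restore the account to its state two hours ago.
	TimeToRestore: &date.Time{Time: time.Now().Add(-2 * time.Hour)},
	BlobRanges: &[]storage.BlobRestoreRange{
		{
			StartRange: to.StringPtr("container1/"), // inclusive
			EndRange:   to.StringPtr(""),            // exclusive; empty means "account end"
		},
	},
}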
+func (bsp *BlobServiceProperties) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var blobServiceProperties BlobServicePropertiesProperties + err = json.Unmarshal(*v, &blobServiceProperties) + if err != nil { + return err + } + bsp.BlobServicePropertiesProperties = &blobServiceProperties + } + case "sku": + if v != nil { + var sku Sku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + bsp.Sku = &sku + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + bsp.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + bsp.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + bsp.Type = &typeVar + } + } + } + + return nil +} + +// BlobServicePropertiesProperties the properties of a storage account’s Blob service. +type BlobServicePropertiesProperties struct { + // Cors - Specifies CORS rules for the Blob service. You can include up to five CorsRule elements in the request. If no CorsRule elements are included in the request body, all CORS rules will be deleted, and CORS will be disabled for the Blob service. + Cors *CorsRules `json:"cors,omitempty"` + // DefaultServiceVersion - DefaultServiceVersion indicates the default version to use for requests to the Blob service if an incoming request’s version is not specified. Possible values include version 2008-10-27 and all more recent versions. + DefaultServiceVersion *string `json:"defaultServiceVersion,omitempty"` + // DeleteRetentionPolicy - The blob service properties for blob soft delete. + DeleteRetentionPolicy *DeleteRetentionPolicy `json:"deleteRetentionPolicy,omitempty"` + // IsVersioningEnabled - Versioning is enabled if set to true. + IsVersioningEnabled *bool `json:"isVersioningEnabled,omitempty"` + // AutomaticSnapshotPolicyEnabled - Deprecated in favor of isVersioningEnabled property. + AutomaticSnapshotPolicyEnabled *bool `json:"automaticSnapshotPolicyEnabled,omitempty"` + // ChangeFeed - The blob service properties for change feed events. + ChangeFeed *ChangeFeed `json:"changeFeed,omitempty"` + // RestorePolicy - The blob service properties for blob restore policy. + RestorePolicy *RestorePolicyProperties `json:"restorePolicy,omitempty"` + // ContainerDeleteRetentionPolicy - The blob service properties for container soft delete. + ContainerDeleteRetentionPolicy *DeleteRetentionPolicy `json:"containerDeleteRetentionPolicy,omitempty"` + // LastAccessTimeTrackingPolicy - The blob service property to configure last access time based tracking policy. + LastAccessTimeTrackingPolicy *LastAccessTimeTrackingPolicy `json:"lastAccessTimeTrackingPolicy,omitempty"` +} + +// ChangeFeed the blob service properties for change feed events. +type ChangeFeed struct { + // Enabled - Indicates whether change feed event logging is enabled for the Blob service. + Enabled *bool `json:"enabled,omitempty"` + // RetentionInDays - Indicates the duration of changeFeed retention in days. Minimum value is 1 day and maximum value is 146000 days (400 years). A null value indicates an infinite retention of the change feed. 
+ RetentionInDays *int32 `json:"retentionInDays,omitempty"` +} + +// CheckNameAvailabilityResult the CheckNameAvailability operation response. +type CheckNameAvailabilityResult struct { + autorest.Response `json:"-"` + // NameAvailable - READ-ONLY; Gets a boolean value that indicates whether the name is available for you to use. If true, the name is available. If false, the name has already been taken or is invalid and cannot be used. + NameAvailable *bool `json:"nameAvailable,omitempty"` + // Reason - READ-ONLY; Gets the reason that a storage account name could not be used. The Reason element is only returned if NameAvailable is false. Possible values include: 'ReasonAccountNameInvalid', 'ReasonAlreadyExists' + Reason Reason `json:"reason,omitempty"` + // Message - READ-ONLY; Gets an error message explaining the Reason value in more detail. + Message *string `json:"message,omitempty"` +} + +// MarshalJSON is the custom marshaler for CheckNameAvailabilityResult. +func (cnar CheckNameAvailabilityResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// CloudError an error response from the Storage service. +type CloudError struct { + Error *CloudErrorBody `json:"error,omitempty"` +} + +// CloudErrorBody an error response from the Storage service. +type CloudErrorBody struct { + // Code - An identifier for the error. Codes are invariant and are intended to be consumed programmatically. + Code *string `json:"code,omitempty"` + // Message - A message describing the error, intended to be suitable for display in a user interface. + Message *string `json:"message,omitempty"` + // Target - The target of the particular error. For example, the name of the property in error. + Target *string `json:"target,omitempty"` + // Details - A list of additional details about the error. + Details *[]CloudErrorBody `json:"details,omitempty"` +} + +// ContainerProperties the properties of a container. +type ContainerProperties struct { + // Version - READ-ONLY; The version of the deleted blob container. + Version *string `json:"version,omitempty"` + // Deleted - READ-ONLY; Indicates whether the blob container was deleted. + Deleted *bool `json:"deleted,omitempty"` + // DeletedTime - READ-ONLY; Blob container deletion time. + DeletedTime *date.Time `json:"deletedTime,omitempty"` + // RemainingRetentionDays - READ-ONLY; Remaining retention days for soft deleted blob container. + RemainingRetentionDays *int32 `json:"remainingRetentionDays,omitempty"` + // DefaultEncryptionScope - Default the container to use specified encryption scope for all writes. + DefaultEncryptionScope *string `json:"defaultEncryptionScope,omitempty"` + // DenyEncryptionScopeOverride - Block override of encryption scope from the container default. + DenyEncryptionScopeOverride *bool `json:"denyEncryptionScopeOverride,omitempty"` + // PublicAccess - Specifies whether data in the container may be accessed publicly and the level of access. Possible values include: 'PublicAccessContainer', 'PublicAccessBlob', 'PublicAccessNone' + PublicAccess PublicAccess `json:"publicAccess,omitempty"` + // LastModifiedTime - READ-ONLY; Returns the date and time the container was last modified. + LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"` + // LeaseStatus - READ-ONLY; The lease status of the container. 
Possible values include: 'LeaseStatusLocked', 'LeaseStatusUnlocked' + LeaseStatus LeaseStatus `json:"leaseStatus,omitempty"` + // LeaseState - READ-ONLY; Lease state of the container. Possible values include: 'LeaseStateAvailable', 'LeaseStateLeased', 'LeaseStateExpired', 'LeaseStateBreaking', 'LeaseStateBroken' + LeaseState LeaseState `json:"leaseState,omitempty"` + // LeaseDuration - READ-ONLY; Specifies whether the lease on a container is of infinite or fixed duration, only when the container is leased. Possible values include: 'LeaseDurationInfinite', 'LeaseDurationFixed' + LeaseDuration LeaseDuration `json:"leaseDuration,omitempty"` + // Metadata - A name-value pair to associate with the container as metadata. + Metadata map[string]*string `json:"metadata"` + // ImmutabilityPolicy - READ-ONLY; The ImmutabilityPolicy property of the container. + ImmutabilityPolicy *ImmutabilityPolicyProperties `json:"immutabilityPolicy,omitempty"` + // LegalHold - READ-ONLY; The LegalHold property of the container. + LegalHold *LegalHoldProperties `json:"legalHold,omitempty"` + // HasLegalHold - READ-ONLY; The hasLegalHold public property is set to true by SRP if there are at least one existing tag. The hasLegalHold public property is set to false by SRP if all existing legal hold tags are cleared out. There can be a maximum of 1000 blob containers with hasLegalHold=true for a given account. + HasLegalHold *bool `json:"hasLegalHold,omitempty"` + // HasImmutabilityPolicy - READ-ONLY; The hasImmutabilityPolicy public property is set to true by SRP if ImmutabilityPolicy has been created for this container. The hasImmutabilityPolicy public property is set to false by SRP if ImmutabilityPolicy has not been created for this container. + HasImmutabilityPolicy *bool `json:"hasImmutabilityPolicy,omitempty"` + // ImmutableStorageWithVersioning - The object level immutability property of the container. The property is immutable and can only be set to true at the container creation time. Existing containers must undergo a migration process. + ImmutableStorageWithVersioning *ImmutableStorageWithVersioning `json:"immutableStorageWithVersioning,omitempty"` +} + +// MarshalJSON is the custom marshaler for ContainerProperties. +func (cp ContainerProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if cp.DefaultEncryptionScope != nil { + objectMap["defaultEncryptionScope"] = cp.DefaultEncryptionScope + } + if cp.DenyEncryptionScopeOverride != nil { + objectMap["denyEncryptionScopeOverride"] = cp.DenyEncryptionScopeOverride + } + if cp.PublicAccess != "" { + objectMap["publicAccess"] = cp.PublicAccess + } + if cp.Metadata != nil { + objectMap["metadata"] = cp.Metadata + } + if cp.ImmutableStorageWithVersioning != nil { + objectMap["immutableStorageWithVersioning"] = cp.ImmutableStorageWithVersioning + } + return json.Marshal(objectMap) +} + +// CorsRule specifies a CORS rule for the Blob service. +type CorsRule struct { + // AllowedOrigins - Required if CorsRule element is present. A list of origin domains that will be allowed via CORS, or "*" to allow all domains + AllowedOrigins *[]string `json:"allowedOrigins,omitempty"` + // AllowedMethods - Required if CorsRule element is present. A list of HTTP methods that are allowed to be executed by the origin. + AllowedMethods *[]string `json:"allowedMethods,omitempty"` + // MaxAgeInSeconds - Required if CorsRule element is present. The number of seconds that the client/browser should cache a preflight response. 
+ MaxAgeInSeconds *int32 `json:"maxAgeInSeconds,omitempty"` + // ExposedHeaders - Required if CorsRule element is present. A list of response headers to expose to CORS clients. + ExposedHeaders *[]string `json:"exposedHeaders,omitempty"` + // AllowedHeaders - Required if CorsRule element is present. A list of headers allowed to be part of the cross-origin request. + AllowedHeaders *[]string `json:"allowedHeaders,omitempty"` +} + +// CorsRules sets the CORS rules. You can include up to five CorsRule elements in the request. +type CorsRules struct { + // CorsRules - The List of CORS rules. You can include up to five CorsRule elements in the request. + CorsRules *[]CorsRule `json:"corsRules,omitempty"` +} + +// CustomDomain the custom domain assigned to this storage account. This can be set via Update. +type CustomDomain struct { + // Name - Gets or sets the custom domain name assigned to the storage account. Name is the CNAME source. + Name *string `json:"name,omitempty"` + // UseSubDomainName - Indicates whether indirect CName validation is enabled. Default value is false. This should only be set on updates. + UseSubDomainName *bool `json:"useSubDomainName,omitempty"` +} + +// DateAfterCreation object to define the number of days after creation. +type DateAfterCreation struct { + // DaysAfterCreationGreaterThan - Value indicating the age in days after creation + DaysAfterCreationGreaterThan *float64 `json:"daysAfterCreationGreaterThan,omitempty"` +} + +// DateAfterModification object to define the number of days after object last modification Or last access. +// Properties daysAfterModificationGreaterThan and daysAfterLastAccessTimeGreaterThan are mutually +// exclusive. +type DateAfterModification struct { + // DaysAfterModificationGreaterThan - Value indicating the age in days after last modification + DaysAfterModificationGreaterThan *float64 `json:"daysAfterModificationGreaterThan,omitempty"` + // DaysAfterLastAccessTimeGreaterThan - Value indicating the age in days after last blob access. This property can only be used in conjunction with last access time tracking policy + DaysAfterLastAccessTimeGreaterThan *float64 `json:"daysAfterLastAccessTimeGreaterThan,omitempty"` +} + +// DeletedAccount deleted storage account +type DeletedAccount struct { + autorest.Response `json:"-"` + // DeletedAccountProperties - Properties of the deleted account. + *DeletedAccountProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for DeletedAccount. +func (da DeletedAccount) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if da.DeletedAccountProperties != nil { + objectMap["properties"] = da.DeletedAccountProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for DeletedAccount struct. 
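CorsRule and CorsRules above mirror the Blob service CORS configuration: at most five CorsRule elements are accepted per request, and every listed field is required when a rule is present. A minimal sketch with placeholder values, assuming the go-autorest to helpers:

cors := storage.CorsRules{
	CorsRules: &[]storage.CorsRule{
		{
			AllowedOrigins:  &[]string{"https://example.com"},
			AllowedMethods:  &[]string{"GET", "HEAD"},
			AllowedHeaders:  &[]string{"x-ms-meta-*"},
			ExposedHeaders:  &[]string{"x-ms-meta-*"},
			MaxAgeInSeconds: to.Int32Ptr(3600),
		},
	},
}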
+func (da *DeletedAccount) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var deletedAccountProperties DeletedAccountProperties + err = json.Unmarshal(*v, &deletedAccountProperties) + if err != nil { + return err + } + da.DeletedAccountProperties = &deletedAccountProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + da.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + da.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + da.Type = &typeVar + } + } + } + + return nil +} + +// DeletedAccountListResult the response from the List Deleted Accounts operation. +type DeletedAccountListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; Gets the list of deleted accounts and their properties. + Value *[]DeletedAccount `json:"value,omitempty"` + // NextLink - READ-ONLY; Request URL that can be used to query next page of deleted accounts. Returned when total number of requested deleted accounts exceed maximum page size. + NextLink *string `json:"nextLink,omitempty"` +} + +// MarshalJSON is the custom marshaler for DeletedAccountListResult. +func (dalr DeletedAccountListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// DeletedAccountListResultIterator provides access to a complete listing of DeletedAccount values. +type DeletedAccountListResultIterator struct { + i int + page DeletedAccountListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *DeletedAccountListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeletedAccountListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *DeletedAccountListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter DeletedAccountListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter DeletedAccountListResultIterator) Response() DeletedAccountListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. 
+func (iter DeletedAccountListResultIterator) Value() DeletedAccount { + if !iter.page.NotDone() { + return DeletedAccount{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the DeletedAccountListResultIterator type. +func NewDeletedAccountListResultIterator(page DeletedAccountListResultPage) DeletedAccountListResultIterator { + return DeletedAccountListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (dalr DeletedAccountListResult) IsEmpty() bool { + return dalr.Value == nil || len(*dalr.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (dalr DeletedAccountListResult) hasNextLink() bool { + return dalr.NextLink != nil && len(*dalr.NextLink) != 0 +} + +// deletedAccountListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (dalr DeletedAccountListResult) deletedAccountListResultPreparer(ctx context.Context) (*http.Request, error) { + if !dalr.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(dalr.NextLink))) +} + +// DeletedAccountListResultPage contains a page of DeletedAccount values. +type DeletedAccountListResultPage struct { + fn func(context.Context, DeletedAccountListResult) (DeletedAccountListResult, error) + dalr DeletedAccountListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *DeletedAccountListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeletedAccountListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.dalr) + if err != nil { + return err + } + page.dalr = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *DeletedAccountListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DeletedAccountListResultPage) NotDone() bool { + return !page.dalr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page DeletedAccountListResultPage) Response() DeletedAccountListResult { + return page.dalr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DeletedAccountListResultPage) Values() []DeletedAccount { + if page.dalr.IsEmpty() { + return nil + } + return *page.dalr.Value +} + +// Creates a new instance of the DeletedAccountListResultPage type. +func NewDeletedAccountListResultPage(cur DeletedAccountListResult, getNextPage func(context.Context, DeletedAccountListResult) (DeletedAccountListResult, error)) DeletedAccountListResultPage { + return DeletedAccountListResultPage{ + fn: getNextPage, + dalr: cur, + } +} + +// DeletedAccountProperties attributes of a deleted storage account. 
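The Iterator/Page pair above follows the standard go-autorest pagination pattern: each page is fetched via the NextLink preparer, and the iterator walks values across pages. A rough consumption sketch; the deletedAccountsClient variable and its ListComplete helper are assumed from the matching generated client rather than shown in this file:

iter, err := deletedAccountsClient.ListComplete(ctx)
if err != nil {
	return err
}
for iter.NotDone() {
	acct := iter.Value()
	if acct.Name != nil {
		fmt.Println(*acct.Name)
	}
	// NextWithContext advances within the page, fetching the next page when needed.
	if err := iter.NextWithContext(ctx); err != nil {
		return err
	}
}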
+type DeletedAccountProperties struct { + // StorageAccountResourceID - READ-ONLY; Full resource id of the original storage account. + StorageAccountResourceID *string `json:"storageAccountResourceId,omitempty"` + // Location - READ-ONLY; Location of the deleted account. + Location *string `json:"location,omitempty"` + // RestoreReference - READ-ONLY; Can be used to attempt recovering this deleted account via PutStorageAccount API. + RestoreReference *string `json:"restoreReference,omitempty"` + // CreationTime - READ-ONLY; Creation time of the deleted account. + CreationTime *string `json:"creationTime,omitempty"` + // DeletionTime - READ-ONLY; Deletion time of the deleted account. + DeletionTime *string `json:"deletionTime,omitempty"` +} + +// MarshalJSON is the custom marshaler for DeletedAccountProperties. +func (dap DeletedAccountProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// DeletedShare the deleted share to be restored. +type DeletedShare struct { + // DeletedShareName - Required. Identify the name of the deleted share that will be restored. + DeletedShareName *string `json:"deletedShareName,omitempty"` + // DeletedShareVersion - Required. Identify the version of the deleted share that will be restored. + DeletedShareVersion *string `json:"deletedShareVersion,omitempty"` +} + +// DeleteRetentionPolicy the service properties for soft delete. +type DeleteRetentionPolicy struct { + // Enabled - Indicates whether DeleteRetentionPolicy is enabled. + Enabled *bool `json:"enabled,omitempty"` + // Days - Indicates the number of days that the deleted item should be retained. The minimum specified value can be 1 and the maximum value can be 365. + Days *int32 `json:"days,omitempty"` +} + +// Dimension dimension of blobs, possibly be blob type or access tier. +type Dimension struct { + // Name - Display name of dimension. + Name *string `json:"name,omitempty"` + // DisplayName - Display name of dimension. + DisplayName *string `json:"displayName,omitempty"` +} + +// Encryption the encryption settings on the storage account. +type Encryption struct { + // Services - List of services which support encryption. + Services *EncryptionServices `json:"services,omitempty"` + // KeySource - The encryption keySource (provider). Possible values (case-insensitive): Microsoft.Storage, Microsoft.Keyvault. Possible values include: 'KeySourceMicrosoftStorage', 'KeySourceMicrosoftKeyvault' + KeySource KeySource `json:"keySource,omitempty"` + // RequireInfrastructureEncryption - A boolean indicating whether or not the service applies a secondary layer of encryption with platform managed keys for data at rest. + RequireInfrastructureEncryption *bool `json:"requireInfrastructureEncryption,omitempty"` + // KeyVaultProperties - Properties provided by key vault. + KeyVaultProperties *KeyVaultProperties `json:"keyvaultproperties,omitempty"` + // EncryptionIdentity - The identity to be used with service-side encryption at rest. + EncryptionIdentity *EncryptionIdentity `json:"identity,omitempty"` +} + +// EncryptionIdentity encryption identity for the storage account. +type EncryptionIdentity struct { + // EncryptionUserAssignedIdentity - Resource identifier of the UserAssigned identity to be associated with server-side encryption on the storage account. + EncryptionUserAssignedIdentity *string `json:"userAssignedIdentity,omitempty"` +} + +// EncryptionScope the Encryption Scope resource. 
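Encryption, EncryptionServices and EncryptionService above describe the account's at-rest encryption configuration. A minimal sketch using Microsoft-managed keys with account-scoped keys for the blob and file services, assuming the go-autorest to helpers:

enc := storage.Encryption{
	KeySource:                       storage.KeySourceMicrosoftStorage,
	RequireInfrastructureEncryption: to.BoolPtr(true),
	Services: &storage.EncryptionServices{
		Blob: &storage.EncryptionService{Enabled: to.BoolPtr(true), KeyType: storage.KeyTypeAccount},
		File: &storage.EncryptionService{Enabled: to.BoolPtr(true), KeyType: storage.KeyTypeAccount},
	},
}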
+type EncryptionScope struct { + autorest.Response `json:"-"` + // EncryptionScopeProperties - Properties of the encryption scope. + *EncryptionScopeProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for EncryptionScope. +func (es EncryptionScope) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if es.EncryptionScopeProperties != nil { + objectMap["properties"] = es.EncryptionScopeProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for EncryptionScope struct. +func (es *EncryptionScope) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var encryptionScopeProperties EncryptionScopeProperties + err = json.Unmarshal(*v, &encryptionScopeProperties) + if err != nil { + return err + } + es.EncryptionScopeProperties = &encryptionScopeProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + es.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + es.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + es.Type = &typeVar + } + } + } + + return nil +} + +// EncryptionScopeKeyVaultProperties the key vault properties for the encryption scope. This is a required +// field if encryption scope 'source' attribute is set to 'Microsoft.KeyVault'. +type EncryptionScopeKeyVaultProperties struct { + // KeyURI - The object identifier for a key vault key object. When applied, the encryption scope will use the key referenced by the identifier to enable customer-managed key support on this encryption scope. + KeyURI *string `json:"keyUri,omitempty"` + // CurrentVersionedKeyIdentifier - READ-ONLY; The object identifier of the current versioned Key Vault Key in use. + CurrentVersionedKeyIdentifier *string `json:"currentVersionedKeyIdentifier,omitempty"` + // LastKeyRotationTimestamp - READ-ONLY; Timestamp of last rotation of the Key Vault Key. + LastKeyRotationTimestamp *date.Time `json:"lastKeyRotationTimestamp,omitempty"` +} + +// MarshalJSON is the custom marshaler for EncryptionScopeKeyVaultProperties. +func (eskvp EncryptionScopeKeyVaultProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if eskvp.KeyURI != nil { + objectMap["keyUri"] = eskvp.KeyURI + } + return json.Marshal(objectMap) +} + +// EncryptionScopeListResult list of encryption scopes requested, and if paging is required, a URL to the +// next page of encryption scopes. +type EncryptionScopeListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; List of encryption scopes requested. 
+ Value *[]EncryptionScope `json:"value,omitempty"` + // NextLink - READ-ONLY; Request URL that can be used to query next page of encryption scopes. Returned when total number of requested encryption scopes exceeds the maximum page size. + NextLink *string `json:"nextLink,omitempty"` +} + +// MarshalJSON is the custom marshaler for EncryptionScopeListResult. +func (eslr EncryptionScopeListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// EncryptionScopeListResultIterator provides access to a complete listing of EncryptionScope values. +type EncryptionScopeListResultIterator struct { + i int + page EncryptionScopeListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *EncryptionScopeListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/EncryptionScopeListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *EncryptionScopeListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter EncryptionScopeListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter EncryptionScopeListResultIterator) Response() EncryptionScopeListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter EncryptionScopeListResultIterator) Value() EncryptionScope { + if !iter.page.NotDone() { + return EncryptionScope{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the EncryptionScopeListResultIterator type. +func NewEncryptionScopeListResultIterator(page EncryptionScopeListResultPage) EncryptionScopeListResultIterator { + return EncryptionScopeListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (eslr EncryptionScopeListResult) IsEmpty() bool { + return eslr.Value == nil || len(*eslr.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (eslr EncryptionScopeListResult) hasNextLink() bool { + return eslr.NextLink != nil && len(*eslr.NextLink) != 0 +} + +// encryptionScopeListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. 
+func (eslr EncryptionScopeListResult) encryptionScopeListResultPreparer(ctx context.Context) (*http.Request, error) { + if !eslr.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(eslr.NextLink))) +} + +// EncryptionScopeListResultPage contains a page of EncryptionScope values. +type EncryptionScopeListResultPage struct { + fn func(context.Context, EncryptionScopeListResult) (EncryptionScopeListResult, error) + eslr EncryptionScopeListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *EncryptionScopeListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/EncryptionScopeListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.eslr) + if err != nil { + return err + } + page.eslr = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *EncryptionScopeListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page EncryptionScopeListResultPage) NotDone() bool { + return !page.eslr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page EncryptionScopeListResultPage) Response() EncryptionScopeListResult { + return page.eslr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page EncryptionScopeListResultPage) Values() []EncryptionScope { + if page.eslr.IsEmpty() { + return nil + } + return *page.eslr.Value +} + +// Creates a new instance of the EncryptionScopeListResultPage type. +func NewEncryptionScopeListResultPage(cur EncryptionScopeListResult, getNextPage func(context.Context, EncryptionScopeListResult) (EncryptionScopeListResult, error)) EncryptionScopeListResultPage { + return EncryptionScopeListResultPage{ + fn: getNextPage, + eslr: cur, + } +} + +// EncryptionScopeProperties properties of the encryption scope. +type EncryptionScopeProperties struct { + // Source - The provider for the encryption scope. Possible values (case-insensitive): Microsoft.Storage, Microsoft.KeyVault. Possible values include: 'EncryptionScopeSourceMicrosoftStorage', 'EncryptionScopeSourceMicrosoftKeyVault' + Source EncryptionScopeSource `json:"source,omitempty"` + // State - The state of the encryption scope. Possible values (case-insensitive): Enabled, Disabled. Possible values include: 'EncryptionScopeStateEnabled', 'EncryptionScopeStateDisabled' + State EncryptionScopeState `json:"state,omitempty"` + // CreationTime - READ-ONLY; Gets the creation date and time of the encryption scope in UTC. + CreationTime *date.Time `json:"creationTime,omitempty"` + // LastModifiedTime - READ-ONLY; Gets the last modification date and time of the encryption scope in UTC. 
+ LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"` + // KeyVaultProperties - The key vault properties for the encryption scope. This is a required field if encryption scope 'source' attribute is set to 'Microsoft.KeyVault'. + KeyVaultProperties *EncryptionScopeKeyVaultProperties `json:"keyVaultProperties,omitempty"` + // RequireInfrastructureEncryption - A boolean indicating whether or not the service applies a secondary layer of encryption with platform managed keys for data at rest. + RequireInfrastructureEncryption *bool `json:"requireInfrastructureEncryption,omitempty"` +} + +// MarshalJSON is the custom marshaler for EncryptionScopeProperties. +func (esp EncryptionScopeProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if esp.Source != "" { + objectMap["source"] = esp.Source + } + if esp.State != "" { + objectMap["state"] = esp.State + } + if esp.KeyVaultProperties != nil { + objectMap["keyVaultProperties"] = esp.KeyVaultProperties + } + if esp.RequireInfrastructureEncryption != nil { + objectMap["requireInfrastructureEncryption"] = esp.RequireInfrastructureEncryption + } + return json.Marshal(objectMap) +} + +// EncryptionService a service that allows server-side encryption to be used. +type EncryptionService struct { + // Enabled - A boolean indicating whether or not the service encrypts the data as it is stored. + Enabled *bool `json:"enabled,omitempty"` + // LastEnabledTime - READ-ONLY; Gets a rough estimate of the date/time when the encryption was last enabled by the user. Only returned when encryption is enabled. There might be some unencrypted blobs which were written after this time, as it is just a rough estimate. + LastEnabledTime *date.Time `json:"lastEnabledTime,omitempty"` + // KeyType - Encryption key type to be used for the encryption service. 'Account' key type implies that an account-scoped encryption key will be used. 'Service' key type implies that a default service key is used. Possible values include: 'KeyTypeService', 'KeyTypeAccount' + KeyType KeyType `json:"keyType,omitempty"` +} + +// MarshalJSON is the custom marshaler for EncryptionService. +func (es EncryptionService) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if es.Enabled != nil { + objectMap["enabled"] = es.Enabled + } + if es.KeyType != "" { + objectMap["keyType"] = es.KeyType + } + return json.Marshal(objectMap) +} + +// EncryptionServices a list of services that support encryption. +type EncryptionServices struct { + // Blob - The encryption function of the blob storage service. + Blob *EncryptionService `json:"blob,omitempty"` + // File - The encryption function of the file storage service. + File *EncryptionService `json:"file,omitempty"` + // Table - The encryption function of the table storage service. + Table *EncryptionService `json:"table,omitempty"` + // Queue - The encryption function of the queue storage service. + Queue *EncryptionService `json:"queue,omitempty"` +} + +// Endpoints the URIs that are used to perform a retrieval of a public blob, queue, table, web or dfs +// object. +type Endpoints struct { + // Blob - READ-ONLY; Gets the blob endpoint. + Blob *string `json:"blob,omitempty"` + // Queue - READ-ONLY; Gets the queue endpoint. + Queue *string `json:"queue,omitempty"` + // Table - READ-ONLY; Gets the table endpoint. + Table *string `json:"table,omitempty"` + // File - READ-ONLY; Gets the file endpoint. + File *string `json:"file,omitempty"` + // Web - READ-ONLY; Gets the web endpoint. 
+ Web *string `json:"web,omitempty"` + // Dfs - READ-ONLY; Gets the dfs endpoint. + Dfs *string `json:"dfs,omitempty"` + // MicrosoftEndpoints - Gets the microsoft routing storage endpoints. + MicrosoftEndpoints *AccountMicrosoftEndpoints `json:"microsoftEndpoints,omitempty"` + // InternetEndpoints - Gets the internet routing storage endpoints + InternetEndpoints *AccountInternetEndpoints `json:"internetEndpoints,omitempty"` +} + +// MarshalJSON is the custom marshaler for Endpoints. +func (e Endpoints) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if e.MicrosoftEndpoints != nil { + objectMap["microsoftEndpoints"] = e.MicrosoftEndpoints + } + if e.InternetEndpoints != nil { + objectMap["internetEndpoints"] = e.InternetEndpoints + } + return json.Marshal(objectMap) +} + +// ErrorResponse an error response from the storage resource provider. +type ErrorResponse struct { + // Error - Azure Storage Resource Provider error response body. + Error *ErrorResponseBody `json:"error,omitempty"` +} + +// ErrorResponseBody error response body contract. +type ErrorResponseBody struct { + // Code - An identifier for the error. Codes are invariant and are intended to be consumed programmatically. + Code *string `json:"code,omitempty"` + // Message - A message describing the error, intended to be suitable for display in a user interface. + Message *string `json:"message,omitempty"` +} + +// ExtendedLocation the complex type of the extended location. +type ExtendedLocation struct { + // Name - The name of the extended location. + Name *string `json:"name,omitempty"` + // Type - The type of the extended location. Possible values include: 'ExtendedLocationTypesEdgeZone' + Type ExtendedLocationTypes `json:"type,omitempty"` +} + +// FileServiceItems ... +type FileServiceItems struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; List of file services returned. + Value *[]FileServiceProperties `json:"value,omitempty"` +} + +// MarshalJSON is the custom marshaler for FileServiceItems. +func (fsi FileServiceItems) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// FileServiceProperties the properties of File services in storage account. +type FileServiceProperties struct { + autorest.Response `json:"-"` + // FileServicePropertiesProperties - The properties of File services in storage account. + *FileServicePropertiesProperties `json:"properties,omitempty"` + // Sku - READ-ONLY; Sku name and tier. + Sku *Sku `json:"sku,omitempty"` + // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for FileServiceProperties. +func (fsp FileServiceProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if fsp.FileServicePropertiesProperties != nil { + objectMap["properties"] = fsp.FileServicePropertiesProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for FileServiceProperties struct. 
+func (fsp *FileServiceProperties) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var fileServiceProperties FileServicePropertiesProperties + err = json.Unmarshal(*v, &fileServiceProperties) + if err != nil { + return err + } + fsp.FileServicePropertiesProperties = &fileServiceProperties + } + case "sku": + if v != nil { + var sku Sku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + fsp.Sku = &sku + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + fsp.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + fsp.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + fsp.Type = &typeVar + } + } + } + + return nil +} + +// FileServicePropertiesProperties the properties of File services in storage account. +type FileServicePropertiesProperties struct { + // Cors - Specifies CORS rules for the File service. You can include up to five CorsRule elements in the request. If no CorsRule elements are included in the request body, all CORS rules will be deleted, and CORS will be disabled for the File service. + Cors *CorsRules `json:"cors,omitempty"` + // ShareDeleteRetentionPolicy - The file service properties for share soft delete. + ShareDeleteRetentionPolicy *DeleteRetentionPolicy `json:"shareDeleteRetentionPolicy,omitempty"` + // ProtocolSettings - Protocol settings for file service + ProtocolSettings *ProtocolSettings `json:"protocolSettings,omitempty"` +} + +// FileShare properties of the file share, including Id, resource name, resource type, Etag. +type FileShare struct { + autorest.Response `json:"-"` + // FileShareProperties - Properties of the file share. + *FileShareProperties `json:"properties,omitempty"` + // Etag - READ-ONLY; Resource Etag. + Etag *string `json:"etag,omitempty"` + // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for FileShare. +func (fs FileShare) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if fs.FileShareProperties != nil { + objectMap["properties"] = fs.FileShareProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for FileShare struct. 
+func (fs *FileShare) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var fileShareProperties FileShareProperties + err = json.Unmarshal(*v, &fileShareProperties) + if err != nil { + return err + } + fs.FileShareProperties = &fileShareProperties + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + fs.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + fs.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + fs.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + fs.Type = &typeVar + } + } + } + + return nil +} + +// FileShareItem the file share properties be listed out. +type FileShareItem struct { + // FileShareProperties - The file share properties be listed out. + *FileShareProperties `json:"properties,omitempty"` + // Etag - READ-ONLY; Resource Etag. + Etag *string `json:"etag,omitempty"` + // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for FileShareItem. +func (fsi FileShareItem) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if fsi.FileShareProperties != nil { + objectMap["properties"] = fsi.FileShareProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for FileShareItem struct. +func (fsi *FileShareItem) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var fileShareProperties FileShareProperties + err = json.Unmarshal(*v, &fileShareProperties) + if err != nil { + return err + } + fsi.FileShareProperties = &fileShareProperties + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + fsi.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + fsi.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + fsi.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + fsi.Type = &typeVar + } + } + } + + return nil +} + +// FileShareItems response schema. Contains list of shares returned, and if paging is requested or +// required, a URL to next page of shares. +type FileShareItems struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; List of file shares returned. 
+ Value *[]FileShareItem `json:"value,omitempty"` + // NextLink - READ-ONLY; Request URL that can be used to query next page of shares. Returned when total number of requested shares exceed maximum page size. + NextLink *string `json:"nextLink,omitempty"` +} + +// MarshalJSON is the custom marshaler for FileShareItems. +func (fsi FileShareItems) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// FileShareItemsIterator provides access to a complete listing of FileShareItem values. +type FileShareItemsIterator struct { + i int + page FileShareItemsPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *FileShareItemsIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileShareItemsIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *FileShareItemsIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter FileShareItemsIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter FileShareItemsIterator) Response() FileShareItems { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter FileShareItemsIterator) Value() FileShareItem { + if !iter.page.NotDone() { + return FileShareItem{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the FileShareItemsIterator type. +func NewFileShareItemsIterator(page FileShareItemsPage) FileShareItemsIterator { + return FileShareItemsIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (fsi FileShareItems) IsEmpty() bool { + return fsi.Value == nil || len(*fsi.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (fsi FileShareItems) hasNextLink() bool { + return fsi.NextLink != nil && len(*fsi.NextLink) != 0 +} + +// fileShareItemsPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (fsi FileShareItems) fileShareItemsPreparer(ctx context.Context) (*http.Request, error) { + if !fsi.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(fsi.NextLink))) +} + +// FileShareItemsPage contains a page of FileShareItem values. +type FileShareItemsPage struct { + fn func(context.Context, FileShareItems) (FileShareItems, error) + fsi FileShareItems +} + +// NextWithContext advances to the next page of values. 
If there was an error making +// the request the page does not advance and the error is returned. +func (page *FileShareItemsPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FileShareItemsPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.fsi) + if err != nil { + return err + } + page.fsi = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *FileShareItemsPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page FileShareItemsPage) NotDone() bool { + return !page.fsi.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page FileShareItemsPage) Response() FileShareItems { + return page.fsi +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page FileShareItemsPage) Values() []FileShareItem { + if page.fsi.IsEmpty() { + return nil + } + return *page.fsi.Value +} + +// Creates a new instance of the FileShareItemsPage type. +func NewFileShareItemsPage(cur FileShareItems, getNextPage func(context.Context, FileShareItems) (FileShareItems, error)) FileShareItemsPage { + return FileShareItemsPage{ + fn: getNextPage, + fsi: cur, + } +} + +// FileShareProperties the properties of the file share. +type FileShareProperties struct { + // LastModifiedTime - READ-ONLY; Returns the date and time the share was last modified. + LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"` + // Metadata - A name-value pair to associate with the share as metadata. + Metadata map[string]*string `json:"metadata"` + // ShareQuota - The maximum size of the share, in gigabytes. Must be greater than 0, and less than or equal to 5TB (5120). For Large File Shares, the maximum size is 102400. + ShareQuota *int32 `json:"shareQuota,omitempty"` + // EnabledProtocols - The authentication protocol that is used for the file share. Can only be specified when creating a share. Possible values include: 'EnabledProtocolsSMB', 'EnabledProtocolsNFS' + EnabledProtocols EnabledProtocols `json:"enabledProtocols,omitempty"` + // RootSquash - The property is for NFS share only. The default is NoRootSquash. Possible values include: 'RootSquashTypeNoRootSquash', 'RootSquashTypeRootSquash', 'RootSquashTypeAllSquash' + RootSquash RootSquashType `json:"rootSquash,omitempty"` + // Version - READ-ONLY; The version of the share. + Version *string `json:"version,omitempty"` + // Deleted - READ-ONLY; Indicates whether the share was deleted. + Deleted *bool `json:"deleted,omitempty"` + // DeletedTime - READ-ONLY; The deleted time if the share was deleted. + DeletedTime *date.Time `json:"deletedTime,omitempty"` + // RemainingRetentionDays - READ-ONLY; Remaining retention days for share that was soft deleted. + RemainingRetentionDays *int32 `json:"remainingRetentionDays,omitempty"` + // AccessTier - Access tier for specific share. GpV2 account can choose between TransactionOptimized (default), Hot, and Cool. 
FileStorage account can choose Premium. Possible values include: 'ShareAccessTierTransactionOptimized', 'ShareAccessTierHot', 'ShareAccessTierCool', 'ShareAccessTierPremium' + AccessTier ShareAccessTier `json:"accessTier,omitempty"` + // AccessTierChangeTime - READ-ONLY; Indicates the last modification time for share access tier. + AccessTierChangeTime *date.Time `json:"accessTierChangeTime,omitempty"` + // AccessTierStatus - READ-ONLY; Indicates if there is a pending transition for access tier. + AccessTierStatus *string `json:"accessTierStatus,omitempty"` + // ShareUsageBytes - READ-ONLY; The approximate size of the data stored on the share. Note that this value may not include all recently created or recently resized files. + ShareUsageBytes *int64 `json:"shareUsageBytes,omitempty"` + // LeaseStatus - READ-ONLY; The lease status of the share. Possible values include: 'LeaseStatusLocked', 'LeaseStatusUnlocked' + LeaseStatus LeaseStatus `json:"leaseStatus,omitempty"` + // LeaseState - READ-ONLY; Lease state of the share. Possible values include: 'LeaseStateAvailable', 'LeaseStateLeased', 'LeaseStateExpired', 'LeaseStateBreaking', 'LeaseStateBroken' + LeaseState LeaseState `json:"leaseState,omitempty"` + // LeaseDuration - READ-ONLY; Specifies whether the lease on a share is of infinite or fixed duration, only when the share is leased. Possible values include: 'LeaseDurationInfinite', 'LeaseDurationFixed' + LeaseDuration LeaseDuration `json:"leaseDuration,omitempty"` + // SignedIdentifiers - List of stored access policies specified on the share. + SignedIdentifiers *[]SignedIdentifier `json:"signedIdentifiers,omitempty"` + // SnapshotTime - READ-ONLY; Creation time of share snapshot returned in the response of list shares with expand param "snapshots". + SnapshotTime *date.Time `json:"snapshotTime,omitempty"` +} + +// MarshalJSON is the custom marshaler for FileShareProperties. +func (fsp FileShareProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if fsp.Metadata != nil { + objectMap["metadata"] = fsp.Metadata + } + if fsp.ShareQuota != nil { + objectMap["shareQuota"] = fsp.ShareQuota + } + if fsp.EnabledProtocols != "" { + objectMap["enabledProtocols"] = fsp.EnabledProtocols + } + if fsp.RootSquash != "" { + objectMap["rootSquash"] = fsp.RootSquash + } + if fsp.AccessTier != "" { + objectMap["accessTier"] = fsp.AccessTier + } + if fsp.SignedIdentifiers != nil { + objectMap["signedIdentifiers"] = fsp.SignedIdentifiers + } + return json.Marshal(objectMap) +} + +// GeoReplicationStats statistics related to replication for storage account's Blob, Table, Queue and File +// services. It is only available when geo-redundant replication is enabled for the storage account. +type GeoReplicationStats struct { + // Status - READ-ONLY; The status of the secondary location. Possible values are: - Live: Indicates that the secondary location is active and operational. - Bootstrap: Indicates initial synchronization from the primary location to the secondary location is in progress.This typically occurs when replication is first enabled. - Unavailable: Indicates that the secondary location is temporarily unavailable. Possible values include: 'GeoReplicationStatusLive', 'GeoReplicationStatusBootstrap', 'GeoReplicationStatusUnavailable' + Status GeoReplicationStatus `json:"status,omitempty"` + // LastSyncTime - READ-ONLY; All primary writes preceding this UTC date/time value are guaranteed to be available for read operations. 
Primary writes following this point in time may or may not be available for reads. Element may be default value if value of LastSyncTime is not available, this can happen if secondary is offline or we are in bootstrap. + LastSyncTime *date.Time `json:"lastSyncTime,omitempty"` + // CanFailover - READ-ONLY; A boolean flag which indicates whether or not account failover is supported for the account. + CanFailover *bool `json:"canFailover,omitempty"` +} + +// MarshalJSON is the custom marshaler for GeoReplicationStats. +func (grs GeoReplicationStats) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// Identity identity for the resource. +type Identity struct { + // PrincipalID - READ-ONLY; The principal ID of resource identity. + PrincipalID *string `json:"principalId,omitempty"` + // TenantID - READ-ONLY; The tenant ID of resource. + TenantID *string `json:"tenantId,omitempty"` + // Type - The identity type. Possible values include: 'IdentityTypeNone', 'IdentityTypeSystemAssigned', 'IdentityTypeUserAssigned', 'IdentityTypeSystemAssignedUserAssigned' + Type IdentityType `json:"type,omitempty"` + // UserAssignedIdentities - Gets or sets a list of key value pairs that describe the set of User Assigned identities that will be used with this storage account. The key is the ARM resource identifier of the identity. Only 1 User Assigned identity is permitted here. + UserAssignedIdentities map[string]*UserAssignedIdentity `json:"userAssignedIdentities"` +} + +// MarshalJSON is the custom marshaler for Identity. +func (i Identity) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if i.Type != "" { + objectMap["type"] = i.Type + } + if i.UserAssignedIdentities != nil { + objectMap["userAssignedIdentities"] = i.UserAssignedIdentities + } + return json.Marshal(objectMap) +} + +// ImmutabilityPolicy the ImmutabilityPolicy property of a blob container, including Id, resource name, +// resource type, Etag. +type ImmutabilityPolicy struct { + autorest.Response `json:"-"` + // ImmutabilityPolicyProperty - The properties of an ImmutabilityPolicy of a blob container. + *ImmutabilityPolicyProperty `json:"properties,omitempty"` + // Etag - READ-ONLY; Resource Etag. + Etag *string `json:"etag,omitempty"` + // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ImmutabilityPolicy. +func (IP ImmutabilityPolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if IP.ImmutabilityPolicyProperty != nil { + objectMap["properties"] = IP.ImmutabilityPolicyProperty + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ImmutabilityPolicy struct. 
+func (IP *ImmutabilityPolicy) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var immutabilityPolicyProperty ImmutabilityPolicyProperty + err = json.Unmarshal(*v, &immutabilityPolicyProperty) + if err != nil { + return err + } + IP.ImmutabilityPolicyProperty = &immutabilityPolicyProperty + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + IP.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + IP.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + IP.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + IP.Type = &typeVar + } + } + } + + return nil +} + +// ImmutabilityPolicyProperties the properties of an ImmutabilityPolicy of a blob container. +type ImmutabilityPolicyProperties struct { + // ImmutabilityPolicyProperty - The properties of an ImmutabilityPolicy of a blob container. + *ImmutabilityPolicyProperty `json:"properties,omitempty"` + // Etag - READ-ONLY; ImmutabilityPolicy Etag. + Etag *string `json:"etag,omitempty"` + // UpdateHistory - READ-ONLY; The ImmutabilityPolicy update history of the blob container. + UpdateHistory *[]UpdateHistoryProperty `json:"updateHistory,omitempty"` +} + +// MarshalJSON is the custom marshaler for ImmutabilityPolicyProperties. +func (ipp ImmutabilityPolicyProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ipp.ImmutabilityPolicyProperty != nil { + objectMap["properties"] = ipp.ImmutabilityPolicyProperty + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ImmutabilityPolicyProperties struct. +func (ipp *ImmutabilityPolicyProperties) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var immutabilityPolicyProperty ImmutabilityPolicyProperty + err = json.Unmarshal(*v, &immutabilityPolicyProperty) + if err != nil { + return err + } + ipp.ImmutabilityPolicyProperty = &immutabilityPolicyProperty + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + ipp.Etag = &etag + } + case "updateHistory": + if v != nil { + var updateHistory []UpdateHistoryProperty + err = json.Unmarshal(*v, &updateHistory) + if err != nil { + return err + } + ipp.UpdateHistory = &updateHistory + } + } + } + + return nil +} + +// ImmutabilityPolicyProperty the properties of an ImmutabilityPolicy of a blob container. +type ImmutabilityPolicyProperty struct { + // ImmutabilityPeriodSinceCreationInDays - The immutability period for the blobs in the container since the policy creation, in days. + ImmutabilityPeriodSinceCreationInDays *int32 `json:"immutabilityPeriodSinceCreationInDays,omitempty"` + // State - READ-ONLY; The ImmutabilityPolicy state of a blob container, possible values include: Locked and Unlocked. 
Possible values include: 'ImmutabilityPolicyStateLocked', 'ImmutabilityPolicyStateUnlocked' + State ImmutabilityPolicyState `json:"state,omitempty"` + // AllowProtectedAppendWrites - This property can only be changed for unlocked time-based retention policies. When enabled, new blocks can be written to an append blob while maintaining immutability protection and compliance. Only new blocks can be added and any existing blocks cannot be modified or deleted. This property cannot be changed with ExtendImmutabilityPolicy API + AllowProtectedAppendWrites *bool `json:"allowProtectedAppendWrites,omitempty"` +} + +// MarshalJSON is the custom marshaler for ImmutabilityPolicyProperty. +func (ipp ImmutabilityPolicyProperty) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ipp.ImmutabilityPeriodSinceCreationInDays != nil { + objectMap["immutabilityPeriodSinceCreationInDays"] = ipp.ImmutabilityPeriodSinceCreationInDays + } + if ipp.AllowProtectedAppendWrites != nil { + objectMap["allowProtectedAppendWrites"] = ipp.AllowProtectedAppendWrites + } + return json.Marshal(objectMap) +} + +// ImmutableStorageWithVersioning object level immutability properties of the container. +type ImmutableStorageWithVersioning struct { + // Enabled - This is an immutable property, when set to true it enables object level immutability at the container level. + Enabled *bool `json:"enabled,omitempty"` + // TimeStamp - READ-ONLY; Returns the date and time the object level immutability was enabled. + TimeStamp *date.Time `json:"timeStamp,omitempty"` + // MigrationState - READ-ONLY; This property denotes the container level immutability to object level immutability migration state. Possible values include: 'MigrationStateInProgress', 'MigrationStateCompleted' + MigrationState MigrationState `json:"migrationState,omitempty"` +} + +// MarshalJSON is the custom marshaler for ImmutableStorageWithVersioning. +func (iswv ImmutableStorageWithVersioning) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if iswv.Enabled != nil { + objectMap["enabled"] = iswv.Enabled + } + return json.Marshal(objectMap) +} + +// IPRule IP rule with specific IP or IP range in CIDR format. +type IPRule struct { + // IPAddressOrRange - Specifies the IP or IP range in CIDR format. Only IPV4 address is allowed. + IPAddressOrRange *string `json:"value,omitempty"` + // Action - The action of IP ACL rule. Possible values include: 'ActionAllow' + Action Action `json:"action,omitempty"` +} + +// KeyCreationTime storage account keys creation time. +type KeyCreationTime struct { + Key1 *date.Time `json:"key1,omitempty"` + Key2 *date.Time `json:"key2,omitempty"` +} + +// KeyPolicy keyPolicy assigned to the storage account. +type KeyPolicy struct { + // KeyExpirationPeriodInDays - The key expiration period in days. + KeyExpirationPeriodInDays *int32 `json:"keyExpirationPeriodInDays,omitempty"` +} + +// KeyVaultProperties properties of key vault. +type KeyVaultProperties struct { + // KeyName - The name of KeyVault key. + KeyName *string `json:"keyname,omitempty"` + // KeyVersion - The version of KeyVault key. + KeyVersion *string `json:"keyversion,omitempty"` + // KeyVaultURI - The Uri of KeyVault. + KeyVaultURI *string `json:"keyvaulturi,omitempty"` + // CurrentVersionedKeyIdentifier - READ-ONLY; The object identifier of the current versioned Key Vault Key in use. 
+ CurrentVersionedKeyIdentifier *string `json:"currentVersionedKeyIdentifier,omitempty"` + // LastKeyRotationTimestamp - READ-ONLY; Timestamp of last rotation of the Key Vault Key. + LastKeyRotationTimestamp *date.Time `json:"lastKeyRotationTimestamp,omitempty"` +} + +// MarshalJSON is the custom marshaler for KeyVaultProperties. +func (kvp KeyVaultProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if kvp.KeyName != nil { + objectMap["keyname"] = kvp.KeyName + } + if kvp.KeyVersion != nil { + objectMap["keyversion"] = kvp.KeyVersion + } + if kvp.KeyVaultURI != nil { + objectMap["keyvaulturi"] = kvp.KeyVaultURI + } + return json.Marshal(objectMap) +} + +// LastAccessTimeTrackingPolicy the blob service properties for Last access time based tracking policy. +type LastAccessTimeTrackingPolicy struct { + // Enable - When set to true last access time based tracking is enabled. + Enable *bool `json:"enable,omitempty"` + // Name - Name of the policy. The valid value is AccessTimeTracking. This field is currently read only. Possible values include: 'NameAccessTimeTracking' + Name Name `json:"name,omitempty"` + // TrackingGranularityInDays - The field specifies blob object tracking granularity in days, typically how often the blob object should be tracked.This field is currently read only with value as 1 + TrackingGranularityInDays *int32 `json:"trackingGranularityInDays,omitempty"` + // BlobType - An array of predefined supported blob types. Only blockBlob is the supported value. This field is currently read only + BlobType *[]string `json:"blobType,omitempty"` +} + +// LeaseContainerRequest lease Container request schema. +type LeaseContainerRequest struct { + // Action - Specifies the lease action. Can be one of the available actions. Possible values include: 'Action1Acquire', 'Action1Renew', 'Action1Change', 'Action1Release', 'Action1Break' + Action Action1 `json:"action,omitempty"` + // LeaseID - Identifies the lease. Can be specified in any valid GUID string format. + LeaseID *string `json:"leaseId,omitempty"` + // BreakPeriod - Optional. For a break action, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. + BreakPeriod *int32 `json:"breakPeriod,omitempty"` + // LeaseDuration - Required for acquire. Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. + LeaseDuration *int32 `json:"leaseDuration,omitempty"` + // ProposedLeaseID - Optional for acquire, required for change. Proposed lease ID, in a GUID string format. + ProposedLeaseID *string `json:"proposedLeaseId,omitempty"` +} + +// LeaseContainerResponse lease Container response schema. +type LeaseContainerResponse struct { + autorest.Response `json:"-"` + // LeaseID - Returned unique lease ID that must be included with any request to delete the container, or to renew, change, or release the lease. + LeaseID *string `json:"leaseId,omitempty"` + // LeaseTimeSeconds - Approximate time remaining in the lease period, in seconds. + LeaseTimeSeconds *string `json:"leaseTimeSeconds,omitempty"` +} + +// LeaseShareRequest lease Share request schema. +type LeaseShareRequest struct { + // Action - Specifies the lease action. Can be one of the available actions. Possible values include: 'LeaseShareActionAcquire', 'LeaseShareActionRenew', 'LeaseShareActionChange', 'LeaseShareActionRelease', 'LeaseShareActionBreak' + Action LeaseShareAction `json:"action,omitempty"` + // LeaseID - Identifies the lease. 
Can be specified in any valid GUID string format. + LeaseID *string `json:"leaseId,omitempty"` + // BreakPeriod - Optional. For a break action, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. + BreakPeriod *int32 `json:"breakPeriod,omitempty"` + // LeaseDuration - Required for acquire. Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. + LeaseDuration *int32 `json:"leaseDuration,omitempty"` + // ProposedLeaseID - Optional for acquire, required for change. Proposed lease ID, in a GUID string format. + ProposedLeaseID *string `json:"proposedLeaseId,omitempty"` +} + +// LeaseShareResponse lease Share response schema. +type LeaseShareResponse struct { + autorest.Response `json:"-"` + // LeaseID - Returned unique lease ID that must be included with any request to delete the share, or to renew, change, or release the lease. + LeaseID *string `json:"leaseId,omitempty"` + // LeaseTimeSeconds - Approximate time remaining in the lease period, in seconds. + LeaseTimeSeconds *string `json:"leaseTimeSeconds,omitempty"` +} + +// LegalHold the LegalHold property of a blob container. +type LegalHold struct { + autorest.Response `json:"-"` + // HasLegalHold - READ-ONLY; The hasLegalHold public property is set to true by SRP if there are at least one existing tag. The hasLegalHold public property is set to false by SRP if all existing legal hold tags are cleared out. There can be a maximum of 1000 blob containers with hasLegalHold=true for a given account. + HasLegalHold *bool `json:"hasLegalHold,omitempty"` + // Tags - Each tag should be 3 to 23 alphanumeric characters and is normalized to lower case at SRP. + Tags *[]string `json:"tags,omitempty"` +} + +// MarshalJSON is the custom marshaler for LegalHold. +func (lh LegalHold) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if lh.Tags != nil { + objectMap["tags"] = lh.Tags + } + return json.Marshal(objectMap) +} + +// LegalHoldProperties the LegalHold property of a blob container. +type LegalHoldProperties struct { + // HasLegalHold - READ-ONLY; The hasLegalHold public property is set to true by SRP if there are at least one existing tag. The hasLegalHold public property is set to false by SRP if all existing legal hold tags are cleared out. There can be a maximum of 1000 blob containers with hasLegalHold=true for a given account. + HasLegalHold *bool `json:"hasLegalHold,omitempty"` + // Tags - The list of LegalHold tags of a blob container. + Tags *[]TagProperty `json:"tags,omitempty"` +} + +// MarshalJSON is the custom marshaler for LegalHoldProperties. +func (lhp LegalHoldProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if lhp.Tags != nil { + objectMap["tags"] = lhp.Tags + } + return json.Marshal(objectMap) +} + +// ListAccountSasResponse the List SAS credentials operation response. +type ListAccountSasResponse struct { + autorest.Response `json:"-"` + // AccountSasToken - READ-ONLY; List SAS credentials of storage account. + AccountSasToken *string `json:"accountSasToken,omitempty"` +} + +// MarshalJSON is the custom marshaler for ListAccountSasResponse. +func (lasr ListAccountSasResponse) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// ListBlobInventoryPolicy list of blob inventory policies returned. 
+type ListBlobInventoryPolicy struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; List of blob inventory policies. + Value *[]BlobInventoryPolicy `json:"value,omitempty"` +} + +// MarshalJSON is the custom marshaler for ListBlobInventoryPolicy. +func (lbip ListBlobInventoryPolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// ListContainerItem the blob container properties be listed out. +type ListContainerItem struct { + // ContainerProperties - The blob container properties be listed out. + *ContainerProperties `json:"properties,omitempty"` + // Etag - READ-ONLY; Resource Etag. + Etag *string `json:"etag,omitempty"` + // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ListContainerItem. +func (lci ListContainerItem) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if lci.ContainerProperties != nil { + objectMap["properties"] = lci.ContainerProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ListContainerItem struct. +func (lci *ListContainerItem) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var containerProperties ContainerProperties + err = json.Unmarshal(*v, &containerProperties) + if err != nil { + return err + } + lci.ContainerProperties = &containerProperties + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + lci.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + lci.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + lci.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + lci.Type = &typeVar + } + } + } + + return nil +} + +// ListContainerItems response schema. Contains list of blobs returned, and if paging is requested or +// required, a URL to next page of containers. +type ListContainerItems struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; List of blobs containers returned. + Value *[]ListContainerItem `json:"value,omitempty"` + // NextLink - READ-ONLY; Request URL that can be used to query next page of containers. Returned when total number of requested containers exceed maximum page size. + NextLink *string `json:"nextLink,omitempty"` +} + +// MarshalJSON is the custom marshaler for ListContainerItems. +func (lci ListContainerItems) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// ListContainerItemsIterator provides access to a complete listing of ListContainerItem values. 
+type ListContainerItemsIterator struct { + i int + page ListContainerItemsPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ListContainerItemsIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ListContainerItemsIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ListContainerItemsIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ListContainerItemsIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ListContainerItemsIterator) Response() ListContainerItems { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ListContainerItemsIterator) Value() ListContainerItem { + if !iter.page.NotDone() { + return ListContainerItem{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ListContainerItemsIterator type. +func NewListContainerItemsIterator(page ListContainerItemsPage) ListContainerItemsIterator { + return ListContainerItemsIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (lci ListContainerItems) IsEmpty() bool { + return lci.Value == nil || len(*lci.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (lci ListContainerItems) hasNextLink() bool { + return lci.NextLink != nil && len(*lci.NextLink) != 0 +} + +// listContainerItemsPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (lci ListContainerItems) listContainerItemsPreparer(ctx context.Context) (*http.Request, error) { + if !lci.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(lci.NextLink))) +} + +// ListContainerItemsPage contains a page of ListContainerItem values. +type ListContainerItemsPage struct { + fn func(context.Context, ListContainerItems) (ListContainerItems, error) + lci ListContainerItems +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *ListContainerItemsPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ListContainerItemsPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.lci) + if err != nil { + return err + } + page.lci = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *ListContainerItemsPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ListContainerItemsPage) NotDone() bool { + return !page.lci.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ListContainerItemsPage) Response() ListContainerItems { + return page.lci +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ListContainerItemsPage) Values() []ListContainerItem { + if page.lci.IsEmpty() { + return nil + } + return *page.lci.Value +} + +// Creates a new instance of the ListContainerItemsPage type. +func NewListContainerItemsPage(cur ListContainerItems, getNextPage func(context.Context, ListContainerItems) (ListContainerItems, error)) ListContainerItemsPage { + return ListContainerItemsPage{ + fn: getNextPage, + lci: cur, + } +} + +// ListQueue ... +type ListQueue struct { + // ListQueueProperties - List Queue resource properties. + *ListQueueProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ListQueue. +func (lq ListQueue) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if lq.ListQueueProperties != nil { + objectMap["properties"] = lq.ListQueueProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ListQueue struct. 
+func (lq *ListQueue) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var queueProperties ListQueueProperties + err = json.Unmarshal(*v, &queueProperties) + if err != nil { + return err + } + lq.ListQueueProperties = &queueProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + lq.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + lq.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + lq.Type = &typeVar + } + } + } + + return nil +} + +// ListQueueProperties ... +type ListQueueProperties struct { + // Metadata - A name-value pair that represents queue metadata. + Metadata map[string]*string `json:"metadata"` +} + +// MarshalJSON is the custom marshaler for ListQueueProperties. +func (lqp ListQueueProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if lqp.Metadata != nil { + objectMap["metadata"] = lqp.Metadata + } + return json.Marshal(objectMap) +} + +// ListQueueResource response schema. Contains list of queues returned +type ListQueueResource struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; List of queues returned. + Value *[]ListQueue `json:"value,omitempty"` + // NextLink - READ-ONLY; Request URL that can be used to list next page of queues + NextLink *string `json:"nextLink,omitempty"` +} + +// MarshalJSON is the custom marshaler for ListQueueResource. +func (lqr ListQueueResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// ListQueueResourceIterator provides access to a complete listing of ListQueue values. +type ListQueueResourceIterator struct { + i int + page ListQueueResourcePage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ListQueueResourceIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ListQueueResourceIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ListQueueResourceIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ListQueueResourceIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. 
+func (iter ListQueueResourceIterator) Response() ListQueueResource { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ListQueueResourceIterator) Value() ListQueue { + if !iter.page.NotDone() { + return ListQueue{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ListQueueResourceIterator type. +func NewListQueueResourceIterator(page ListQueueResourcePage) ListQueueResourceIterator { + return ListQueueResourceIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (lqr ListQueueResource) IsEmpty() bool { + return lqr.Value == nil || len(*lqr.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (lqr ListQueueResource) hasNextLink() bool { + return lqr.NextLink != nil && len(*lqr.NextLink) != 0 +} + +// listQueueResourcePreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (lqr ListQueueResource) listQueueResourcePreparer(ctx context.Context) (*http.Request, error) { + if !lqr.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(lqr.NextLink))) +} + +// ListQueueResourcePage contains a page of ListQueue values. +type ListQueueResourcePage struct { + fn func(context.Context, ListQueueResource) (ListQueueResource, error) + lqr ListQueueResource +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ListQueueResourcePage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ListQueueResourcePage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.lqr) + if err != nil { + return err + } + page.lqr = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *ListQueueResourcePage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ListQueueResourcePage) NotDone() bool { + return !page.lqr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ListQueueResourcePage) Response() ListQueueResource { + return page.lqr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ListQueueResourcePage) Values() []ListQueue { + if page.lqr.IsEmpty() { + return nil + } + return *page.lqr.Value +} + +// Creates a new instance of the ListQueueResourcePage type. +func NewListQueueResourcePage(cur ListQueueResource, getNextPage func(context.Context, ListQueueResource) (ListQueueResource, error)) ListQueueResourcePage { + return ListQueueResourcePage{ + fn: getNextPage, + lqr: cur, + } +} + +// ListQueueServices ... 
+type ListQueueServices struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; List of queue services returned. + Value *[]QueueServiceProperties `json:"value,omitempty"` +} + +// MarshalJSON is the custom marshaler for ListQueueServices. +func (lqs ListQueueServices) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// ListServiceSasResponse the List service SAS credentials operation response. +type ListServiceSasResponse struct { + autorest.Response `json:"-"` + // ServiceSasToken - READ-ONLY; List service SAS credentials of specific resource. + ServiceSasToken *string `json:"serviceSasToken,omitempty"` +} + +// MarshalJSON is the custom marshaler for ListServiceSasResponse. +func (lssr ListServiceSasResponse) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// ListTableResource response schema. Contains list of tables returned +type ListTableResource struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; List of tables returned. + Value *[]Table `json:"value,omitempty"` + // NextLink - READ-ONLY; Request URL that can be used to query next page of tables + NextLink *string `json:"nextLink,omitempty"` +} + +// MarshalJSON is the custom marshaler for ListTableResource. +func (ltr ListTableResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// ListTableResourceIterator provides access to a complete listing of Table values. +type ListTableResourceIterator struct { + i int + page ListTableResourcePage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ListTableResourceIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ListTableResourceIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ListTableResourceIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ListTableResourceIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ListTableResourceIterator) Response() ListTableResource { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ListTableResourceIterator) Value() Table { + if !iter.page.NotDone() { + return Table{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ListTableResourceIterator type. 
+func NewListTableResourceIterator(page ListTableResourcePage) ListTableResourceIterator { + return ListTableResourceIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (ltr ListTableResource) IsEmpty() bool { + return ltr.Value == nil || len(*ltr.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (ltr ListTableResource) hasNextLink() bool { + return ltr.NextLink != nil && len(*ltr.NextLink) != 0 +} + +// listTableResourcePreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (ltr ListTableResource) listTableResourcePreparer(ctx context.Context) (*http.Request, error) { + if !ltr.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(ltr.NextLink))) +} + +// ListTableResourcePage contains a page of Table values. +type ListTableResourcePage struct { + fn func(context.Context, ListTableResource) (ListTableResource, error) + ltr ListTableResource +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ListTableResourcePage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ListTableResourcePage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.ltr) + if err != nil { + return err + } + page.ltr = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *ListTableResourcePage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ListTableResourcePage) NotDone() bool { + return !page.ltr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ListTableResourcePage) Response() ListTableResource { + return page.ltr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ListTableResourcePage) Values() []Table { + if page.ltr.IsEmpty() { + return nil + } + return *page.ltr.Value +} + +// Creates a new instance of the ListTableResourcePage type. +func NewListTableResourcePage(cur ListTableResource, getNextPage func(context.Context, ListTableResource) (ListTableResource, error)) ListTableResourcePage { + return ListTableResourcePage{ + fn: getNextPage, + ltr: cur, + } +} + +// ListTableServices ... +type ListTableServices struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; List of table services returned. + Value *[]TableServiceProperties `json:"value,omitempty"` +} + +// MarshalJSON is the custom marshaler for ListTableServices. +func (lts ListTableServices) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// ManagementPolicy the Get Storage Account ManagementPolicies operation response. 
+type ManagementPolicy struct { + autorest.Response `json:"-"` + // ManagementPolicyProperties - Returns the Storage Account Data Policies Rules. + *ManagementPolicyProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ManagementPolicy. +func (mp ManagementPolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if mp.ManagementPolicyProperties != nil { + objectMap["properties"] = mp.ManagementPolicyProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ManagementPolicy struct. +func (mp *ManagementPolicy) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var managementPolicyProperties ManagementPolicyProperties + err = json.Unmarshal(*v, &managementPolicyProperties) + if err != nil { + return err + } + mp.ManagementPolicyProperties = &managementPolicyProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + mp.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + mp.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + mp.Type = &typeVar + } + } + } + + return nil +} + +// ManagementPolicyAction actions are applied to the filtered blobs when the execution condition is met. +type ManagementPolicyAction struct { + // BaseBlob - The management policy action for base blob + BaseBlob *ManagementPolicyBaseBlob `json:"baseBlob,omitempty"` + // Snapshot - The management policy action for snapshot + Snapshot *ManagementPolicySnapShot `json:"snapshot,omitempty"` + // Version - The management policy action for version + Version *ManagementPolicyVersion `json:"version,omitempty"` +} + +// ManagementPolicyBaseBlob management policy action for base blob. +type ManagementPolicyBaseBlob struct { + // TierToCool - The function to tier blobs to cool storage. Support blobs currently at Hot tier + TierToCool *DateAfterModification `json:"tierToCool,omitempty"` + // TierToArchive - The function to tier blobs to archive storage. Support blobs currently at Hot or Cool tier + TierToArchive *DateAfterModification `json:"tierToArchive,omitempty"` + // Delete - The function to delete the blob + Delete *DateAfterModification `json:"delete,omitempty"` + // EnableAutoTierToHotFromCool - This property enables auto tiering of a blob from cool to hot on a blob access. This property requires tierToCool.daysAfterLastAccessTimeGreaterThan. + EnableAutoTierToHotFromCool *bool `json:"enableAutoTierToHotFromCool,omitempty"` +} + +// ManagementPolicyDefinition an object that defines the Lifecycle rule. Each definition is made up with a +// filters set and an actions set. 
+type ManagementPolicyDefinition struct { + // Actions - An object that defines the action set. + Actions *ManagementPolicyAction `json:"actions,omitempty"` + // Filters - An object that defines the filter set. + Filters *ManagementPolicyFilter `json:"filters,omitempty"` +} + +// ManagementPolicyFilter filters limit rule actions to a subset of blobs within the storage account. If +// multiple filters are defined, a logical AND is performed on all filters. +type ManagementPolicyFilter struct { + // PrefixMatch - An array of strings for prefixes to be match. + PrefixMatch *[]string `json:"prefixMatch,omitempty"` + // BlobTypes - An array of predefined enum values. Currently blockBlob supports all tiering and delete actions. Only delete actions are supported for appendBlob. + BlobTypes *[]string `json:"blobTypes,omitempty"` + // BlobIndexMatch - An array of blob index tag based filters, there can be at most 10 tag filters + BlobIndexMatch *[]TagFilter `json:"blobIndexMatch,omitempty"` +} + +// ManagementPolicyProperties the Storage Account ManagementPolicy properties. +type ManagementPolicyProperties struct { + // LastModifiedTime - READ-ONLY; Returns the date and time the ManagementPolicies was last modified. + LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"` + // Policy - The Storage Account ManagementPolicy, in JSON format. See more details in: https://docs.microsoft.com/en-us/azure/storage/common/storage-lifecycle-managment-concepts. + Policy *ManagementPolicySchema `json:"policy,omitempty"` +} + +// MarshalJSON is the custom marshaler for ManagementPolicyProperties. +func (mpp ManagementPolicyProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if mpp.Policy != nil { + objectMap["policy"] = mpp.Policy + } + return json.Marshal(objectMap) +} + +// ManagementPolicyRule an object that wraps the Lifecycle rule. Each rule is uniquely defined by name. +type ManagementPolicyRule struct { + // Enabled - Rule is enabled if set to true. + Enabled *bool `json:"enabled,omitempty"` + // Name - A rule name can contain any combination of alpha numeric characters. Rule name is case-sensitive. It must be unique within a policy. + Name *string `json:"name,omitempty"` + // Type - The valid value is Lifecycle + Type *string `json:"type,omitempty"` + // Definition - An object that defines the Lifecycle rule. + Definition *ManagementPolicyDefinition `json:"definition,omitempty"` +} + +// ManagementPolicySchema the Storage Account ManagementPolicies Rules. See more details in: +// https://docs.microsoft.com/en-us/azure/storage/common/storage-lifecycle-managment-concepts. +type ManagementPolicySchema struct { + // Rules - The Storage Account ManagementPolicies Rules. See more details in: https://docs.microsoft.com/en-us/azure/storage/common/storage-lifecycle-managment-concepts. + Rules *[]ManagementPolicyRule `json:"rules,omitempty"` +} + +// ManagementPolicySnapShot management policy action for snapshot. +type ManagementPolicySnapShot struct { + // TierToCool - The function to tier blob snapshot to cool storage. Support blob snapshot currently at Hot tier + TierToCool *DateAfterCreation `json:"tierToCool,omitempty"` + // TierToArchive - The function to tier blob snapshot to archive storage. 
Support blob snapshot currently at Hot or Cool tier + TierToArchive *DateAfterCreation `json:"tierToArchive,omitempty"` + // Delete - The function to delete the blob snapshot + Delete *DateAfterCreation `json:"delete,omitempty"` +} + +// ManagementPolicyVersion management policy action for blob version. +type ManagementPolicyVersion struct { + // TierToCool - The function to tier blob version to cool storage. Support blob version currently at Hot tier + TierToCool *DateAfterCreation `json:"tierToCool,omitempty"` + // TierToArchive - The function to tier blob version to archive storage. Support blob version currently at Hot or Cool tier + TierToArchive *DateAfterCreation `json:"tierToArchive,omitempty"` + // Delete - The function to delete the blob version + Delete *DateAfterCreation `json:"delete,omitempty"` +} + +// MetricSpecification metric specification of operation. +type MetricSpecification struct { + // Name - Name of metric specification. + Name *string `json:"name,omitempty"` + // DisplayName - Display name of metric specification. + DisplayName *string `json:"displayName,omitempty"` + // DisplayDescription - Display description of metric specification. + DisplayDescription *string `json:"displayDescription,omitempty"` + // Unit - Unit could be Bytes or Count. + Unit *string `json:"unit,omitempty"` + // Dimensions - Dimensions of blobs, including blob type and access tier. + Dimensions *[]Dimension `json:"dimensions,omitempty"` + // AggregationType - Aggregation type could be Average. + AggregationType *string `json:"aggregationType,omitempty"` + // FillGapWithZero - The property to decide fill gap with zero or not. + FillGapWithZero *bool `json:"fillGapWithZero,omitempty"` + // Category - The category this metric specification belong to, could be Capacity. + Category *string `json:"category,omitempty"` + // ResourceIDDimensionNameOverride - Account Resource Id. + ResourceIDDimensionNameOverride *string `json:"resourceIdDimensionNameOverride,omitempty"` +} + +// Multichannel multichannel setting. Applies to Premium FileStorage only. +type Multichannel struct { + // Enabled - Indicates whether multichannel is enabled + Enabled *bool `json:"enabled,omitempty"` +} + +// NetworkRuleSet network rule set +type NetworkRuleSet struct { + // Bypass - Specifies whether traffic is bypassed for Logging/Metrics/AzureServices. Possible values are any combination of Logging|Metrics|AzureServices (For example, "Logging, Metrics"), or None to bypass none of those traffics. Possible values include: 'BypassNone', 'BypassLogging', 'BypassMetrics', 'BypassAzureServices' + Bypass Bypass `json:"bypass,omitempty"` + // ResourceAccessRules - Sets the resource access rules + ResourceAccessRules *[]ResourceAccessRule `json:"resourceAccessRules,omitempty"` + // VirtualNetworkRules - Sets the virtual network rules + VirtualNetworkRules *[]VirtualNetworkRule `json:"virtualNetworkRules,omitempty"` + // IPRules - Sets the IP ACL rules + IPRules *[]IPRule `json:"ipRules,omitempty"` + // DefaultAction - Specifies the default action of allow or deny when no other rules match. Possible values include: 'DefaultActionAllow', 'DefaultActionDeny' + DefaultAction DefaultAction `json:"defaultAction,omitempty"` +} + +// ObjectReplicationPolicies list storage account object replication policies. +type ObjectReplicationPolicies struct { + autorest.Response `json:"-"` + // Value - The replication policy between two storage accounts. 
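[Editorial note, not part of the diff] ManagementPolicySchema, ManagementPolicyRule, ManagementPolicyDefinition, ManagementPolicyFilter and the action types together describe one lifecycle policy document. A hedged sketch of assembling and serialising such a policy; the import path is an assumption, and DateAfterModification is generated elsewhere in this file, so its DaysAfterModificationGreaterThan field name is assumed from the upstream SDK:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/to"
	storage "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage" // assumed vendored path
)

func main() {
	policy := storage.ManagementPolicySchema{
		Rules: &[]storage.ManagementPolicyRule{{
			Enabled: to.BoolPtr(true),
			Name:    to.StringPtr("tier-then-delete-logs"),
			Type:    to.StringPtr("Lifecycle"),
			Definition: &storage.ManagementPolicyDefinition{
				Filters: &storage.ManagementPolicyFilter{
					BlobTypes:   &[]string{"blockBlob"},
					PrefixMatch: &[]string{"logs/"},
				},
				Actions: &storage.ManagementPolicyAction{
					BaseBlob: &storage.ManagementPolicyBaseBlob{
						// Field name assumed from the upstream SDK; not shown in this hunk.
						TierToCool: &storage.DateAfterModification{DaysAfterModificationGreaterThan: to.Float64Ptr(30)},
						Delete:     &storage.DateAfterModification{DaysAfterModificationGreaterThan: to.Float64Ptr(365)},
					},
				},
			},
		}},
	}

	props := storage.ManagementPolicyProperties{Policy: &policy}
	b, _ := json.MarshalIndent(props, "", "  ") // LastModifiedTime is READ-ONLY and omitted by the custom marshaler
	fmt.Println(string(b))
}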
+ Value *[]ObjectReplicationPolicy `json:"value,omitempty"` +} + +// ObjectReplicationPolicy the replication policy between two storage accounts. Multiple rules can be +// defined in one policy. +type ObjectReplicationPolicy struct { + autorest.Response `json:"-"` + // ObjectReplicationPolicyProperties - Returns the Storage Account Object Replication Policy. + *ObjectReplicationPolicyProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ObjectReplicationPolicy. +func (orp ObjectReplicationPolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if orp.ObjectReplicationPolicyProperties != nil { + objectMap["properties"] = orp.ObjectReplicationPolicyProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ObjectReplicationPolicy struct. +func (orp *ObjectReplicationPolicy) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var objectReplicationPolicyProperties ObjectReplicationPolicyProperties + err = json.Unmarshal(*v, &objectReplicationPolicyProperties) + if err != nil { + return err + } + orp.ObjectReplicationPolicyProperties = &objectReplicationPolicyProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + orp.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + orp.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + orp.Type = &typeVar + } + } + } + + return nil +} + +// ObjectReplicationPolicyFilter filters limit replication to a subset of blobs within the storage account. +// A logical OR is performed on values in the filter. If multiple filters are defined, a logical AND is +// performed on all filters. +type ObjectReplicationPolicyFilter struct { + // PrefixMatch - Optional. Filters the results to replicate only blobs whose names begin with the specified prefix. + PrefixMatch *[]string `json:"prefixMatch,omitempty"` + // MinCreationTime - Blobs created after the time will be replicated to the destination. It must be in datetime format 'yyyy-MM-ddTHH:mm:ssZ'. Example: 2020-02-19T16:05:00Z + MinCreationTime *string `json:"minCreationTime,omitempty"` +} + +// ObjectReplicationPolicyProperties the Storage Account ObjectReplicationPolicy properties. +type ObjectReplicationPolicyProperties struct { + // PolicyID - READ-ONLY; A unique id for object replication policy. + PolicyID *string `json:"policyId,omitempty"` + // EnabledTime - READ-ONLY; Indicates when the policy is enabled on the source account. + EnabledTime *date.Time `json:"enabledTime,omitempty"` + // SourceAccount - Required. Source account name. It should be full resource id if allowCrossTenantReplication set to false. 
+ SourceAccount *string `json:"sourceAccount,omitempty"` + // DestinationAccount - Required. Destination account name. It should be full resource id if allowCrossTenantReplication set to false. + DestinationAccount *string `json:"destinationAccount,omitempty"` + // Rules - The storage account object replication rules. + Rules *[]ObjectReplicationPolicyRule `json:"rules,omitempty"` +} + +// MarshalJSON is the custom marshaler for ObjectReplicationPolicyProperties. +func (orpp ObjectReplicationPolicyProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if orpp.SourceAccount != nil { + objectMap["sourceAccount"] = orpp.SourceAccount + } + if orpp.DestinationAccount != nil { + objectMap["destinationAccount"] = orpp.DestinationAccount + } + if orpp.Rules != nil { + objectMap["rules"] = orpp.Rules + } + return json.Marshal(objectMap) +} + +// ObjectReplicationPolicyRule the replication policy rule between two containers. +type ObjectReplicationPolicyRule struct { + // RuleID - Rule Id is auto-generated for each new rule on destination account. It is required for put policy on source account. + RuleID *string `json:"ruleId,omitempty"` + // SourceContainer - Required. Source container name. + SourceContainer *string `json:"sourceContainer,omitempty"` + // DestinationContainer - Required. Destination container name. + DestinationContainer *string `json:"destinationContainer,omitempty"` + // Filters - Optional. An object that defines the filter set. + Filters *ObjectReplicationPolicyFilter `json:"filters,omitempty"` +} + +// Operation storage REST API operation definition. +type Operation struct { + // Name - Operation name: {provider}/{resource}/{operation} + Name *string `json:"name,omitempty"` + // Display - Display metadata associated with the operation. + Display *OperationDisplay `json:"display,omitempty"` + // Origin - The origin of operations. + Origin *string `json:"origin,omitempty"` + // OperationProperties - Properties of operation, include metric specifications. + *OperationProperties `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for Operation. +func (o Operation) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if o.Name != nil { + objectMap["name"] = o.Name + } + if o.Display != nil { + objectMap["display"] = o.Display + } + if o.Origin != nil { + objectMap["origin"] = o.Origin + } + if o.OperationProperties != nil { + objectMap["properties"] = o.OperationProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Operation struct. 
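[Editorial note, not part of the diff] ObjectReplicationPolicyProperties ties a source and destination account to a set of ObjectReplicationPolicyRule entries, each optionally filtered by prefix and creation time. An illustrative sketch, with the import path assumed and all account, container and prefix names invented:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/to"
	storage "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage" // assumed vendored path
)

func main() {
	policy := storage.ObjectReplicationPolicy{
		ObjectReplicationPolicyProperties: &storage.ObjectReplicationPolicyProperties{
			SourceAccount:      to.StringPtr("examplesrcaccount"),
			DestinationAccount: to.StringPtr("exampledstaccount"),
			Rules: &[]storage.ObjectReplicationPolicyRule{{
				SourceContainer:      to.StringPtr("invoices"),
				DestinationContainer: to.StringPtr("invoices-replica"),
				Filters: &storage.ObjectReplicationPolicyFilter{
					PrefixMatch:     &[]string{"2021/"},
					MinCreationTime: to.StringPtr("2021-11-01T00:00:00Z"), // 'yyyy-MM-ddTHH:mm:ssZ' per the field comment
				},
			}},
		},
	}
	// Only the flattened "properties" object is emitted; PolicyID and EnabledTime are READ-ONLY.
	b, _ := json.Marshal(policy)
	fmt.Println(string(b))
}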
+func (o *Operation) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + o.Name = &name + } + case "display": + if v != nil { + var display OperationDisplay + err = json.Unmarshal(*v, &display) + if err != nil { + return err + } + o.Display = &display + } + case "origin": + if v != nil { + var origin string + err = json.Unmarshal(*v, &origin) + if err != nil { + return err + } + o.Origin = &origin + } + case "properties": + if v != nil { + var operationProperties OperationProperties + err = json.Unmarshal(*v, &operationProperties) + if err != nil { + return err + } + o.OperationProperties = &operationProperties + } + } + } + + return nil +} + +// OperationDisplay display metadata associated with the operation. +type OperationDisplay struct { + // Provider - Service provider: Microsoft Storage. + Provider *string `json:"provider,omitempty"` + // Resource - Resource on which the operation is performed etc. + Resource *string `json:"resource,omitempty"` + // Operation - Type of operation: get, read, delete, etc. + Operation *string `json:"operation,omitempty"` + // Description - Description of the operation. + Description *string `json:"description,omitempty"` +} + +// OperationListResult result of the request to list Storage operations. It contains a list of operations +// and a URL link to get the next set of results. +type OperationListResult struct { + autorest.Response `json:"-"` + // Value - List of Storage operations supported by the Storage resource provider. + Value *[]Operation `json:"value,omitempty"` +} + +// OperationProperties properties of operation, include metric specifications. +type OperationProperties struct { + // ServiceSpecification - One property of operation, include metric specifications. + ServiceSpecification *ServiceSpecification `json:"serviceSpecification,omitempty"` +} + +// PrivateEndpoint the Private Endpoint resource. +type PrivateEndpoint struct { + // ID - READ-ONLY; The ARM identifier for Private Endpoint + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for PrivateEndpoint. +func (peVar PrivateEndpoint) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// PrivateEndpointConnection the Private Endpoint Connection resource. +type PrivateEndpointConnection struct { + autorest.Response `json:"-"` + // PrivateEndpointConnectionProperties - Resource properties. + *PrivateEndpointConnectionProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for PrivateEndpointConnection. 
+func (pec PrivateEndpointConnection) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if pec.PrivateEndpointConnectionProperties != nil { + objectMap["properties"] = pec.PrivateEndpointConnectionProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for PrivateEndpointConnection struct. +func (pec *PrivateEndpointConnection) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var privateEndpointConnectionProperties PrivateEndpointConnectionProperties + err = json.Unmarshal(*v, &privateEndpointConnectionProperties) + if err != nil { + return err + } + pec.PrivateEndpointConnectionProperties = &privateEndpointConnectionProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + pec.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + pec.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + pec.Type = &typeVar + } + } + } + + return nil +} + +// PrivateEndpointConnectionListResult list of private endpoint connection associated with the specified +// storage account +type PrivateEndpointConnectionListResult struct { + autorest.Response `json:"-"` + // Value - Array of private endpoint connections + Value *[]PrivateEndpointConnection `json:"value,omitempty"` +} + +// PrivateEndpointConnectionProperties properties of the PrivateEndpointConnectProperties. +type PrivateEndpointConnectionProperties struct { + // PrivateEndpoint - The resource of private end point. + PrivateEndpoint *PrivateEndpoint `json:"privateEndpoint,omitempty"` + // PrivateLinkServiceConnectionState - A collection of information about the state of the connection between service consumer and provider. + PrivateLinkServiceConnectionState *PrivateLinkServiceConnectionState `json:"privateLinkServiceConnectionState,omitempty"` + // ProvisioningState - The provisioning state of the private endpoint connection resource. Possible values include: 'PrivateEndpointConnectionProvisioningStateSucceeded', 'PrivateEndpointConnectionProvisioningStateCreating', 'PrivateEndpointConnectionProvisioningStateDeleting', 'PrivateEndpointConnectionProvisioningStateFailed' + ProvisioningState PrivateEndpointConnectionProvisioningState `json:"provisioningState,omitempty"` +} + +// PrivateLinkResource a private link resource +type PrivateLinkResource struct { + // PrivateLinkResourceProperties - Resource properties. + *PrivateLinkResourceProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for PrivateLinkResource. 
+func (plr PrivateLinkResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if plr.PrivateLinkResourceProperties != nil { + objectMap["properties"] = plr.PrivateLinkResourceProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for PrivateLinkResource struct. +func (plr *PrivateLinkResource) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var privateLinkResourceProperties PrivateLinkResourceProperties + err = json.Unmarshal(*v, &privateLinkResourceProperties) + if err != nil { + return err + } + plr.PrivateLinkResourceProperties = &privateLinkResourceProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + plr.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + plr.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + plr.Type = &typeVar + } + } + } + + return nil +} + +// PrivateLinkResourceListResult a list of private link resources +type PrivateLinkResourceListResult struct { + autorest.Response `json:"-"` + // Value - Array of private link resources + Value *[]PrivateLinkResource `json:"value,omitempty"` +} + +// PrivateLinkResourceProperties properties of a private link resource. +type PrivateLinkResourceProperties struct { + // GroupID - READ-ONLY; The private link resource group id. + GroupID *string `json:"groupId,omitempty"` + // RequiredMembers - READ-ONLY; The private link resource required member names. + RequiredMembers *[]string `json:"requiredMembers,omitempty"` + // RequiredZoneNames - The private link resource Private link DNS zone name. + RequiredZoneNames *[]string `json:"requiredZoneNames,omitempty"` +} + +// MarshalJSON is the custom marshaler for PrivateLinkResourceProperties. +func (plrp PrivateLinkResourceProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if plrp.RequiredZoneNames != nil { + objectMap["requiredZoneNames"] = plrp.RequiredZoneNames + } + return json.Marshal(objectMap) +} + +// PrivateLinkServiceConnectionState a collection of information about the state of the connection between +// service consumer and provider. +type PrivateLinkServiceConnectionState struct { + // Status - Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service. Possible values include: 'PrivateEndpointServiceConnectionStatusPending', 'PrivateEndpointServiceConnectionStatusApproved', 'PrivateEndpointServiceConnectionStatusRejected' + Status PrivateEndpointServiceConnectionStatus `json:"status,omitempty"` + // Description - The reason for approval/rejection of the connection. + Description *string `json:"description,omitempty"` + // ActionRequired - A message indicating if changes on the service provider require any updates on the consumer. + ActionRequired *string `json:"actionRequired,omitempty"` +} + +// ProtocolSettings protocol settings for file service +type ProtocolSettings struct { + // Smb - Setting for SMB protocol + Smb *SmbSetting `json:"smb,omitempty"` +} + +// ProxyResource the resource model definition for a Azure Resource Manager proxy resource. 
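[Editorial note, not part of the diff] PrivateEndpointConnectionProperties carries the consumer/provider handshake via PrivateLinkServiceConnectionState; the status constants named in the comments (PrivateEndpointServiceConnectionStatusApproved and friends) are generated elsewhere in this package. A sketch of building an approval payload, with the import path assumed and the description text invented:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/to"
	storage "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage" // assumed vendored path
)

func main() {
	conn := storage.PrivateEndpointConnection{
		PrivateEndpointConnectionProperties: &storage.PrivateEndpointConnectionProperties{
			PrivateLinkServiceConnectionState: &storage.PrivateLinkServiceConnectionState{
				Status:      storage.PrivateEndpointServiceConnectionStatusApproved,
				Description: to.StringPtr("Approved by storage admin"),
			},
		},
	}
	// The custom marshaler drops the READ-ONLY id/name/type fields and emits only "properties".
	b, _ := json.Marshal(conn)
	fmt.Println(string(b))
}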
It will not +// have tags and a location +type ProxyResource struct { + // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ProxyResource. +func (pr ProxyResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// Queue ... +type Queue struct { + autorest.Response `json:"-"` + // QueueProperties - Queue resource properties. + *QueueProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for Queue. +func (q Queue) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if q.QueueProperties != nil { + objectMap["properties"] = q.QueueProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Queue struct. +func (q *Queue) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var queueProperties QueueProperties + err = json.Unmarshal(*v, &queueProperties) + if err != nil { + return err + } + q.QueueProperties = &queueProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + q.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + q.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + q.Type = &typeVar + } + } + } + + return nil +} + +// QueueProperties ... +type QueueProperties struct { + // Metadata - A name-value pair that represents queue metadata. + Metadata map[string]*string `json:"metadata"` + // ApproximateMessageCount - READ-ONLY; Integer indicating an approximate number of messages in the queue. This number is not lower than the actual number of messages in the queue, but could be higher. + ApproximateMessageCount *int32 `json:"approximateMessageCount,omitempty"` +} + +// MarshalJSON is the custom marshaler for QueueProperties. +func (qp QueueProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if qp.Metadata != nil { + objectMap["metadata"] = qp.Metadata + } + return json.Marshal(objectMap) +} + +// QueueServiceProperties the properties of a storage account’s Queue service. 
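[Editorial note, not part of the diff] Queue follows the same flattened-properties convention seen throughout this file: MarshalJSON emits only the writable properties block, while UnmarshalJSON re-inflates the embedded QueueProperties pointer and the READ-ONLY id/name/type fields from a wire payload. A small round-trip sketch, with the import path assumed and the JSON payload invented:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/to"
	storage "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage" // assumed vendored path
)

func main() {
	// Outbound: only "properties" survives the custom marshaler.
	q := storage.Queue{
		QueueProperties: &storage.QueueProperties{
			Metadata: map[string]*string{"team": to.StringPtr("billing")},
		},
	}
	out, _ := json.Marshal(q)
	fmt.Println(string(out)) // {"properties":{"metadata":{"team":"billing"}}}

	// Inbound: the custom unmarshaler hoists the nested properties object.
	payload := []byte(`{"name":"orders","type":"Microsoft.Storage/storageAccounts/queueServices/queues","properties":{"approximateMessageCount":3}}`)
	var got storage.Queue
	if err := json.Unmarshal(payload, &got); err != nil {
		panic(err)
	}
	fmt.Println(*got.Name, *got.QueueProperties.ApproximateMessageCount)
}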
+type QueueServiceProperties struct { + autorest.Response `json:"-"` + // QueueServicePropertiesProperties - The properties of a storage account’s Queue service. + *QueueServicePropertiesProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for QueueServiceProperties. +func (qsp QueueServiceProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if qsp.QueueServicePropertiesProperties != nil { + objectMap["properties"] = qsp.QueueServicePropertiesProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for QueueServiceProperties struct. +func (qsp *QueueServiceProperties) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var queueServiceProperties QueueServicePropertiesProperties + err = json.Unmarshal(*v, &queueServiceProperties) + if err != nil { + return err + } + qsp.QueueServicePropertiesProperties = &queueServiceProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + qsp.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + qsp.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + qsp.Type = &typeVar + } + } + } + + return nil +} + +// QueueServicePropertiesProperties the properties of a storage account’s Queue service. +type QueueServicePropertiesProperties struct { + // Cors - Specifies CORS rules for the Queue service. You can include up to five CorsRule elements in the request. If no CorsRule elements are included in the request body, all CORS rules will be deleted, and CORS will be disabled for the Queue service. + Cors *CorsRules `json:"cors,omitempty"` +} + +// Resource common fields that are returned in the response for all Azure Resource Manager resources +type Resource struct { + // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for Resource. +func (r Resource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// ResourceAccessRule resource Access Rule. 
+type ResourceAccessRule struct { + // TenantID - Tenant Id + TenantID *string `json:"tenantId,omitempty"` + // ResourceID - Resource Id + ResourceID *string `json:"resourceId,omitempty"` +} + +// RestorePolicyProperties the blob service properties for blob restore policy +type RestorePolicyProperties struct { + // Enabled - Blob restore is enabled if set to true. + Enabled *bool `json:"enabled,omitempty"` + // Days - how long this blob can be restored. It should be great than zero and less than DeleteRetentionPolicy.days. + Days *int32 `json:"days,omitempty"` + // LastEnabledTime - READ-ONLY; Deprecated in favor of minRestoreTime property. + LastEnabledTime *date.Time `json:"lastEnabledTime,omitempty"` + // MinRestoreTime - READ-ONLY; Returns the minimum date and time that the restore can be started. + MinRestoreTime *date.Time `json:"minRestoreTime,omitempty"` +} + +// MarshalJSON is the custom marshaler for RestorePolicyProperties. +func (rpp RestorePolicyProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if rpp.Enabled != nil { + objectMap["enabled"] = rpp.Enabled + } + if rpp.Days != nil { + objectMap["days"] = rpp.Days + } + return json.Marshal(objectMap) +} + +// Restriction the restriction because of which SKU cannot be used. +type Restriction struct { + // Type - READ-ONLY; The type of restrictions. As of now only possible value for this is location. + Type *string `json:"type,omitempty"` + // Values - READ-ONLY; The value of restrictions. If the restriction type is set to location. This would be different locations where the SKU is restricted. + Values *[]string `json:"values,omitempty"` + // ReasonCode - The reason for the restriction. As of now this can be "QuotaId" or "NotAvailableForSubscription". Quota Id is set when the SKU has requiredQuotas parameter as the subscription does not belong to that quota. The "NotAvailableForSubscription" is related to capacity at DC. Possible values include: 'ReasonCodeQuotaID', 'ReasonCodeNotAvailableForSubscription' + ReasonCode ReasonCode `json:"reasonCode,omitempty"` +} + +// MarshalJSON is the custom marshaler for Restriction. +func (r Restriction) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if r.ReasonCode != "" { + objectMap["reasonCode"] = r.ReasonCode + } + return json.Marshal(objectMap) +} + +// RoutingPreference routing preference defines the type of network, either microsoft or internet routing +// to be used to deliver the user data, the default option is microsoft routing +type RoutingPreference struct { + // RoutingChoice - Routing Choice defines the kind of network routing opted by the user. Possible values include: 'RoutingChoiceMicrosoftRouting', 'RoutingChoiceInternetRouting' + RoutingChoice RoutingChoice `json:"routingChoice,omitempty"` + // PublishMicrosoftEndpoints - A boolean flag which indicates whether microsoft routing storage endpoints are to be published + PublishMicrosoftEndpoints *bool `json:"publishMicrosoftEndpoints,omitempty"` + // PublishInternetEndpoints - A boolean flag which indicates whether internet routing storage endpoints are to be published + PublishInternetEndpoints *bool `json:"publishInternetEndpoints,omitempty"` +} + +// SasPolicy sasPolicy assigned to the storage account. +type SasPolicy struct { + // SasExpirationPeriod - The SAS expiration period, DD.HH:MM:SS. + SasExpirationPeriod *string `json:"sasExpirationPeriod,omitempty"` + // ExpirationAction - The SAS expiration action. Can only be Log. 
+ ExpirationAction *string `json:"expirationAction,omitempty"` +} + +// ServiceSasParameters the parameters to list service SAS credentials of a specific resource. +type ServiceSasParameters struct { + // CanonicalizedResource - The canonical path to the signed resource. + CanonicalizedResource *string `json:"canonicalizedResource,omitempty"` + // Resource - The signed services accessible with the service SAS. Possible values include: Blob (b), Container (c), File (f), Share (s). Possible values include: 'SignedResourceB', 'SignedResourceC', 'SignedResourceF', 'SignedResourceS' + Resource SignedResource `json:"signedResource,omitempty"` + // Permissions - The signed permissions for the service SAS. Possible values include: Read (r), Write (w), Delete (d), List (l), Add (a), Create (c), Update (u) and Process (p). Possible values include: 'PermissionsR', 'PermissionsD', 'PermissionsW', 'PermissionsL', 'PermissionsA', 'PermissionsC', 'PermissionsU', 'PermissionsP' + Permissions Permissions `json:"signedPermission,omitempty"` + // IPAddressOrRange - An IP address or a range of IP addresses from which to accept requests. + IPAddressOrRange *string `json:"signedIp,omitempty"` + // Protocols - The protocol permitted for a request made with the account SAS. Possible values include: 'HTTPProtocolHttpshttp', 'HTTPProtocolHTTPS' + Protocols HTTPProtocol `json:"signedProtocol,omitempty"` + // SharedAccessStartTime - The time at which the SAS becomes valid. + SharedAccessStartTime *date.Time `json:"signedStart,omitempty"` + // SharedAccessExpiryTime - The time at which the shared access signature becomes invalid. + SharedAccessExpiryTime *date.Time `json:"signedExpiry,omitempty"` + // Identifier - A unique value up to 64 characters in length that correlates to an access policy specified for the container, queue, or table. + Identifier *string `json:"signedIdentifier,omitempty"` + // PartitionKeyStart - The start of partition key. + PartitionKeyStart *string `json:"startPk,omitempty"` + // PartitionKeyEnd - The end of partition key. + PartitionKeyEnd *string `json:"endPk,omitempty"` + // RowKeyStart - The start of row key. + RowKeyStart *string `json:"startRk,omitempty"` + // RowKeyEnd - The end of row key. + RowKeyEnd *string `json:"endRk,omitempty"` + // KeyToSign - The key to sign the account SAS token with. + KeyToSign *string `json:"keyToSign,omitempty"` + // CacheControl - The response header override for cache control. + CacheControl *string `json:"rscc,omitempty"` + // ContentDisposition - The response header override for content disposition. + ContentDisposition *string `json:"rscd,omitempty"` + // ContentEncoding - The response header override for content encoding. + ContentEncoding *string `json:"rsce,omitempty"` + // ContentLanguage - The response header override for content language. + ContentLanguage *string `json:"rscl,omitempty"` + // ContentType - The response header override for content type. + ContentType *string `json:"rsct,omitempty"` +} + +// ServiceSpecification one property of operation, include metric specifications. +type ServiceSpecification struct { + // MetricSpecifications - Metric specifications of operation. + MetricSpecifications *[]MetricSpecification `json:"metricSpecifications,omitempty"` +} + +// SignedIdentifier ... +type SignedIdentifier struct { + // ID - An unique identifier of the stored access policy. 
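[Editorial note, not part of the diff] ServiceSasParameters maps the signed* query parameters of a service SAS onto typed fields; the enum constants named in the comments (SignedResourceC, PermissionsR, HTTPProtocolHTTPS, and so on) are generated elsewhere in this package. A sketch of a read-only container SAS request body, with the import path assumed and the canonicalized resource invented:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/Azure/go-autorest/autorest/date"
	"github.com/Azure/go-autorest/autorest/to"
	storage "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage" // assumed vendored path
)

func main() {
	now := time.Now().UTC()
	params := storage.ServiceSasParameters{
		CanonicalizedResource:  to.StringPtr("/blob/examplestorageacct/examplecontainer"),
		Resource:               storage.SignedResourceC, // container-scoped SAS
		Permissions:            storage.PermissionsR,    // read-only
		Protocols:              storage.HTTPProtocolHTTPS,
		SharedAccessStartTime:  &date.Time{Time: now},
		SharedAccessExpiryTime: &date.Time{Time: now.Add(4 * time.Hour)},
	}
	b, _ := json.Marshal(params)
	fmt.Println(string(b))
}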
+ ID *string `json:"id,omitempty"` + // AccessPolicy - Access policy + AccessPolicy *AccessPolicy `json:"accessPolicy,omitempty"` +} + +// Sku the SKU of the storage account. +type Sku struct { + // Name - Possible values include: 'SkuNameStandardLRS', 'SkuNameStandardGRS', 'SkuNameStandardRAGRS', 'SkuNameStandardZRS', 'SkuNamePremiumLRS', 'SkuNamePremiumZRS', 'SkuNameStandardGZRS', 'SkuNameStandardRAGZRS' + Name SkuName `json:"name,omitempty"` + // Tier - Possible values include: 'SkuTierStandard', 'SkuTierPremium' + Tier SkuTier `json:"tier,omitempty"` +} + +// SKUCapability the capability information in the specified SKU, including file encryption, network ACLs, +// change notification, etc. +type SKUCapability struct { + // Name - READ-ONLY; The name of capability, The capability information in the specified SKU, including file encryption, network ACLs, change notification, etc. + Name *string `json:"name,omitempty"` + // Value - READ-ONLY; A string value to indicate states of given capability. Possibly 'true' or 'false'. + Value *string `json:"value,omitempty"` +} + +// MarshalJSON is the custom marshaler for SKUCapability. +func (sc SKUCapability) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// SkuInformation storage SKU and its properties +type SkuInformation struct { + // Name - Possible values include: 'SkuNameStandardLRS', 'SkuNameStandardGRS', 'SkuNameStandardRAGRS', 'SkuNameStandardZRS', 'SkuNamePremiumLRS', 'SkuNamePremiumZRS', 'SkuNameStandardGZRS', 'SkuNameStandardRAGZRS' + Name SkuName `json:"name,omitempty"` + // Tier - Possible values include: 'SkuTierStandard', 'SkuTierPremium' + Tier SkuTier `json:"tier,omitempty"` + // ResourceType - READ-ONLY; The type of the resource, usually it is 'storageAccounts'. + ResourceType *string `json:"resourceType,omitempty"` + // Kind - READ-ONLY; Indicates the type of storage account. Possible values include: 'KindStorage', 'KindStorageV2', 'KindBlobStorage', 'KindFileStorage', 'KindBlockBlobStorage' + Kind Kind `json:"kind,omitempty"` + // Locations - READ-ONLY; The set of locations that the SKU is available. This will be supported and registered Azure Geo Regions (e.g. West US, East US, Southeast Asia, etc.). + Locations *[]string `json:"locations,omitempty"` + // Capabilities - READ-ONLY; The capability information in the specified SKU, including file encryption, network ACLs, change notification, etc. + Capabilities *[]SKUCapability `json:"capabilities,omitempty"` + // Restrictions - The restrictions because of which SKU cannot be used. This is empty if there are no restrictions. + Restrictions *[]Restriction `json:"restrictions,omitempty"` +} + +// MarshalJSON is the custom marshaler for SkuInformation. +func (si SkuInformation) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if si.Name != "" { + objectMap["name"] = si.Name + } + if si.Tier != "" { + objectMap["tier"] = si.Tier + } + if si.Restrictions != nil { + objectMap["restrictions"] = si.Restrictions + } + return json.Marshal(objectMap) +} + +// SkuListResult the response from the List Storage SKUs operation. +type SkuListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; Get the list result of storage SKUs and their properties. + Value *[]SkuInformation `json:"value,omitempty"` +} + +// MarshalJSON is the custom marshaler for SkuListResult. 
+func (slr SkuListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// SmbSetting setting for SMB protocol +type SmbSetting struct { + // Multichannel - Multichannel setting. Applies to Premium FileStorage only. + Multichannel *Multichannel `json:"multichannel,omitempty"` + // Versions - SMB protocol versions supported by server. Valid values are SMB2.1, SMB3.0, SMB3.1.1. Should be passed as a string with delimiter ';'. + Versions *string `json:"versions,omitempty"` + // AuthenticationMethods - SMB authentication methods supported by server. Valid values are NTLMv2, Kerberos. Should be passed as a string with delimiter ';'. + AuthenticationMethods *string `json:"authenticationMethods,omitempty"` + // KerberosTicketEncryption - Kerberos ticket encryption supported by server. Valid values are RC4-HMAC, AES-256. Should be passed as a string with delimiter ';' + KerberosTicketEncryption *string `json:"kerberosTicketEncryption,omitempty"` + // ChannelEncryption - SMB channel encryption supported by server. Valid values are AES-128-CCM, AES-128-GCM, AES-256-GCM. Should be passed as a string with delimiter ';'. + ChannelEncryption *string `json:"channelEncryption,omitempty"` +} + +// SystemData metadata pertaining to creation and last modification of the resource. +type SystemData struct { + // CreatedBy - The identity that created the resource. + CreatedBy *string `json:"createdBy,omitempty"` + // CreatedByType - The type of identity that created the resource. Possible values include: 'CreatedByTypeUser', 'CreatedByTypeApplication', 'CreatedByTypeManagedIdentity', 'CreatedByTypeKey' + CreatedByType CreatedByType `json:"createdByType,omitempty"` + // CreatedAt - The timestamp of resource creation (UTC). + CreatedAt *date.Time `json:"createdAt,omitempty"` + // LastModifiedBy - The identity that last modified the resource. + LastModifiedBy *string `json:"lastModifiedBy,omitempty"` + // LastModifiedByType - The type of identity that last modified the resource. Possible values include: 'CreatedByTypeUser', 'CreatedByTypeApplication', 'CreatedByTypeManagedIdentity', 'CreatedByTypeKey' + LastModifiedByType CreatedByType `json:"lastModifiedByType,omitempty"` + // LastModifiedAt - The timestamp of resource last modification (UTC) + LastModifiedAt *date.Time `json:"lastModifiedAt,omitempty"` +} + +// Table properties of the table, including Id, resource name, resource type. +type Table struct { + autorest.Response `json:"-"` + // TableProperties - Table resource properties. + *TableProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for Table. +func (t Table) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if t.TableProperties != nil { + objectMap["properties"] = t.TableProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Table struct. 
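[Editorial note, not part of the diff] SmbSetting exposes the SMB knobs as ';'-delimited strings rather than lists, per its field comments. A sketch of a protocol block restricted to SMB 3.1.1 with Kerberos and AES-256-GCM channel encryption; the import path is assumed and the values are taken from the allowed values listed in the field comments:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/to"
	storage "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage" // assumed vendored path
)

func main() {
	settings := storage.ProtocolSettings{
		Smb: &storage.SmbSetting{
			Multichannel:             &storage.Multichannel{Enabled: to.BoolPtr(true)}, // Premium FileStorage only
			Versions:                 to.StringPtr("SMB3.1.1"),
			AuthenticationMethods:    to.StringPtr("Kerberos"),
			KerberosTicketEncryption: to.StringPtr("AES-256"),
			ChannelEncryption:        to.StringPtr("AES-256-GCM"),
		},
	}
	b, _ := json.Marshal(settings)
	fmt.Println(string(b))
}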
+func (t *Table) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var tableProperties TableProperties + err = json.Unmarshal(*v, &tableProperties) + if err != nil { + return err + } + t.TableProperties = &tableProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + t.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + t.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + t.Type = &typeVar + } + } + } + + return nil +} + +// TableProperties ... +type TableProperties struct { + // TableName - READ-ONLY; Table name under the specified account + TableName *string `json:"tableName,omitempty"` +} + +// MarshalJSON is the custom marshaler for TableProperties. +func (tp TableProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// TableServiceProperties the properties of a storage account’s Table service. +type TableServiceProperties struct { + autorest.Response `json:"-"` + // TableServicePropertiesProperties - The properties of a storage account’s Table service. + *TableServicePropertiesProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for TableServiceProperties. +func (tsp TableServiceProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if tsp.TableServicePropertiesProperties != nil { + objectMap["properties"] = tsp.TableServicePropertiesProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for TableServiceProperties struct. +func (tsp *TableServiceProperties) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var tableServiceProperties TableServicePropertiesProperties + err = json.Unmarshal(*v, &tableServiceProperties) + if err != nil { + return err + } + tsp.TableServicePropertiesProperties = &tableServiceProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + tsp.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + tsp.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + tsp.Type = &typeVar + } + } + } + + return nil +} + +// TableServicePropertiesProperties the properties of a storage account’s Table service. +type TableServicePropertiesProperties struct { + // Cors - Specifies CORS rules for the Table service. 
You can include up to five CorsRule elements in the request. If no CorsRule elements are included in the request body, all CORS rules will be deleted, and CORS will be disabled for the Table service. + Cors *CorsRules `json:"cors,omitempty"` +} + +// TagFilter blob index tag based filtering for blob objects +type TagFilter struct { + // Name - This is the filter tag name, it can have 1 - 128 characters + Name *string `json:"name,omitempty"` + // Op - This is the comparison operator which is used for object comparison and filtering. Only == (equality operator) is currently supported + Op *string `json:"op,omitempty"` + // Value - This is the filter tag value field used for tag based filtering, it can have 0 - 256 characters + Value *string `json:"value,omitempty"` +} + +// TagProperty a tag of the LegalHold of a blob container. +type TagProperty struct { + // Tag - READ-ONLY; The tag value. + Tag *string `json:"tag,omitempty"` + // Timestamp - READ-ONLY; Returns the date and time the tag was added. + Timestamp *date.Time `json:"timestamp,omitempty"` + // ObjectIdentifier - READ-ONLY; Returns the Object ID of the user who added the tag. + ObjectIdentifier *string `json:"objectIdentifier,omitempty"` + // TenantID - READ-ONLY; Returns the Tenant ID that issued the token for the user who added the tag. + TenantID *string `json:"tenantId,omitempty"` + // Upn - READ-ONLY; Returns the User Principal Name of the user who added the tag. + Upn *string `json:"upn,omitempty"` +} + +// MarshalJSON is the custom marshaler for TagProperty. +func (tp TagProperty) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// TrackedResource the resource model definition for an Azure Resource Manager tracked top level resource +// which has 'tags' and a 'location' +type TrackedResource struct { + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` + // Location - The geo-location where the resource lives + Location *string `json:"location,omitempty"` + // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for TrackedResource. +func (tr TrackedResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if tr.Tags != nil { + objectMap["tags"] = tr.Tags + } + if tr.Location != nil { + objectMap["location"] = tr.Location + } + return json.Marshal(objectMap) +} + +// UpdateHistoryProperty an update history of the ImmutabilityPolicy of a blob container. +type UpdateHistoryProperty struct { + // Update - READ-ONLY; The ImmutabilityPolicy update type of a blob container, possible values include: put, lock and extend. Possible values include: 'ImmutabilityPolicyUpdateTypePut', 'ImmutabilityPolicyUpdateTypeLock', 'ImmutabilityPolicyUpdateTypeExtend' + Update ImmutabilityPolicyUpdateType `json:"update,omitempty"` + // ImmutabilityPeriodSinceCreationInDays - READ-ONLY; The immutability period for the blobs in the container since the policy creation, in days. 
+ ImmutabilityPeriodSinceCreationInDays *int32 `json:"immutabilityPeriodSinceCreationInDays,omitempty"` + // Timestamp - READ-ONLY; Returns the date and time the ImmutabilityPolicy was updated. + Timestamp *date.Time `json:"timestamp,omitempty"` + // ObjectIdentifier - READ-ONLY; Returns the Object ID of the user who updated the ImmutabilityPolicy. + ObjectIdentifier *string `json:"objectIdentifier,omitempty"` + // TenantID - READ-ONLY; Returns the Tenant ID that issued the token for the user who updated the ImmutabilityPolicy. + TenantID *string `json:"tenantId,omitempty"` + // Upn - READ-ONLY; Returns the User Principal Name of the user who updated the ImmutabilityPolicy. + Upn *string `json:"upn,omitempty"` +} + +// MarshalJSON is the custom marshaler for UpdateHistoryProperty. +func (uhp UpdateHistoryProperty) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// Usage describes Storage Resource Usage. +type Usage struct { + // Unit - READ-ONLY; Gets the unit of measurement. Possible values include: 'UsageUnitCount', 'UsageUnitBytes', 'UsageUnitSeconds', 'UsageUnitPercent', 'UsageUnitCountsPerSecond', 'UsageUnitBytesPerSecond' + Unit UsageUnit `json:"unit,omitempty"` + // CurrentValue - READ-ONLY; Gets the current count of the allocated resources in the subscription. + CurrentValue *int32 `json:"currentValue,omitempty"` + // Limit - READ-ONLY; Gets the maximum count of the resources that can be allocated in the subscription. + Limit *int32 `json:"limit,omitempty"` + // Name - READ-ONLY; Gets the name of the type of usage. + Name *UsageName `json:"name,omitempty"` +} + +// MarshalJSON is the custom marshaler for Usage. +func (u Usage) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// UsageListResult the response from the List Usages operation. +type UsageListResult struct { + autorest.Response `json:"-"` + // Value - Gets or sets the list of Storage Resource Usages. + Value *[]Usage `json:"value,omitempty"` +} + +// UsageName the usage names that can be used; currently limited to StorageAccount. +type UsageName struct { + // Value - READ-ONLY; Gets a string describing the resource name. + Value *string `json:"value,omitempty"` + // LocalizedValue - READ-ONLY; Gets a localized string describing the resource name. + LocalizedValue *string `json:"localizedValue,omitempty"` +} + +// MarshalJSON is the custom marshaler for UsageName. +func (un UsageName) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// UserAssignedIdentity userAssignedIdentity for the resource. +type UserAssignedIdentity struct { + // PrincipalID - READ-ONLY; The principal ID of the identity. + PrincipalID *string `json:"principalId,omitempty"` + // ClientID - READ-ONLY; The client ID of the identity. + ClientID *string `json:"clientId,omitempty"` +} + +// MarshalJSON is the custom marshaler for UserAssignedIdentity. +func (uai UserAssignedIdentity) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + return json.Marshal(objectMap) +} + +// VirtualNetworkRule virtual Network rule. +type VirtualNetworkRule struct { + // VirtualNetworkResourceID - Resource ID of a subnet, for example: /subscriptions/{subscriptionId}/resourceGroups/{groupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}. 
+ VirtualNetworkResourceID *string `json:"id,omitempty"` + // Action - The action of virtual network rule. Possible values include: 'ActionAllow' + Action Action `json:"action,omitempty"` + // State - Gets the state of virtual network rule. Possible values include: 'StateProvisioning', 'StateDeprovisioning', 'StateSucceeded', 'StateFailed', 'StateNetworkSourceDeleted' + State State `json:"state,omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/objectreplicationpolicies.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/objectreplicationpolicies.go new file mode 100644 index 000000000000..c41bedaf4236 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/objectreplicationpolicies.go @@ -0,0 +1,417 @@ +package storage + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// ObjectReplicationPoliciesClient is the the Azure Storage Management API. +type ObjectReplicationPoliciesClient struct { + BaseClient +} + +// NewObjectReplicationPoliciesClient creates an instance of the ObjectReplicationPoliciesClient client. +func NewObjectReplicationPoliciesClient(subscriptionID string) ObjectReplicationPoliciesClient { + return NewObjectReplicationPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewObjectReplicationPoliciesClientWithBaseURI creates an instance of the ObjectReplicationPoliciesClient client +// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign +// clouds, Azure stack). +func NewObjectReplicationPoliciesClientWithBaseURI(baseURI string, subscriptionID string) ObjectReplicationPoliciesClient { + return ObjectReplicationPoliciesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create or update the object replication policy of the storage account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// objectReplicationPolicyID - the ID of object replication policy or 'default' if the policy ID is unknown. +// properties - the object replication policy set to a storage account. A unique policy ID will be created if +// absent. 
+func (client ObjectReplicationPoliciesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, objectReplicationPolicyID string, properties ObjectReplicationPolicy) (result ObjectReplicationPolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ObjectReplicationPoliciesClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: objectReplicationPolicyID, + Constraints: []validation.Constraint{{Target: "objectReplicationPolicyID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: properties, + Constraints: []validation.Constraint{{Target: "properties.ObjectReplicationPolicyProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "properties.ObjectReplicationPolicyProperties.SourceAccount", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "properties.ObjectReplicationPolicyProperties.DestinationAccount", Name: validation.Null, Rule: true, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("storage.ObjectReplicationPoliciesClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, objectReplicationPolicyID, properties) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "CreateOrUpdate", resp, "Failure responding to request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
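A minimal usage sketch for CreateOrUpdate, assuming placeholder subscription, resource group and account names, the go-autorest environment authorizer, and the policy model fields referenced by the validation block above (SourceAccount and DestinationAccount are the required ones).

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Assumed helper: reads credentials from the AZURE_* environment variables.
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		panic(err)
	}

	client := storage.NewObjectReplicationPoliciesClient("00000000-0000-0000-0000-000000000000")
	client.Authorizer = authorizer

	src := "examplesrcaccount"
	dst := "exampledstaccount"
	policy := storage.ObjectReplicationPolicy{
		ObjectReplicationPolicyProperties: &storage.ObjectReplicationPolicyProperties{
			// Required by the validation constraints in CreateOrUpdate above.
			SourceAccount:      &src,
			DestinationAccount: &dst,
		},
	}

	// Passing "default" as the policy ID asks the service to allocate one.
	result, err := client.CreateOrUpdate(context.Background(), "example-resources", dst, "default", policy)
	if err != nil {
		panic(err)
	}
	fmt.Println("object replication policy upserted:", result.Status)
}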
+func (client ObjectReplicationPoliciesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, objectReplicationPolicyID string, properties ObjectReplicationPolicy) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "objectReplicationPolicyId": autorest.Encode("path", objectReplicationPolicyID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/objectReplicationPolicies/{objectReplicationPolicyId}", pathParameters), + autorest.WithJSON(properties), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ObjectReplicationPoliciesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ObjectReplicationPoliciesClient) CreateOrUpdateResponder(resp *http.Response) (result ObjectReplicationPolicy, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the object replication policy associated with the specified storage account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// objectReplicationPolicyID - the ID of object replication policy or 'default' if the policy ID is unknown. 
+func (client ObjectReplicationPoliciesClient) Delete(ctx context.Context, resourceGroupName string, accountName string, objectReplicationPolicyID string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ObjectReplicationPoliciesClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: objectReplicationPolicyID, + Constraints: []validation.Constraint{{Target: "objectReplicationPolicyID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.ObjectReplicationPoliciesClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, objectReplicationPolicyID) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "Delete", resp, "Failure responding to request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client ObjectReplicationPoliciesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, objectReplicationPolicyID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "objectReplicationPolicyId": autorest.Encode("path", objectReplicationPolicyID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/objectReplicationPolicies/{objectReplicationPolicyId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client ObjectReplicationPoliciesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ObjectReplicationPoliciesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get get the object replication policy of the storage account by policy ID. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// objectReplicationPolicyID - the ID of object replication policy or 'default' if the policy ID is unknown. +func (client ObjectReplicationPoliciesClient) Get(ctx context.Context, resourceGroupName string, accountName string, objectReplicationPolicyID string) (result ObjectReplicationPolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ObjectReplicationPoliciesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: objectReplicationPolicyID, + Constraints: []validation.Constraint{{Target: "objectReplicationPolicyID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.ObjectReplicationPoliciesClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, accountName, objectReplicationPolicyID) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client ObjectReplicationPoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, objectReplicationPolicyID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "objectReplicationPolicyId": autorest.Encode("path", objectReplicationPolicyID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/objectReplicationPolicies/{objectReplicationPolicyId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ObjectReplicationPoliciesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ObjectReplicationPoliciesClient) GetResponder(resp *http.Response) (result ObjectReplicationPolicy, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List list the object replication policies associated with the storage account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
+func (client ObjectReplicationPoliciesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result ObjectReplicationPolicies, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ObjectReplicationPoliciesClient.List") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.ObjectReplicationPoliciesClient", "List", err.Error()) + } + + req, err := client.ListPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "List", resp, "Failure responding to request") + return + } + + return +} + +// ListPreparer prepares the List request. +func (client ObjectReplicationPoliciesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/objectReplicationPolicies", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ObjectReplicationPoliciesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
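A companion sketch for the read path under the same assumptions; the Value field on the list result is assumed from this package's models and is guarded against nil.

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	authorizer, err := auth.NewAuthorizerFromEnvironment() // assumed auth helper
	if err != nil {
		panic(err)
	}

	client := storage.NewObjectReplicationPoliciesClient("00000000-0000-0000-0000-000000000000")
	client.Authorizer = authorizer

	// List returns every policy on the account; Value may be nil when
	// nothing is configured.
	result, err := client.List(context.Background(), "example-resources", "exampledstaccount")
	if err != nil {
		panic(err)
	}
	if result.Value != nil {
		for _, p := range *result.Value {
			if p.Name != nil {
				fmt.Println("policy:", *p.Name)
			}
		}
	}
}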
+func (client ObjectReplicationPoliciesClient) ListResponder(resp *http.Response) (result ObjectReplicationPolicies, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/operations.go similarity index 86% rename from vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/operations.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/operations.go index dcd8de83c6ad..3f8311c259bd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/operations.go @@ -1,4 +1,4 @@ -package account +package storage // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. @@ -14,7 +14,7 @@ import ( "net/http" ) -// OperationsClient is the creates an Azure Data Lake Store account management client. +// OperationsClient is the the Azure Storage Management API. type OperationsClient struct { BaseClient } @@ -30,7 +30,7 @@ func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) Opera return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} } -// List lists all of the available Data Lake Store REST API operations. +// List lists all of the available Storage Rest API operations. func (client OperationsClient) List(ctx context.Context) (result OperationListResult, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List") @@ -44,20 +44,20 @@ func (client OperationsClient) List(ctx context.Context) (result OperationListRe } req, err := client.ListPreparer(ctx) if err != nil { - err = autorest.NewErrorWithError(err, "account.OperationsClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", nil, "Failure preparing request") return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "account.OperationsClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", resp, "Failure sending request") return } result, err = client.ListResponder(resp) if err != nil { - err = autorest.NewErrorWithError(err, "account.OperationsClient", "List", resp, "Failure responding to request") + err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", resp, "Failure responding to request") return } @@ -66,7 +66,7 @@ func (client OperationsClient) List(ctx context.Context) (result OperationListRe // ListPreparer prepares the List request. 
func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) { - const APIVersion = "2016-11-01" + const APIVersion = "2021-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -74,7 +74,7 @@ func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/providers/Microsoft.DataLakeStore/operations"), + autorest.WithPath("/providers/Microsoft.Storage/operations"), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/privateendpointconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/privateendpointconnections.go new file mode 100644 index 000000000000..33702adf9a0a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/privateendpointconnections.go @@ -0,0 +1,411 @@ +package storage + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// PrivateEndpointConnectionsClient is the the Azure Storage Management API. +type PrivateEndpointConnectionsClient struct { + BaseClient +} + +// NewPrivateEndpointConnectionsClient creates an instance of the PrivateEndpointConnectionsClient client. +func NewPrivateEndpointConnectionsClient(subscriptionID string) PrivateEndpointConnectionsClient { + return NewPrivateEndpointConnectionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewPrivateEndpointConnectionsClientWithBaseURI creates an instance of the PrivateEndpointConnectionsClient client +// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign +// clouds, Azure stack). +func NewPrivateEndpointConnectionsClientWithBaseURI(baseURI string, subscriptionID string) PrivateEndpointConnectionsClient { + return PrivateEndpointConnectionsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Delete deletes the specified private endpoint connection associated with the storage account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
+// privateEndpointConnectionName - the name of the private endpoint connection associated with the Azure +// resource +func (client PrivateEndpointConnectionsClient) Delete(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.PrivateEndpointConnectionsClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, privateEndpointConnectionName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Delete", resp, "Failure responding to request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client PrivateEndpointConnectionsClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "privateEndpointConnectionName": autorest.Encode("path", privateEndpointConnectionName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client PrivateEndpointConnectionsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client PrivateEndpointConnectionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified private endpoint connection associated with the storage account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// privateEndpointConnectionName - the name of the private endpoint connection associated with the Azure +// resource +func (client PrivateEndpointConnectionsClient) Get(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string) (result PrivateEndpointConnection, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.PrivateEndpointConnectionsClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, accountName, privateEndpointConnectionName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client PrivateEndpointConnectionsClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "privateEndpointConnectionName": autorest.Encode("path", privateEndpointConnectionName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client PrivateEndpointConnectionsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client PrivateEndpointConnectionsClient) GetResponder(resp *http.Response) (result PrivateEndpointConnection, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List list all the private endpoint connections associated with the storage account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
+func (client PrivateEndpointConnectionsClient) List(ctx context.Context, resourceGroupName string, accountName string) (result PrivateEndpointConnectionListResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.List") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.PrivateEndpointConnectionsClient", "List", err.Error()) + } + + req, err := client.ListPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "List", resp, "Failure responding to request") + return + } + + return +} + +// ListPreparer prepares the List request. +func (client PrivateEndpointConnectionsClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client PrivateEndpointConnectionsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client PrivateEndpointConnectionsClient) ListResponder(resp *http.Response) (result PrivateEndpointConnectionListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Put update the state of specified private endpoint connection associated with the storage account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// privateEndpointConnectionName - the name of the private endpoint connection associated with the Azure +// resource +// properties - the private endpoint connection properties. +func (client PrivateEndpointConnectionsClient) Put(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string, properties PrivateEndpointConnection) (result PrivateEndpointConnection, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.Put") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: properties, + Constraints: []validation.Constraint{{Target: "properties.PrivateEndpointConnectionProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "properties.PrivateEndpointConnectionProperties.PrivateLinkServiceConnectionState", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { + return result, validation.NewError("storage.PrivateEndpointConnectionsClient", "Put", err.Error()) + } + + req, err := client.PutPreparer(ctx, resourceGroupName, accountName, privateEndpointConnectionName, properties) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Put", nil, "Failure preparing request") + return + } + + resp, err := client.PutSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Put", resp, "Failure sending request") + return + } + + result, err = client.PutResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Put", resp, "Failure responding to request") + return + } + + return +} + +// PutPreparer prepares the Put request. 
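A sketch of approving a pending private endpoint connection via Put, assuming placeholder names, the go-autorest environment authorizer, and the connection-state model plus Approved status constant from this package's models; the validation block above requires PrivateLinkServiceConnectionState to be set.

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	authorizer, err := auth.NewAuthorizerFromEnvironment() // assumed auth helper
	if err != nil {
		panic(err)
	}

	client := storage.NewPrivateEndpointConnectionsClient("00000000-0000-0000-0000-000000000000")
	client.Authorizer = authorizer

	description := "Approved via management API"
	connection := storage.PrivateEndpointConnection{
		PrivateEndpointConnectionProperties: &storage.PrivateEndpointConnectionProperties{
			// PrivateLinkServiceConnectionState is required by the validation
			// block in Put above; the status constant is assumed from the
			// package's enum definitions.
			PrivateLinkServiceConnectionState: &storage.PrivateLinkServiceConnectionState{
				Status:      storage.PrivateEndpointServiceConnectionStatusApproved,
				Description: &description,
			},
		},
	}

	result, err := client.Put(context.Background(), "example-resources", "examplestorageacct", "example-connection", connection)
	if err != nil {
		panic(err)
	}
	fmt.Println("private endpoint connection updated:", result.Status)
}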
+func (client PrivateEndpointConnectionsClient) PutPreparer(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string, properties PrivateEndpointConnection) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "privateEndpointConnectionName": autorest.Encode("path", privateEndpointConnectionName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}", pathParameters), + autorest.WithJSON(properties), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// PutSender sends the Put request. The method will close the +// http.Response Body if it receives an error. +func (client PrivateEndpointConnectionsClient) PutSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// PutResponder handles the response to the Put request. The method always +// closes the http.Response Body. +func (client PrivateEndpointConnectionsClient) PutResponder(resp *http.Response) (result PrivateEndpointConnection, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/privatelinkresources.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/privatelinkresources.go new file mode 100644 index 000000000000..75c49f63da03 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/privatelinkresources.go @@ -0,0 +1,124 @@ +package storage + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// PrivateLinkResourcesClient is the the Azure Storage Management API. +type PrivateLinkResourcesClient struct { + BaseClient +} + +// NewPrivateLinkResourcesClient creates an instance of the PrivateLinkResourcesClient client. +func NewPrivateLinkResourcesClient(subscriptionID string) PrivateLinkResourcesClient { + return NewPrivateLinkResourcesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewPrivateLinkResourcesClientWithBaseURI creates an instance of the PrivateLinkResourcesClient client using a custom +// endpoint. 
Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). +func NewPrivateLinkResourcesClientWithBaseURI(baseURI string, subscriptionID string) PrivateLinkResourcesClient { + return PrivateLinkResourcesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// ListByStorageAccount gets the private link resources that need to be created for a storage account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +func (client PrivateLinkResourcesClient) ListByStorageAccount(ctx context.Context, resourceGroupName string, accountName string) (result PrivateLinkResourceListResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PrivateLinkResourcesClient.ListByStorageAccount") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.PrivateLinkResourcesClient", "ListByStorageAccount", err.Error()) + } + + req, err := client.ListByStorageAccountPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.PrivateLinkResourcesClient", "ListByStorageAccount", nil, "Failure preparing request") + return + } + + resp, err := client.ListByStorageAccountSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.PrivateLinkResourcesClient", "ListByStorageAccount", resp, "Failure sending request") + return + } + + result, err = client.ListByStorageAccountResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.PrivateLinkResourcesClient", "ListByStorageAccount", resp, "Failure responding to request") + return + } + + return +} + +// ListByStorageAccountPreparer prepares the ListByStorageAccount request. 
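A sketch of ListByStorageAccount under the same assumptions; the fields on each returned private link resource are assumed from the models file and nil-guarded accordingly.

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	authorizer, err := auth.NewAuthorizerFromEnvironment() // assumed auth helper
	if err != nil {
		panic(err)
	}

	client := storage.NewPrivateLinkResourcesClient("00000000-0000-0000-0000-000000000000")
	client.Authorizer = authorizer

	result, err := client.ListByStorageAccount(context.Background(), "example-resources", "examplestorageacct")
	if err != nil {
		panic(err)
	}
	if result.Value != nil {
		for _, r := range *result.Value {
			if r.Name != nil {
				// Each entry describes a sub-resource (blob, file, queue, table, ...)
				// that a private endpoint can target.
				fmt.Println("private link resource:", *r.Name)
			}
		}
	}
}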
+func (client PrivateLinkResourcesClient) ListByStorageAccountPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateLinkResources", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByStorageAccountSender sends the ListByStorageAccount request. The method will close the +// http.Response Body if it receives an error. +func (client PrivateLinkResourcesClient) ListByStorageAccountSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListByStorageAccountResponder handles the response to the ListByStorageAccount request. The method always +// closes the http.Response Body. +func (client PrivateLinkResourcesClient) ListByStorageAccountResponder(resp *http.Response) (result PrivateLinkResourceListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/queue.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/queue.go new file mode 100644 index 000000000000..372c626f5678 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/queue.go @@ -0,0 +1,571 @@ +package storage + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// QueueClient is the the Azure Storage Management API. +type QueueClient struct { + BaseClient +} + +// NewQueueClient creates an instance of the QueueClient client. +func NewQueueClient(subscriptionID string) QueueClient { + return NewQueueClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewQueueClientWithBaseURI creates an instance of the QueueClient client using a custom endpoint. Use this when +// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewQueueClientWithBaseURI(baseURI string, subscriptionID string) QueueClient { + return QueueClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create creates a new queue with the specified queue name, under the specified account. 
+// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// queueName - a queue name must be unique within a storage account and must be between 3 and 63 characters.The +// name must comprise of lowercase alphanumeric and dash(-) characters only, it should begin and end with an +// alphanumeric character and it cannot have two consecutive dash(-) characters. +// queue - queue properties and metadata to be created with +func (client QueueClient) Create(ctx context.Context, resourceGroupName string, accountName string, queueName string, queue Queue) (result Queue, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/QueueClient.Create") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: queueName, + Constraints: []validation.Constraint{{Target: "queueName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "queueName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.QueueClient", "Create", err.Error()) + } + + req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, queueName, queue) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.QueueClient", "Create", nil, "Failure preparing request") + return + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.QueueClient", "Create", resp, "Failure sending request") + return + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.QueueClient", "Create", resp, "Failure responding to request") + return + } + + return +} + +// CreatePreparer prepares the Create request. 
+func (client QueueClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, queueName string, queue Queue) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "queueName": autorest.Encode("path", queueName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}", pathParameters), + autorest.WithJSON(queue), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client QueueClient) CreateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client QueueClient) CreateResponder(resp *http.Response) (result Queue, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the queue with the specified queue name, under the specified account if it exists. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// queueName - a queue name must be unique within a storage account and must be between 3 and 63 characters.The +// name must comprise of lowercase alphanumeric and dash(-) characters only, it should begin and end with an +// alphanumeric character and it cannot have two consecutive dash(-) characters. 
+func (client QueueClient) Delete(ctx context.Context, resourceGroupName string, accountName string, queueName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/QueueClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: queueName, + Constraints: []validation.Constraint{{Target: "queueName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "queueName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.QueueClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, queueName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.QueueClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "storage.QueueClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.QueueClient", "Delete", resp, "Failure responding to request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client QueueClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, queueName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "queueName": autorest.Encode("path", queueName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client QueueClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client QueueClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the queue with the specified queue name, under the specified account if it exists. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// queueName - a queue name must be unique within a storage account and must be between 3 and 63 characters.The +// name must comprise of lowercase alphanumeric and dash(-) characters only, it should begin and end with an +// alphanumeric character and it cannot have two consecutive dash(-) characters. +func (client QueueClient) Get(ctx context.Context, resourceGroupName string, accountName string, queueName string) (result Queue, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/QueueClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: queueName, + Constraints: []validation.Constraint{{Target: "queueName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "queueName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.QueueClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, accountName, queueName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.QueueClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.QueueClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.QueueClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client QueueClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, queueName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "queueName": autorest.Encode("path", queueName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client QueueClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client QueueClient) GetResponder(resp *http.Response) (result Queue, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of all the queues under the specified storage account +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// maxpagesize - optional, a maximum number of queues that should be included in a list queue response +// filter - optional, When specified, only the queues with a name starting with the given filter will be +// listed. 
+func (client QueueClient) List(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string) (result ListQueueResourcePage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/QueueClient.List") + defer func() { + sc := -1 + if result.lqr.Response.Response != nil { + sc = result.lqr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.QueueClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, accountName, maxpagesize, filter) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.QueueClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.lqr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.QueueClient", "List", resp, "Failure sending request") + return + } + + result.lqr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.QueueClient", "List", resp, "Failure responding to request") + return + } + if result.lqr.hasNextLink() && result.lqr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListPreparer prepares the List request. +func (client QueueClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(maxpagesize) > 0 { + queryParameters["$maxpagesize"] = autorest.Encode("query", maxpagesize) + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. 
+func (client QueueClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client QueueClient) ListResponder(resp *http.Response) (result ListQueueResource, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client QueueClient) listNextResults(ctx context.Context, lastResults ListQueueResource) (result ListQueueResource, err error) { + req, err := lastResults.listQueueResourcePreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage.QueueClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage.QueueClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.QueueClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client QueueClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string) (result ListQueueResourceIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/QueueClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, accountName, maxpagesize, filter) + return +} + +// Update creates a new queue with the specified queue name, under the specified account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// queueName - a queue name must be unique within a storage account and must be between 3 and 63 characters.The +// name must comprise of lowercase alphanumeric and dash(-) characters only, it should begin and end with an +// alphanumeric character and it cannot have two consecutive dash(-) characters. 
+// queue - queue properties and metadata to be created with +func (client QueueClient) Update(ctx context.Context, resourceGroupName string, accountName string, queueName string, queue Queue) (result Queue, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/QueueClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: queueName, + Constraints: []validation.Constraint{{Target: "queueName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "queueName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.QueueClient", "Update", err.Error()) + } + + req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, queueName, queue) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.QueueClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.QueueClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.QueueClient", "Update", resp, "Failure responding to request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client QueueClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, queueName string, queue Queue) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "queueName": autorest.Encode("path", queueName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}", pathParameters), + autorest.WithJSON(queue), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. 
+func (client QueueClient) UpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client QueueClient) UpdateResponder(resp *http.Response) (result Queue, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/queueservices.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/queueservices.go new file mode 100644 index 000000000000..f778ad7a1519 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/queueservices.go @@ -0,0 +1,313 @@ +package storage + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// QueueServicesClient is the the Azure Storage Management API. +type QueueServicesClient struct { + BaseClient +} + +// NewQueueServicesClient creates an instance of the QueueServicesClient client. +func NewQueueServicesClient(subscriptionID string) QueueServicesClient { + return NewQueueServicesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewQueueServicesClientWithBaseURI creates an instance of the QueueServicesClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewQueueServicesClientWithBaseURI(baseURI string, subscriptionID string) QueueServicesClient { + return QueueServicesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// GetServiceProperties gets the properties of a storage account’s Queue service, including properties for Storage +// Analytics and CORS (Cross-Origin Resource Sharing) rules. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
+func (client QueueServicesClient) GetServiceProperties(ctx context.Context, resourceGroupName string, accountName string) (result QueueServiceProperties, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/QueueServicesClient.GetServiceProperties") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.QueueServicesClient", "GetServiceProperties", err.Error()) + } + + req, err := client.GetServicePropertiesPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "GetServiceProperties", nil, "Failure preparing request") + return + } + + resp, err := client.GetServicePropertiesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "GetServiceProperties", resp, "Failure sending request") + return + } + + result, err = client.GetServicePropertiesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "GetServiceProperties", resp, "Failure responding to request") + return + } + + return +} + +// GetServicePropertiesPreparer prepares the GetServiceProperties request. +func (client QueueServicesClient) GetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "queueServiceName": autorest.Encode("path", "default"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/{queueServiceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetServicePropertiesSender sends the GetServiceProperties request. The method will close the +// http.Response Body if it receives an error. 
+func (client QueueServicesClient) GetServicePropertiesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetServicePropertiesResponder handles the response to the GetServiceProperties request. The method always +// closes the http.Response Body. +func (client QueueServicesClient) GetServicePropertiesResponder(resp *http.Response) (result QueueServiceProperties, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List list all queue services for the storage account +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +func (client QueueServicesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result ListQueueServices, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/QueueServicesClient.List") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.QueueServicesClient", "List", err.Error()) + } + + req, err := client.ListPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "List", resp, "Failure responding to request") + return + } + + return +} + +// ListPreparer prepares the List request. 
+func (client QueueServicesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client QueueServicesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client QueueServicesClient) ListResponder(resp *http.Response) (result ListQueueServices, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// SetServiceProperties sets the properties of a storage account’s Queue service, including properties for Storage +// Analytics and CORS (Cross-Origin Resource Sharing) rules. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// parameters - the properties of a storage account’s Queue service, only properties for Storage Analytics and +// CORS (Cross-Origin Resource Sharing) rules can be specified. 
+func (client QueueServicesClient) SetServiceProperties(ctx context.Context, resourceGroupName string, accountName string, parameters QueueServiceProperties) (result QueueServiceProperties, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/QueueServicesClient.SetServiceProperties") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.QueueServicesClient", "SetServiceProperties", err.Error()) + } + + req, err := client.SetServicePropertiesPreparer(ctx, resourceGroupName, accountName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "SetServiceProperties", nil, "Failure preparing request") + return + } + + resp, err := client.SetServicePropertiesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "SetServiceProperties", resp, "Failure sending request") + return + } + + result, err = client.SetServicePropertiesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "SetServiceProperties", resp, "Failure responding to request") + return + } + + return +} + +// SetServicePropertiesPreparer prepares the SetServiceProperties request. +func (client QueueServicesClient) SetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters QueueServiceProperties) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "queueServiceName": autorest.Encode("path", "default"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/{queueServiceName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// SetServicePropertiesSender sends the SetServiceProperties request. The method will close the +// http.Response Body if it receives an error. 
+func (client QueueServicesClient) SetServicePropertiesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// SetServicePropertiesResponder handles the response to the SetServiceProperties request. The method always +// closes the http.Response Body. +func (client QueueServicesClient) SetServicePropertiesResponder(resp *http.Response) (result QueueServiceProperties, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/skus.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/skus.go new file mode 100644 index 000000000000..5fdc10896d3a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/skus.go @@ -0,0 +1,109 @@ +package storage + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SkusClient is the the Azure Storage Management API. +type SkusClient struct { + BaseClient +} + +// NewSkusClient creates an instance of the SkusClient client. +func NewSkusClient(subscriptionID string) SkusClient { + return NewSkusClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSkusClientWithBaseURI creates an instance of the SkusClient client using a custom endpoint. Use this when +// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewSkusClientWithBaseURI(baseURI string, subscriptionID string) SkusClient { + return SkusClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List lists the available SKUs supported by Microsoft.Storage for given subscription. 
+func (client SkusClient) List(ctx context.Context) (result SkuListResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SkusClient.List") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.SkusClient", "List", err.Error()) + } + + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", resp, "Failure responding to request") + return + } + + return +} + +// ListPreparer prepares the List request. +func (client SkusClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/skus", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client SkusClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client SkusClient) ListResponder(resp *http.Response) (result SkuListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/table.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/table.go new file mode 100644 index 000000000000..51a83b904d3a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/table.go @@ -0,0 +1,556 @@ +package storage + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// TableClient is the the Azure Storage Management API. +type TableClient struct { + BaseClient +} + +// NewTableClient creates an instance of the TableClient client. +func NewTableClient(subscriptionID string) TableClient { + return NewTableClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewTableClientWithBaseURI creates an instance of the TableClient client using a custom endpoint. Use this when +// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewTableClientWithBaseURI(baseURI string, subscriptionID string) TableClient { + return TableClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create creates a new table with the specified table name, under the specified account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// tableName - a table name must be unique within a storage account and must be between 3 and 63 characters.The +// name must comprise of only alphanumeric characters and it cannot begin with a numeric character. +func (client TableClient) Create(ctx context.Context, resourceGroupName string, accountName string, tableName string) (result Table, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TableClient.Create") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: tableName, + Constraints: []validation.Constraint{{Target: "tableName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "tableName", Name: validation.MinLength, Rule: 3, Chain: nil}, + {Target: "tableName", Name: validation.Pattern, Rule: `^[A-Za-z][A-Za-z0-9]{2,62}$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.TableClient", "Create", err.Error()) + } + + req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, tableName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.TableClient", "Create", nil, "Failure preparing request") + return + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.TableClient", "Create", resp, "Failure 
sending request") + return + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.TableClient", "Create", resp, "Failure responding to request") + return + } + + return +} + +// CreatePreparer prepares the Create request. +func (client TableClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, tableName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "tableName": autorest.Encode("path", tableName), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/default/tables/{tableName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client TableClient) CreateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client TableClient) CreateResponder(resp *http.Response) (result Table, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the table with the specified table name, under the specified account if it exists. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// tableName - a table name must be unique within a storage account and must be between 3 and 63 characters.The +// name must comprise of only alphanumeric characters and it cannot begin with a numeric character. 
+func (client TableClient) Delete(ctx context.Context, resourceGroupName string, accountName string, tableName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TableClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: tableName, + Constraints: []validation.Constraint{{Target: "tableName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "tableName", Name: validation.MinLength, Rule: 3, Chain: nil}, + {Target: "tableName", Name: validation.Pattern, Rule: `^[A-Za-z][A-Za-z0-9]{2,62}$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.TableClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, tableName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.TableClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "storage.TableClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.TableClient", "Delete", resp, "Failure responding to request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client TableClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, tableName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "tableName": autorest.Encode("path", tableName), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/default/tables/{tableName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client TableClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client TableClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the table with the specified table name, under the specified account if it exists. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// tableName - a table name must be unique within a storage account and must be between 3 and 63 characters.The +// name must comprise of only alphanumeric characters and it cannot begin with a numeric character. +func (client TableClient) Get(ctx context.Context, resourceGroupName string, accountName string, tableName string) (result Table, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TableClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: tableName, + Constraints: []validation.Constraint{{Target: "tableName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "tableName", Name: validation.MinLength, Rule: 3, Chain: nil}, + {Target: "tableName", Name: validation.Pattern, Rule: `^[A-Za-z][A-Za-z0-9]{2,62}$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.TableClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, accountName, tableName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.TableClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.TableClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.TableClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client TableClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, tableName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "tableName": autorest.Encode("path", tableName), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/default/tables/{tableName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client TableClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client TableClient) GetResponder(resp *http.Response) (result Table, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of all the tables under the specified storage account +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
+func (client TableClient) List(ctx context.Context, resourceGroupName string, accountName string) (result ListTableResourcePage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TableClient.List") + defer func() { + sc := -1 + if result.ltr.Response.Response != nil { + sc = result.ltr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.TableClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.TableClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.ltr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.TableClient", "List", resp, "Failure sending request") + return + } + + result.ltr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.TableClient", "List", resp, "Failure responding to request") + return + } + if result.ltr.hasNextLink() && result.ltr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListPreparer prepares the List request. +func (client TableClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/default/tables", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client TableClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client TableClient) ListResponder(resp *http.Response) (result ListTableResource, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client TableClient) listNextResults(ctx context.Context, lastResults ListTableResource) (result ListTableResource, err error) { + req, err := lastResults.listTableResourcePreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage.TableClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage.TableClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.TableClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client TableClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string) (result ListTableResourceIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TableClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, accountName) + return +} + +// Update creates a new table with the specified table name, under the specified account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// tableName - a table name must be unique within a storage account and must be between 3 and 63 characters.The +// name must comprise of only alphanumeric characters and it cannot begin with a numeric character. 
+func (client TableClient) Update(ctx context.Context, resourceGroupName string, accountName string, tableName string) (result Table, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TableClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: tableName, + Constraints: []validation.Constraint{{Target: "tableName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "tableName", Name: validation.MinLength, Rule: 3, Chain: nil}, + {Target: "tableName", Name: validation.Pattern, Rule: `^[A-Za-z][A-Za-z0-9]{2,62}$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.TableClient", "Update", err.Error()) + } + + req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, tableName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.TableClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.TableClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.TableClient", "Update", resp, "Failure responding to request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client TableClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, tableName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "tableName": autorest.Encode("path", tableName), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/default/tables/{tableName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. 
+func (client TableClient) UpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client TableClient) UpdateResponder(resp *http.Response) (result Table, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/tableservices.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/tableservices.go new file mode 100644 index 000000000000..f8f19232e0fb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/tableservices.go @@ -0,0 +1,313 @@ +package storage + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// TableServicesClient is the the Azure Storage Management API. +type TableServicesClient struct { + BaseClient +} + +// NewTableServicesClient creates an instance of the TableServicesClient client. +func NewTableServicesClient(subscriptionID string) TableServicesClient { + return NewTableServicesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewTableServicesClientWithBaseURI creates an instance of the TableServicesClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewTableServicesClientWithBaseURI(baseURI string, subscriptionID string) TableServicesClient { + return TableServicesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// GetServiceProperties gets the properties of a storage account’s Table service, including properties for Storage +// Analytics and CORS (Cross-Origin Resource Sharing) rules. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
+func (client TableServicesClient) GetServiceProperties(ctx context.Context, resourceGroupName string, accountName string) (result TableServiceProperties, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TableServicesClient.GetServiceProperties") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.TableServicesClient", "GetServiceProperties", err.Error()) + } + + req, err := client.GetServicePropertiesPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "GetServiceProperties", nil, "Failure preparing request") + return + } + + resp, err := client.GetServicePropertiesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "GetServiceProperties", resp, "Failure sending request") + return + } + + result, err = client.GetServicePropertiesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "GetServiceProperties", resp, "Failure responding to request") + return + } + + return +} + +// GetServicePropertiesPreparer prepares the GetServiceProperties request. +func (client TableServicesClient) GetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "tableServiceName": autorest.Encode("path", "default"), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/{tableServiceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetServicePropertiesSender sends the GetServiceProperties request. The method will close the +// http.Response Body if it receives an error. 
+func (client TableServicesClient) GetServicePropertiesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetServicePropertiesResponder handles the response to the GetServiceProperties request. The method always +// closes the http.Response Body. +func (client TableServicesClient) GetServicePropertiesResponder(resp *http.Response) (result TableServiceProperties, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List list all table services for the storage account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +func (client TableServicesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result ListTableServices, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TableServicesClient.List") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.TableServicesClient", "List", err.Error()) + } + + req, err := client.ListPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "List", resp, "Failure responding to request") + return + } + + return +} + +// ListPreparer prepares the List request. 
+func (client TableServicesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client TableServicesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client TableServicesClient) ListResponder(resp *http.Response) (result ListTableServices, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// SetServiceProperties sets the properties of a storage account’s Table service, including properties for Storage +// Analytics and CORS (Cross-Origin Resource Sharing) rules. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// parameters - the properties of a storage account’s Table service, only properties for Storage Analytics and +// CORS (Cross-Origin Resource Sharing) rules can be specified. 
+func (client TableServicesClient) SetServiceProperties(ctx context.Context, resourceGroupName string, accountName string, parameters TableServiceProperties) (result TableServiceProperties, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TableServicesClient.SetServiceProperties") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.TableServicesClient", "SetServiceProperties", err.Error()) + } + + req, err := client.SetServicePropertiesPreparer(ctx, resourceGroupName, accountName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "SetServiceProperties", nil, "Failure preparing request") + return + } + + resp, err := client.SetServicePropertiesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "SetServiceProperties", resp, "Failure sending request") + return + } + + result, err = client.SetServicePropertiesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "SetServiceProperties", resp, "Failure responding to request") + return + } + + return +} + +// SetServicePropertiesPreparer prepares the SetServiceProperties request. +func (client TableServicesClient) SetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters TableServiceProperties) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "tableServiceName": autorest.Encode("path", "default"), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/{tableServiceName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// SetServicePropertiesSender sends the SetServiceProperties request. The method will close the +// http.Response Body if it receives an error. 
+func (client TableServicesClient) SetServicePropertiesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// SetServicePropertiesResponder handles the response to the SetServiceProperties request. The method always +// closes the http.Response Body. +func (client TableServicesClient) SetServicePropertiesResponder(resp *http.Response) (result TableServiceProperties, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/usages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/usages.go new file mode 100644 index 000000000000..4ef93c30a6ab --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/usages.go @@ -0,0 +1,112 @@ +package storage + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// UsagesClient is the the Azure Storage Management API. +type UsagesClient struct { + BaseClient +} + +// NewUsagesClient creates an instance of the UsagesClient client. +func NewUsagesClient(subscriptionID string) UsagesClient { + return NewUsagesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewUsagesClientWithBaseURI creates an instance of the UsagesClient client using a custom endpoint. Use this when +// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewUsagesClientWithBaseURI(baseURI string, subscriptionID string) UsagesClient { + return UsagesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// ListByLocation gets the current usage count and the limit for the resources of the location under the subscription. +// Parameters: +// location - the location of the Azure Storage resource. 
+func (client UsagesClient) ListByLocation(ctx context.Context, location string) (result UsageListResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/UsagesClient.ListByLocation") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.UsagesClient", "ListByLocation", err.Error()) + } + + req, err := client.ListByLocationPreparer(ctx, location) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.UsagesClient", "ListByLocation", nil, "Failure preparing request") + return + } + + resp, err := client.ListByLocationSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.UsagesClient", "ListByLocation", resp, "Failure sending request") + return + } + + result, err = client.ListByLocationResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.UsagesClient", "ListByLocation", resp, "Failure responding to request") + return + } + + return +} + +// ListByLocationPreparer prepares the ListByLocation request. +func (client UsagesClient) ListByLocationPreparer(ctx context.Context, location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/usages", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByLocationSender sends the ListByLocation request. The method will close the +// http.Response Body if it receives an error. +func (client UsagesClient) ListByLocationSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListByLocationResponder handles the response to the ListByLocation request. The method always +// closes the http.Response Body. 
+func (client UsagesClient) ListByLocationResponder(resp *http.Response) (result UsageListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/version.go similarity index 88% rename from vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/version.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/version.go index 0834dcece3ec..5b3e81db0a40 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage/version.go @@ -1,4 +1,4 @@ -package account +package storage import "github.com/Azure/azure-sdk-for-go/version" @@ -10,7 +10,7 @@ import "github.com/Azure/azure-sdk-for-go/version" // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return "Azure-SDK-For-Go/" + Version() + " account/2016-11-01" + return "Azure-SDK-For-Go/" + Version() + " storage/2021-04-01" } // Version returns the semantic version (see http://semver.org) of the client. diff --git a/vendor/github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids/parse.go b/vendor/github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids/parse.go index 3272b21d0215..609e825bdedf 100644 --- a/vendor/github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids/parse.go +++ b/vendor/github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids/parse.go @@ -167,6 +167,12 @@ func (p Parser) Parse(input string, insensitively bool) (*ParseResult, error) { return nil, fmt.Errorf("expected %d segments but got %d for %q", len(p.segments), len(parsed), input) } + for k, v := range parsed { + if v == "" { + return nil, fmt.Errorf("segment %q is required but got an empty value", k) + } + } + return &ParseResult{ Parsed: parsed, }, nil diff --git a/vendor/modules.txt b/vendor/modules.txt index 8d14cebdebb2..69fb7fbf87db 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -23,7 +23,7 @@ github.com/Azure/azure-sdk-for-go/services/botservice/mgmt/2021-03-01/botservice github.com/Azure/azure-sdk-for-go/services/cdn/mgmt/2020-09-01/cdn github.com/Azure/azure-sdk-for-go/services/cognitiveservices/mgmt/2021-04-30/cognitiveservices github.com/Azure/azure-sdk-for-go/services/communication/mgmt/2020-08-20/communication -github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute +github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute github.com/Azure/azure-sdk-for-go/services/consumption/mgmt/2019-10-01/consumption github.com/Azure/azure-sdk-for-go/services/containerinstance/mgmt/2019-12-01/containerinstance github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice @@ -32,9 +32,7 @@ github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-10-15/documentdb github.com/Azure/azure-sdk-for-go/services/costmanagement/mgmt/2019-10-01/costmanagement github.com/Azure/azure-sdk-for-go/services/databoxedge/mgmt/2020-12-01/databoxedge github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory 
-github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/account github.com/Azure/azure-sdk-for-go/services/datalake/store/2016-11-01/filesystem -github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account github.com/Azure/azure-sdk-for-go/services/datamigration/mgmt/2018-04-19/datamigration github.com/Azure/azure-sdk-for-go/services/datashare/mgmt/2019-11-01/datashare github.com/Azure/azure-sdk-for-go/services/devspaces/mgmt/2019-04-01/devspaces @@ -115,6 +113,7 @@ github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2020-06-01/resources github.com/Azure/azure-sdk-for-go/services/search/mgmt/2020-03-13/search github.com/Azure/azure-sdk-for-go/services/servicefabric/mgmt/2021-06-01/servicefabric github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-01-01/storage +github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage github.com/Azure/azure-sdk-for-go/services/storagecache/mgmt/2021-03-01/storagecache github.com/Azure/azure-sdk-for-go/services/storagesync/mgmt/2020-03-01/storagesync github.com/Azure/azure-sdk-for-go/services/subscription/mgmt/2020-09-01/subscription @@ -254,7 +253,7 @@ github.com/googleapis/gax-go/v2 # github.com/hashicorp/errwrap v1.1.0 ## explicit github.com/hashicorp/errwrap -# github.com/hashicorp/go-azure-helpers v0.17.0 +# github.com/hashicorp/go-azure-helpers v0.17.1 ## explicit github.com/hashicorp/go-azure-helpers/authentication github.com/hashicorp/go-azure-helpers/lang/dates diff --git a/website/docs/r/backup_protected_vm.html.markdown b/website/docs/r/backup_protected_vm.html.markdown index 5e8debb7576d..f7aca9ea25ba 100644 --- a/website/docs/r/backup_protected_vm.html.markdown +++ b/website/docs/r/backup_protected_vm.html.markdown @@ -56,6 +56,10 @@ The following arguments are supported: * `backup_policy_id` - (Required) Specifies the id of the backup policy to use. +* `exclude_disk_luns` - (Optional) A list of Disks' Logical Unit Numbers(LUN) to be excluded for VM Protection. + +* `include_disk_luns` - (Optional) A list of Disks' Logical Unit Numbers(LUN) to be included for VM Protection. + * `tags` - (Optional) A mapping of tags to assign to the resource. ## Attributes Reference diff --git a/website/docs/r/iothub.html.markdown b/website/docs/r/iothub.html.markdown index 62031339c8de..aee680f9729c 100644 --- a/website/docs/r/iothub.html.markdown +++ b/website/docs/r/iothub.html.markdown @@ -202,7 +202,7 @@ A `route` block supports the following: * `name` - (Required) The name of the route. -* `source` - (Required) The source that the routing rule is to be applied to, such as `DeviceMessages`. Possible values include: `RoutingSourceInvalid`, `RoutingSourceDeviceMessages`, `RoutingSourceTwinChangeEvents`, `RoutingSourceDeviceLifecycleEvents`, `RoutingSourceDeviceConnectionStateEvents`, `RoutingSourceDeviceJobLifecycleEvents`. +* `source` - (Required) The source that the routing rule is to be applied to, such as `DeviceMessages`. Possible values include: `Invalid`, `DeviceMessages`, `TwinChangeEvents`, `DeviceLifecycleEvents`, `DeviceConnectionStateEvents`, `DeviceJobLifecycleEvents`. * `condition` - (Optional) The condition that is evaluated to apply the routing rule. If no condition is provided, it evaluates to true by default. For grammar, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language. 
@@ -224,7 +224,7 @@ An `enrichment` block supports the following: A `fallback_route` block supports the following: -* `source` - (Optional) The source that the routing rule is to be applied to, such as `DeviceMessages`. Possible values include: `RoutingSourceInvalid`, `RoutingSourceDeviceMessages`, `RoutingSourceTwinChangeEvents`, `RoutingSourceDeviceLifecycleEvents`, `RoutingSourceDeviceConnectionStateEvents`, `RoutingSourceDeviceJobLifecycleEvents`. +* `source` - (Optional) The source that the routing rule is to be applied to, such as `DeviceMessages`. Possible values include: `Invalid`, `DeviceMessages`, `TwinChangeEvents`, `DeviceLifecycleEvents`, `DeviceConnectionStateEvents`, `DeviceJobLifecycleEvents`. * `condition` - (Optional) The condition that is evaluated to apply the routing rule. If no condition is provided, it evaluates to true by default. For grammar, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language. diff --git a/website/docs/r/machine_learning_workspace.html.markdown b/website/docs/r/machine_learning_workspace.html.markdown index afcc7e523af1..913b0351a196 100644 --- a/website/docs/r/machine_learning_workspace.html.markdown +++ b/website/docs/r/machine_learning_workspace.html.markdown @@ -60,6 +60,99 @@ resource "azurerm_machine_learning_workspace" "example" { } ``` +## Example Usage with Data encryption + +~> **NOTE:** The Key Vault must enable purge protection. + +```hcl +provider "azurerm" { + features { + key_vault { + purge_soft_delete_on_destroy = false + } + } +} + +data "azurerm_client_config" "current" {} + +resource "azurerm_resource_group" "example" { + name = "example-resources" + location = "West Europe" +} + +resource "azurerm_application_insights" "example" { + name = "workspace-example-ai" + location = azurerm_resource_group.example.location + resource_group_name = azurerm_resource_group.example.name + application_type = "web" +} + +resource "azurerm_key_vault" "example" { + name = "workspaceexamplekeyvault" + location = azurerm_resource_group.example.location + resource_group_name = azurerm_resource_group.example.name + tenant_id = data.azurerm_client_config.current.tenant_id + sku_name = "premium" + purge_protection_enabled = true +} +resource "azurerm_key_vault_access_policy" "example" { + key_vault_id = azurerm_key_vault.example.id + tenant_id = data.azurerm_client_config.current.tenant_id + object_id = data.azurerm_client_config.current.object_id + + key_permissions = [ + "Create", + "Get", + "Delete", + "Purge", + ] +} + +resource "azurerm_storage_account" "example" { + name = "workspacestorageaccount" + location = azurerm_resource_group.example.location + resource_group_name = azurerm_resource_group.example.name + account_tier = "Standard" + account_replication_type = "GRS" +} + +resource "azurerm_key_vault_key" "example" { + name = "workspaceexamplekeyvaultkey" + key_vault_id = azurerm_key_vault.example.id + key_type = "RSA" + key_size = 2048 + + key_opts = [ + "decrypt", + "encrypt", + "sign", + "unwrapKey", + "verify", + "wrapKey", + ] + + depends_on = [azurerm_key_vault.example, azurerm_key_vault_access_policy.example] +} + +resource "azurerm_machine_learning_workspace" "example" { + name = "example-workspace" + location = azurerm_resource_group.example.location + resource_group_name = azurerm_resource_group.example.name + application_insights_id = azurerm_application_insights.example.id + key_vault_id = azurerm_key_vault.example.id + storage_account_id = azurerm_storage_account.example.id + + identity { + type = 
"SystemAssigned" + } + + encryption { + key_vault_id = azurerm_key_vault.example.id + key_id = azurerm_key_vault_key.example.id + } +} +``` + ## Argument Reference The following arguments are supported: @@ -108,6 +201,14 @@ An `identity` block supports the following: * `type` - (Required) The Type of Identity which should be used for this Disk Encryption Set. At this time the only possible value is `SystemAssigned`. +--- + +An `encryption` block supports the following: + +* `key_vault_id` - (Required) The ID of the keyVault where the customer owned encryption key is present. + +* `key_id` - (Required) The Key Vault URI to access the encryption key. + ## Attributes Reference The following attributes are exported: diff --git a/website/docs/r/managed_disk.html.markdown b/website/docs/r/managed_disk.html.markdown index 9444e69e5175..956215abd1a1 100644 --- a/website/docs/r/managed_disk.html.markdown +++ b/website/docs/r/managed_disk.html.markdown @@ -101,6 +101,10 @@ The following arguments are supported: * `disk_mbps_read_write` - (Optional) The bandwidth allowed for this disk; only settable for UltraSSD disks. MBps means millions of bytes per second. +* `disk_iops_read_only` - (Optional) The number of IOPS allowed across all VMs mounting the shared disk as read-only; only settable for UltraSSD disks with shared disk enabled. One operation can transfer between 4k and 256k bytes. + +* `disk_mbps_read_only` - (Optional) The bandwidth allowed across all VMs mounting the shared disk as read-only; only settable for UltraSSD disks with shared disk enabled. MBps means millions of bytes per second. + * `disk_size_gb` - (Optional, Required for a new managed disk) Specifies the size of the managed disk to create in gigabytes. If `create_option` is `Copy` or `FromImage`, then the value must be equal to or greater than the source's size. The size can only be increased. ~> **NOTE:** Changing this value is disruptive if the disk is attached to a Virtual Machine. The VM will be shut down and de-allocated as required by Azure to action the change. Terraform will attempt to start the machine again after the update if it was in a `running` state when the apply was started. diff --git a/website/docs/r/monitor_private_link_scope.html.markdown b/website/docs/r/monitor_private_link_scope.html.markdown new file mode 100644 index 000000000000..63c5f7eda4aa --- /dev/null +++ b/website/docs/r/monitor_private_link_scope.html.markdown @@ -0,0 +1,58 @@ +--- +subcategory: "Monitor" +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_monitor_private_link_scope" +description: |- + Manages an Azure Monitor Private Link Scope +--- + +# azurerm_monitor_private_link_scope + +Manages an Azure Monitor Private Link Scope. + +## Example Usage + +```hcl +resource "azurerm_resource_group" "example" { + name = "example-resources" + location = "West Europe" +} + +resource "azurerm_monitor_private_link_scope" "example" { + name = "example-ampls" + resource_group_name = azurerm_resource_group.example.name +} +``` + +## Arguments Reference + +The following arguments are supported: + +* `name` - (Required) The name of the Azure Monitor Private Link Scope. Changing this forces a new resource to be created. + +* `resource_group_name` - (Required) The name of the Resource Group where the Azure Monitor Private Link Scope should exist. Changing this forces a new resource to be created. + +* `tags` - (Optional) A mapping of tags which should be assigned to the Azure Monitor Private Link Scope. 
+ +## Attributes Reference + +In addition to the Arguments listed above - the following Attributes are exported: + +* `id` - The ID of the Azure Monitor Private Link Scope. + +## Timeouts + +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: + +* `create` - (Defaults to 5 hours) Used when creating the Azure Monitor Private Link Scope. +* `read` - (Defaults to 5 minutes) Used when retrieving the Azure Monitor Private Link Scope. +* `update` - (Defaults to 5 hours) Used when updating the Azure Monitor Private Link Scope. +* `delete` - (Defaults to 5 hours) Used when deleting the Azure Monitor Private Link Scope. + +## Import + +Azure Monitor Private Link Scopes can be imported using the `resource id`, e.g. + +```shell +terraform import azurerm_monitor_private_link_scope.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Insights/privateLinkScopes/pls1 +``` diff --git a/website/docs/r/mssql_server.html.markdown b/website/docs/r/mssql_server.html.markdown index 60eddccd3c3b..a80c96a74b2e 100644 --- a/website/docs/r/mssql_server.html.markdown +++ b/website/docs/r/mssql_server.html.markdown @@ -44,13 +44,6 @@ resource "azurerm_mssql_server" "example" { object_id = "00000000-0000-0000-0000-000000000000" } - extended_auditing_policy { - storage_endpoint = azurerm_storage_account.example.primary_blob_endpoint - storage_account_access_key = azurerm_storage_account.example.primary_access_key - storage_account_access_key_is_secondary = true - retention_in_days = 6 - } - tags = { environment = "production" } @@ -74,8 +67,6 @@ The following arguments are supported: * `azuread_administrator` - (Optional) An `azuread_administrator` block as defined below. -* `extended_auditing_policy` - (Optional) A `extended_auditing_policy` block as defined below. - * `connection_policy` - (Optional) The connection policy the server will use. Possible values are `Default`, `Proxy`, and `Redirect`. Defaults to `Default`. * `identity` - (Optional) An `identity` block as defined below. @@ -132,20 +123,6 @@ An `azuread_administrator` block supports the following: * `azuread_authentication_only` - (Optional) Specifies whether only AD Users and administrators (like `azuread_administrator.0.login_username`) can be used to login or also local database users (like `administrator_login`). ---- - -An `extended_auditing_policy` block supports the following: - -* `storage_account_access_key` - (Optional) Specifies the access key to use for the auditing storage account. - -* `storage_endpoint` - (Optional) Specifies the blob storage endpoint (e.g. https://MyAccount.blob.core.windows.net). - -* `storage_account_access_key_is_secondary` - (Optional) Specifies whether `storage_account_access_key` value is the storage's secondary key. - -* `retention_in_days` - (Optional) Specifies the number of days to retain logs for in the storage account. - -* `log_monitoring_enabled` - (Optional) Enable audit events to Azure Monitor? To enable server audit events to Azure Monitor, please enable its main database audit events to Azure Monitor. 
- ### Timeouts The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: diff --git a/website/docs/r/mysql_flexible_server_firewall_rule.html.markdown b/website/docs/r/mysql_flexible_server_firewall_rule.html.markdown new file mode 100644 index 000000000000..84d015976b7f --- /dev/null +++ b/website/docs/r/mysql_flexible_server_firewall_rule.html.markdown @@ -0,0 +1,113 @@ +--- +subcategory: "Database" +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_mysql_flexible_server_firewall_rule" +description: |- + Manages a Firewall Rule for a MySQL Flexible Server. +--- + +# azurerm_mysql_flexible_server_firewall_rule + +Manages a Firewall Rule for a MySQL Flexible Server. + +## Example Usage (Single IP Address) + +```hcl +resource "azurerm_resource_group" "example" { + name = "api-rg-pro" + location = "West Europe" +} + +resource "azurerm_mysql_flexible_server" "example" { + # ... +} + +resource "azurerm_mysql_flexible_server_firewall_rule" "example" { + name = "office" + resource_group_name = azurerm_resource_group.example.name + server_name = azurerm_mysql_flexible_server.example.name + start_ip_address = "40.112.8.12" + end_ip_address = "40.112.8.12" +} +``` + +## Example Usage (IP Range) + +```hcl +resource "azurerm_resource_group" "example" { + name = "api-rg-pro" + location = "West Europe" +} + +resource "azurerm_mysql_flexible_server" "example" { + # ... +} + +resource "azurerm_mysql_flexible_server_firewall_rule" "example" { + name = "office" + resource_group_name = azurerm_resource_group.example.name + server_name = azurerm_mysql_flexible_server.example.name + start_ip_address = "40.112.0.0" + end_ip_address = "40.112.255.255" +} +``` + +## Example Usage (Allow access to Azure services) + +```hcl +resource "azurerm_resource_group" "example" { + name = "api-rg-pro" + location = "West Europe" +} + +resource "azurerm_mysql_flexible_server" "example" { + # ... +} + +resource "azurerm_mysql_flexible_server_firewall_rule" "example" { + name = "office" + resource_group_name = azurerm_resource_group.example.name + server_name = azurerm_mysql_flexible_server.example.name + start_ip_address = "0.0.0.0" + end_ip_address = "0.0.0.0" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) Specifies the name of the MySQL Firewall Rule. Changing this forces a new resource to be created. + +* `server_name` - (Required) Specifies the name of the MySQL Flexible Server. Changing this forces a new resource to be created. + +* `resource_group_name` - (Required) The name of the resource group in which the MySQL Flexible Server exists. Changing this forces a new resource to be created. + +* `start_ip_address` - (Required) Specifies the Start IP Address associated with this Firewall Rule. Changing this forces a new resource to be created. + +* `end_ip_address` - (Required) Specifies the End IP Address associated with this Firewall Rule. Changing this forces a new resource to be created. + +-> **NOTE:** The Azure feature `Allow access to Azure services` can be enabled by setting `start_ip_address` and `end_ip_address` to `0.0.0.0` which ([is documented in the Azure API Docs](https://docs.microsoft.com/en-us/rest/api/sql/firewallrules/createorupdate)). + +## Attributes Reference + +The following attributes are exported: + +* `id` - The ID of the MySQL Firewall Rule. 
+ +## Timeouts + +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: + +* `create` - (Defaults to 30 minutes) Used when creating the MySQL Firewall Rule. +* `update` - (Defaults to 30 minutes) Used when updating the MySQL Firewall Rule. +* `read` - (Defaults to 5 minutes) Used when retrieving the MySQL Firewall Rule. +* `delete` - (Defaults to 30 minutes) Used when deleting the MySQL Firewall Rule. + +## Import + +MySQL Firewall Rules can be imported using the `resource id`, e.g. + +```shell +terraform import azurerm_mysql_flexible_server_firewall_rule.rule1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.DBforMySQL/flexibleServers/flexibleServer1/firewallRules/firewallRule1 +``` diff --git a/website/docs/r/orchestrated_virtual_machine_scale_set.html.markdown b/website/docs/r/orchestrated_virtual_machine_scale_set.html.markdown index cda285e7ee83..603386730b40 100644 --- a/website/docs/r/orchestrated_virtual_machine_scale_set.html.markdown +++ b/website/docs/r/orchestrated_virtual_machine_scale_set.html.markdown @@ -10,9 +10,13 @@ description: |- Manages an Orchestrated Virtual Machine Scale Set. --> **Note:** Orchestrated Virtual Machine Scale Sets are in Public Preview and it may receive breaking changes - [more details can be found in the Azure Documentation](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/orchestration-modes). +## Disclaimers --> **Note:** Azure is planning to deprecate the `single_placement_group` attribute in the Orchestrated Virtual Machine Scale Set starting from api-version `2019-12-01` and there will be a breaking change in the Orchestrated Virtual Machine Scale Set. +~> **NOTE:** All arguments including the administrator login and password will be stored in the raw state as plain-text. [Read more about sensitive data in state](/docs/state/sensitive-data.html). + +~> **NOTE:** Orchestrated Virtual Machine Scale Sets are in Public Preview and may receive breaking changes - [more details can be found in the Azure Documentation](https://docs.microsoft.com/azure/virtual-machine-scale-sets/orchestration-modes). + +~> **NOTE:** Due to a bug in the service code, `extensions` are not currently supported in the `azurerm_orchestrated_virtual_machine_scale_set` resource. The ETA for `extensions` support is tentatively set for January 15, 2022. ## Example Usage @@ -47,16 +51,340 @@ The following arguments are supported: ~> **NOTE:** The number of Fault Domains varies depending on which Azure Region you're using - a list can be found [here](https://github.com/MicrosoftDocs/azure-docs/blob/master/includes/managed-disks-common-fault-domain-region-list.md). -* `proximity_placement_group_id` - (Optional) The ID of the Proximity Placement Group which the Virtual Machine should be assigned to. Changing this forces a new resource to be created. +* `sku_name` - (Optional) The `name` of the SKU to be used by this Orchestrated Virtual Machine Scale Set.
Valid values include: any of the [General purpose](https://docs.microsoft.com/azure/virtual-machines/sizes-general), [Compute optimized](https://docs.microsoft.com/azure/virtual-machines/sizes-compute), [Memory optimized](https://docs.microsoft.com/azure/virtual-machines/sizes-memory), [Storage optimized](https://docs.microsoft.com/azure/virtual-machines/sizes-storage), [GPU optimized](https://docs.microsoft.com/azure/virtual-machines/sizes-gpu), [FPGA optimized](https://docs.microsoft.com/azure/virtual-machines/sizes-field-programmable-gate-arrays), [High performance](https://docs.microsoft.com/azure/virtual-machines/sizes-hpc), or [Previous generation](https://docs.microsoft.com/azure/virtual-machines/sizes-previous-gen) virtual machine SKUs. + +* `instances` - (Optional) The number of Virtual Machines in the Orchestrated Virtual Machine Scale Set. + +* `network_interface` - (Optional) One or more `network_interface` blocks as defined below. + +* `os_profile` - (Optional) An `os_profile` block as defined below. + +* `os_disk` - (Optional) An `os_disk` block as defined below. + +* `boot_diagnostics` - (Optional) A `boot_diagnostics` block as defined below. + +* `computer_name_prefix` - (Optional) The prefix which should be used for the name of the Virtual Machines in this Scale Set. If unspecified, this defaults to the value for the `name` field. If the value of the `name` field is not a valid `computer_name_prefix`, then you must specify `computer_name_prefix`. + +* `data_disk` - (Optional) One or more `data_disk` blocks as defined below. + + +* `eviction_policy` - (Optional) The Policy which should be used when Virtual Machines are Evicted from the Scale Set. Changing this forces a new resource to be created. -* `single_placement_group` - (Optional) Should the Orchestrated Virtual Machine Scale Set use single placement group? Defaults to `false`. +* `identity` - (Optional) An `identity` block as defined below. + +* `license_type` - (Optional) Specifies the type of on-premises license (also known as Azure Hybrid Use Benefit) which should be used for this Orchestrated Virtual Machine Scale Set. Possible values are `None`, `Windows_Client` and `Windows_Server`. Changing this forces a new resource to be created. + +* `max_bid_price` - (Optional) The maximum price you're willing to pay for each Orchestrated Virtual Machine in this Scale Set, in US Dollars, which must be greater than the current spot price. If this bid price falls below the current spot price the Virtual Machines in the Scale Set will be evicted using the `eviction_policy`. Defaults to `-1`, which means that each Virtual Machine in the Orchestrated Scale Set should not be evicted for price reasons. + +* `plan` - (Optional) A `plan` block as documented below. + +* `priority` - (Optional) The Priority of this Orchestrated Virtual Machine Scale Set. Possible values are `Regular` and `Spot`. Defaults to `Regular`. Changing this value forces a new resource. + +* `source_image_id` - (Optional) The ID of an Image which each Virtual Machine in this Scale Set should be based on. + +* `source_image_reference` - (Optional) A `source_image_reference` block as defined below. + +* `termination_notification` - (Optional) A `termination_notification` block as defined below. + +* `proximity_placement_group_id` - (Optional) The ID of the Proximity Placement Group which the Orchestrated Virtual Machine should be assigned to. Changing this forces a new resource to be created.
* `zones` - (Optional) A list of Availability Zones in which the Virtual Machines in this Scale Set should be created in. Changing this forces a new resource to be created.
 
-~> **Note:** Due to a limitation of the Azure API at this time only one Availability Zone can be defined.
+~> **NOTE:** Due to a limitation of the Azure API at this time only one Availability Zone can be defined.
 
 * `tags` - (Optional) A mapping of tags which should be assigned to this Orchestrated Virtual Machine Scale Set.
+
+---
+
+An `os_profile` block supports the following:
+
+* `custom_data` - (Optional) The Base64-Encoded Custom Data which should be used for this Orchestrated Virtual Machine Scale Set.
+
+~> **NOTE:** When Custom Data has been configured, it's not possible to remove it without tainting the Orchestrated Virtual Machine Scale Set, due to a limitation of the Azure API.
+
+* `windows_configuration` - (Optional) A `windows_configuration` block as documented below.
+
+* `linux_configuration` - (Optional) A `linux_configuration` block as documented below.
+
+---
+
+A `windows_configuration` block supports the following:
+
+* `admin_username` - (Required) The username of the local administrator on each Orchestrated Virtual Machine Scale Set instance. Changing this forces a new resource to be created.
+
+* `admin_password` - (Required) The Password which should be used for the local administrator on this Virtual Machine. Changing this forces a new resource to be created.
+
+* `computer_name_prefix` - (Optional) The prefix which should be used for the name of the Virtual Machines in this Scale Set. If unspecified this defaults to the value for the `name` field. If the value of the `name` field is not a valid `computer_name_prefix`, then you must specify `computer_name_prefix`.
+
+* `enable_automatic_updates` - (Optional) Are automatic updates enabled for this Virtual Machine? Defaults to `true`.
+
+* `provision_vm_agent` - (Optional) Should the Azure VM Agent be provisioned on each Virtual Machine in the Scale Set? Defaults to `true`. Changing this value forces a new resource to be created.
+
+* `secret` - (Optional) One or more `secret` blocks as defined below.
+
+* `timezone` - (Optional) Specifies the time zone of the virtual machine; the possible values are defined [here](https://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/).
+
+* `winrm_listener` - (Optional) One or more `winrm_listener` blocks as defined below.
+
+---
+
+A `linux_configuration` block supports the following:
+
+* `admin_username` - (Required) The username of the local administrator on each Orchestrated Virtual Machine Scale Set instance. Changing this forces a new resource to be created.
+
+* `admin_password` - (Optional) The Password which should be used for the local administrator on this Virtual Machine. Changing this forces a new resource to be created.
+
+* `admin_ssh_key` - (Optional) An `admin_ssh_key` block as documented below.
+
+* `disable_password_authentication` - (Optional) When an `admin_password` is specified, `disable_password_authentication` must be set to `false`. Defaults to `true`.
+
+~> **NOTE:** Either `admin_password` or `admin_ssh_key` must be specified.
+
+* `provision_vm_agent` - (Optional) Should the Azure VM Agent be provisioned on each Virtual Machine in the Scale Set? Defaults to `true`. Changing this value forces a new resource to be created.
+
+* `secret` - (Optional) One or more `secret` blocks as defined below.
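+
+As a point of reference, a minimal sketch of an `os_profile` containing a `linux_configuration` that authenticates with an SSH key only might look like the following (the resource label and the public key path are illustrative assumptions rather than part of the schema):
+
+```hcl
+resource "azurerm_orchestrated_virtual_machine_scale_set" "example" {
+  # ... name, location, resource_group_name, sku_name, instances, etc.
+
+  os_profile {
+    linux_configuration {
+      admin_username                  = "adminuser"
+      disable_password_authentication = true # no admin_password is set, so an admin_ssh_key is required
+
+      admin_ssh_key {
+        username   = "adminuser"
+        public_key = file("~/.ssh/id_rsa.pub") # hypothetical path to an ssh-rsa public key (at least 2048-bit)
+      }
+    }
+  }
+}
+```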
+
+---
+
+A `secret` block supports the following:
+
+* `key_vault_id` - (Required) The ID of the Key Vault from which all Secrets should be sourced.
+
+* `certificate` - (Required) One or more `certificate` blocks as defined below.
+
+~> **NOTE:** The schema of the `certificate` block is slightly different depending on whether you are provisioning a `windows_configuration` or a `linux_configuration`.
+
+---
+
+A (Windows) `certificate` block supports the following:
+
+* `store` - (Required) The certificate store on the Virtual Machine where the certificate should be added.
+
+* `url` - (Required) The Secret URL of a Key Vault Certificate.
+
+---
+
+A (Linux) `certificate` block supports the following:
+
+* `url` - (Required) The Secret URL of a Key Vault Certificate.
+
+---
+
+An `admin_ssh_key` block supports the following:
+
+* `public_key` - (Required) The Public Key which should be used for authentication, which needs to be at least 2048-bit and in ssh-rsa format. Changing this forces a new resource to be created.
+
+* `username` - (Required) The Username for which this Public SSH Key should be configured. Changing this forces a new resource to be created.
+
+~> **NOTE:** The Azure VM Agent only allows creating SSH Keys at the path `/home/{username}/.ssh/authorized_keys` - as such this public key will be written to the authorized keys file.
+
+---
+
+A `winrm_listener` block supports the following:
+
+* `certificate_url` - (Optional) The Secret URL of a Key Vault Certificate, which must be specified when `protocol` is set to `Https`.
+
+~> **NOTE:** This can be sourced from the `secret_id` field within the `azurerm_key_vault_certificate` Resource.
+
+---
+
+An `automatic_instance_repair` block supports the following:
+
+* `enabled` - (Required) Should the automatic instance repair be enabled on this Orchestrated Virtual Machine Scale Set? Possible values are `true` and `false`. Defaults to `false`.
+
+* `grace_period` - (Optional) Amount of time for which automatic repairs will be delayed. The grace period starts right after the VM is found unhealthy. Possible values are between `30` and `90` minutes. Defaults to `30` minutes. The time duration should be specified in `ISO 8601` format (e.g. `PT30M` to `PT90M`).
+
+---
+
+A `boot_diagnostics` block supports the following:
+
+* `storage_account_uri` - (Optional) The Primary/Secondary Endpoint for the Azure Storage Account which should be used to store Boot Diagnostics, including Console Output and Screenshots from the Hypervisor. Including a `boot_diagnostics` block without passing the `storage_account_uri` field will cause the API to utilize a Managed Storage Account to store the Boot Diagnostics output.
+
+---
+
+A `certificate` block supports the following:
+
+* `store` - (Required) The certificate store on the Virtual Machine where the certificate should be added.
+
+* `url` - (Required) The Secret URL of a Key Vault Certificate.
+
+~> **NOTE:** This can be sourced from the `secret_id` field within the `azurerm_key_vault_certificate` Resource.
+
+---
+
+A `diff_disk_settings` block supports the following:
+
+* `option` - (Required) Specifies the Ephemeral Disk Settings for the OS Disk. At this time the only possible value is `Local`. Changing this forces a new resource to be created.
+
+---
+
+A `data_disk` block supports the following:
+
+* `caching` - (Required) The type of Caching which should be used for this Data Disk. Possible values are `None`, `ReadOnly` and `ReadWrite`.
+
+* `create_option` - (Optional) The create option which should be used for this Data Disk. Possible values are `Empty` and `FromImage`. Defaults to `Empty`. (`FromImage` should only be used if the source image includes data disks.)
+
+* `disk_size_gb` - (Required) The size of the Data Disk which should be created.
+
+* `lun` - (Required) The Logical Unit Number of the Data Disk, which must be unique within the Virtual Machine.
+
+* `storage_account_type` - (Required) The Type of Storage Account which should back this Data Disk. Possible values include `Standard_LRS`, `StandardSSD_LRS`, `Premium_LRS` and `UltraSSD_LRS`.
+
+---
+
+An `ip_configuration` block supports the following:
+
+* `name` - (Required) The Name which should be used for this IP Configuration.
+
+* `application_gateway_backend_address_pool_ids` - (Optional) A list of Backend Address Pool IDs from an Application Gateway which this Orchestrated Virtual Machine Scale Set should be connected to.
+
+* `application_security_group_ids` - (Optional) A list of Application Security Group IDs which this Orchestrated Virtual Machine Scale Set should be connected to.
+
+* `load_balancer_backend_address_pool_ids` - (Optional) A list of Backend Address Pool IDs from a Load Balancer which this Orchestrated Virtual Machine Scale Set should be connected to.
+
+~> **NOTE:** When using this field you'll also need to configure a Rule for the Load Balancer, and use a `depends_on` between this resource and the Load Balancer Rule.
+
+* `primary` - (Optional) Is this the Primary IP Configuration for this Network Interface? Possible values are `true` and `false`. Defaults to `false`.
+
+~> **NOTE:** One `ip_configuration` block must be marked as Primary for each Network Interface.
+
+* `public_ip_address` - (Optional) A `public_ip_address` block as defined below.
+
+* `subnet_id` - (Optional) The ID of the Subnet which this IP Configuration should be connected to.
+
+~> **NOTE:** `subnet_id` is required if `version` is set to `IPv4`.
+
+* `version` - (Optional) The Internet Protocol Version which should be used for this IP Configuration. Possible values are `IPv4` and `IPv6`. Defaults to `IPv4`.
+
+---
+
+An `ip_tag` block supports the following:
+
+* `tag` - The IP Tag associated with the Public IP, such as `SQL` or `Storage`.
+
+* `type` - The Type of IP Tag, such as `FirstPartyUsage`.
+
+---
+
+A `network_interface` block supports the following:
+
+* `name` - (Required) The Name which should be used for this Network Interface. Changing this forces a new resource to be created.
+
+* `ip_configuration` - (Required) One or more `ip_configuration` blocks as defined above.
+
+* `dns_servers` - (Optional) A list of IP Addresses of DNS Servers which should be assigned to the Network Interface.
+
+* `enable_accelerated_networking` - (Optional) Does this Network Interface support Accelerated Networking? Possible values are `true` and `false`. Defaults to `false`.
+
+* `enable_ip_forwarding` - (Optional) Does this Network Interface support IP Forwarding? Possible values are `true` and `false`. Defaults to `false`.
+
+* `network_security_group_id` - (Optional) The ID of a Network Security Group which should be assigned to this Network Interface.
+
+* `primary` - (Optional) Is this the Primary Network Interface? Possible values are `true` and `false`. Defaults to `false`.
+
+~> **NOTE:** If multiple `network_interface` blocks are specified, one must be set to `primary`.
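+
+To make the shape of these blocks concrete, the following is a minimal, illustrative sketch of a `network_interface` with a single primary `ip_configuration` (the subnet reference is an assumption made for the example, not part of the schema):
+
+```hcl
+resource "azurerm_orchestrated_virtual_machine_scale_set" "example" {
+  # ... name, location, resource_group_name, sku_name, instances, etc.
+
+  network_interface {
+    name    = "example-nic"
+    primary = true
+
+    ip_configuration {
+      name      = "internal"
+      primary   = true
+      version   = "IPv4"
+      subnet_id = azurerm_subnet.example.id # assumes a separately defined azurerm_subnet resource
+    }
+  }
+}
+```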
+
+---
+
+An `os_disk` block supports the following:
+
+* `caching` - (Required) The Type of Caching which should be used for the Internal OS Disk. Possible values are `None`, `ReadOnly` and `ReadWrite`.
+
+* `storage_account_type` - (Required) The Type of Storage Account which should back the Internal OS Disk. Possible values include `Standard_LRS`, `StandardSSD_LRS` and `Premium_LRS`.
+
+* `diff_disk_settings` - (Optional) A `diff_disk_settings` block as defined above.
+
+* `disk_encryption_set_id` - (Optional) The ID of the Disk Encryption Set which should be used to encrypt this OS Disk.
+
+~> **NOTE:** Disk Encryption Sets are in Public Preview in a limited set of regions.
+
+* `disk_size_gb` - (Optional) The Size of the Internal OS Disk in GB, if you wish to vary from the size used in the image this Virtual Machine Scale Set is sourced from.
+
+---
+
+A `plan` block supports the following:
+
+* `name` - (Required) Specifies the name of the image from the marketplace. Changing this forces a new resource to be created.
+
+* `publisher` - (Required) Specifies the publisher of the image. Changing this forces a new resource to be created.
+
+* `product` - (Required) Specifies the product of the image from the marketplace. Changing this forces a new resource to be created.
+
+---
+
+An `identity` block supports the following:
+
+* `type` - (Required) The type of Managed Identity which should be assigned to the Orchestrated Virtual Machine Scale Set. Possible values are `UserAssigned` and `None`.
+
+* `identity_ids` - (Optional) A list of User Managed Identity IDs which should be assigned to the Orchestrated Virtual Machine Scale Set.
+
+~> **NOTE:** This is required when `type` is set to `UserAssigned`.
+
+---
+
+A `public_ip_address` block supports the following:
+
+* `name` - (Required) The Name of the Public IP Address Configuration.
+
+* `domain_name_label` - (Optional) The Prefix which should be used for the Domain Name Label for each Virtual Machine Instance. Azure concatenates the Domain Name Label and Virtual Machine Index to create a unique Domain Name Label for each Virtual Machine. Valid values must be between `1` and `26` characters long, start with a lower case letter, end with a lower case letter or number and contain only `a-z`, `0-9` and hyphens.
+
+* `idle_timeout_in_minutes` - (Optional) The Idle Timeout in Minutes for the Public IP Address. Possible values are in the range `4` to `32`.
+
+* `ip_tag` - (Optional) One or more `ip_tag` blocks as defined above.
+
+* `public_ip_prefix_id` - (Optional) The ID of the Public IP Address Prefix from where Public IP Addresses should be allocated. Changing this forces a new resource to be created.
+
+---
+
+A `termination_notification` block supports the following:
+
+* `enabled` - (Required) Should the termination notification be enabled on this Virtual Machine Scale Set? Possible values are `true` and `false`. Defaults to `false`.
+
+* `timeout` - (Optional) Length of time (in minutes, between `5` and `15`) for which a notification is sent to the VM via the instance metadata server before the VM is deleted. The time duration should be specified in `ISO 8601` format. Defaults to `PT5M`.
+
+---
+
+A `source_image_reference` block supports the following:
+
+* `publisher` - (Optional) Specifies the publisher of the image used to create the virtual machines.
+
+* `offer` - (Optional) Specifies the offer of the image used to create the virtual machines.
+
+* `sku` - (Optional) Specifies the SKU of the image used to create the virtual machines.
+
+* `version` - (Optional) Specifies the version of the image used to create the virtual machines.
+
+---
+
 ## Attributes Reference
 
 In addition to all arguments above, the following attributes are exported:
diff --git a/website/docs/r/signalr_service.html.markdown b/website/docs/r/signalr_service.html.markdown
index 5c7d1e63bdca..6191363debda 100644
--- a/website/docs/r/signalr_service.html.markdown
+++ b/website/docs/r/signalr_service.html.markdown
@@ -136,5 +136,5 @@ The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/d
 SignalR services can be imported using the `resource id`, e.g.
 
 ```shell
-terraform import azurerm_signalr_service.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/terraform-signalr/providers/Microsoft.SignalRService/SignalR/tfex-signalr
+terraform import azurerm_signalr_service.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/terraform-signalr/providers/Microsoft.SignalRService/signalR/tfex-signalr
 ```
diff --git a/website/docs/r/signalr_service_network_acl.html.markdown b/website/docs/r/signalr_service_network_acl.html.markdown
index 7e6e27ccaa63..5c3f0afa10c2 100644
--- a/website/docs/r/signalr_service_network_acl.html.markdown
+++ b/website/docs/r/signalr_service_network_acl.html.markdown
@@ -136,5 +136,5 @@ The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/d
 Network ACLs for a SignalR service can be imported using the `resource id`, e.g.
 
 ```shell
-terraform import azurerm_signalr_service_network_acl.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.SignalRService/SignalR/signalr1
+terraform import azurerm_signalr_service_network_acl.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.SignalRService/signalR/signalr1
 ```
diff --git a/website/docs/r/sql_managed_instance.html.markdown b/website/docs/r/sql_managed_instance.html.markdown
index d004b557289c..97d8359c60f4 100644
--- a/website/docs/r/sql_managed_instance.html.markdown
+++ b/website/docs/r/sql_managed_instance.html.markdown
@@ -237,6 +237,8 @@ The following arguments are supported:
 
 * `dns_zone_partner_id` - (Optional) The ID of the Managed Instance which will share the DNS zone. This is a prerequisite for creating a failover group, although creation of a failover group is not yet possible in `azurerm`. Setting this after creation forces a new resource to be created.
 
+* `identity` - (Optional) An `identity` block as defined below.
+
 * `tags` - (Optional) A mapping of tags to assign to the resource.
 
 ---
@@ -245,6 +247,12 @@ A `sku` block supports the following:
 
 * `name` - (Required) Sku of the managed instance. Values can be `GP_Gen4`, `GP_Gen5`, `BC_Gen4`, or `BC_Gen5`.
 
+---
+
+ An `identity` block supports the following:
+
+ * `type` - (Required) The identity type of the SQL Managed Instance. The only possible value is `SystemAssigned`.
+
 ## Attributes Reference
 
 The following attributes are exported:
@@ -253,6 +261,14 @@ The following attributes are exported:
 
 * `fqdn` - The fully qualified domain name of the Azure Managed SQL Instance
 
+---
+
+ The `identity` block exports the following:
+
+ * `principal_id` - The Principal ID for the Service Principal associated with the Identity of this SQL Managed Instance.
+ + * `tenant_id` - The Tenant ID for the Service Principal associated with the Identity of this SQL Managed Instance. + ## Import SQL Servers can be imported using the `resource id`, e.g. diff --git a/website/docs/r/sql_server.html.markdown b/website/docs/r/sql_server.html.markdown index 0a24c95c7d2f..1b54fe97fae4 100644 --- a/website/docs/r/sql_server.html.markdown +++ b/website/docs/r/sql_server.html.markdown @@ -40,13 +40,6 @@ resource "azurerm_sql_server" "example" { administrator_login = "mradministrator" administrator_login_password = "thisIsDog11" - extended_auditing_policy { - storage_endpoint = azurerm_storage_account.example.primary_blob_endpoint - storage_account_access_key = azurerm_storage_account.example.primary_access_key - storage_account_access_key_is_secondary = true - retention_in_days = 6 - } - tags = { environment = "production" } @@ -73,8 +66,6 @@ The following arguments are supported: * `identity` - (Optional) An `identity` block as defined below. -* `extended_auditing_policy` - (Optional) A `extended_auditing_policy` block as defined below. - * `threat_detection_policy` - (Optional) Threat detection policy configuration. The `threat_detection_policy` block supports fields documented below. * `tags` - (Optional) A mapping of tags to assign to the resource. @@ -114,20 +105,6 @@ The following attributes are exported: -> You can access the Principal ID via `${azurerm_mssql_server.example.identity.0.principal_id}` and the Tenant ID via `${azurerm_mssql_server.example.identity.0.tenant_id}` ---- - -A `extended_auditing_policy` block supports the following: - -* `storage_account_access_key` - (Optional) Specifies the access key to use for the auditing storage account. - -* `storage_endpoint` - (Optional) Specifies the blob storage endpoint (e.g. https://MyAccount.blob.core.windows.net). - -* `storage_account_access_key_is_secondary` - (Optional) Specifies whether `storage_account_access_key` value is the storage's secondary key. - -* `retention_in_days` - (Optional) Specifies the number of days to retain logs for in the storage account. - -* `log_monitoring_enabled` - (Optional) Enable audit events to Azure Monitor? To enable server audit events to Azure Monitor, please enable its primary database audit events to Azure Monitor. 
- ### Timeouts The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: diff --git a/website/docs/r/synapse_workspace.html.markdown b/website/docs/r/synapse_workspace.html.markdown index f65e91042b45..c8293b4fd907 100644 --- a/website/docs/r/synapse_workspace.html.markdown +++ b/website/docs/r/synapse_workspace.html.markdown @@ -53,6 +53,109 @@ resource "azurerm_synapse_workspace" "example" { } ``` +## Example Usage - creating a workspace with Customer Managed Key and Azure AD Admin + +``` +data "azurerm_client_config" "current" {} + +resource "azurerm_resource_group" "example" { + name = "example-resources" + location = "West Europe" +} + +resource "azurerm_storage_account" "example" { + name = "examplestorageacc" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + account_tier = "Standard" + account_replication_type = "LRS" + account_kind = "StorageV2" + is_hns_enabled = "true" +} + +resource "azurerm_storage_data_lake_gen2_filesystem" "example" { + name = "example" + storage_account_id = azurerm_storage_account.example.id +} + +resource "azurerm_key_vault" "example" { + name = "example" + location = azurerm_resource_group.example.location + resource_group_name = azurerm_resource_group.example.name + tenant_id = data.azurerm_client_config.current.tenant_id + sku_name = "standard" + purge_protection_enabled = true +} + +resource "azurerm_key_vault_access_policy" "deployer" { + key_vault_id = azurerm_key_vault.example.id + tenant_id = data.azurerm_client_config.current.tenant_id + object_id = data.azurerm_client_config.current.object_id + + key_permissions = [ + "create", "get", "delete", "purge" + ] +} + +resource "azurerm_key_vault_key" "example" { + name = "workspaceencryptionkey" + key_vault_id = azurerm_key_vault.example.id + key_type = "RSA" + key_size = 2048 + key_opts = [ + "unwrapKey", + "wrapKey" + ] + depends_on = [ + azurerm_key_vault_access_policy.deployer + ] +} + +resource "azurerm_synapse_workspace" "example" { + name = "example" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + storage_data_lake_gen2_filesystem_id = azurerm_storage_data_lake_gen2_filesystem.example.id + sql_administrator_login = "sqladminuser" + sql_administrator_login_password = "H@Sh1CoR3!" 
+ customer_managed_key { + key_versionless_id = azurerm_key_vault_key.example.versionless_id + key_name = "enckey" + } + + tags = { + Env = "production" + } +} + +resource "azurerm_key_vault_access_policy" "workspace_policy" { + key_vault_id = azurerm_key_vault.example.id + tenant_id = azurerm_synapse_workspace.example.identity[0].tenant_id + object_id = azurerm_synapse_workspace.example.identity[0].principal_id + + key_permissions = [ + "Get", "WrapKey", "UnwrapKey" + ] +} + +resource "azurerm_synapse_workspace_key" "example" { + customer_managed_key_versionless_id = azurerm_key_vault_key.example.versionless_id + synapse_workspace_id = azurerm_synapse_workspace.example.id + active = true + customer_managed_key_name = "enckey" + depends_on = [azurerm_key_vault_access_policy.workspace_policy] +} + +resource "azurerm_synapse_workspace_aad_admin" "example" { + synapse_workspace_id = azurerm_synapse_workspace.example.id + login = "AzureAD Admin" + object_id = "00000000-0000-0000-0000-000000000000" + tenant_id = "00000000-0000-0000-0000-000000000000" + + depends_on = [azurerm_synapse_workspace_key.example] +} +``` + ## Arguments Reference The following arguments are supported: @@ -85,13 +188,13 @@ The following arguments are supported: * `managed_resource_group_name` - (Optional) Workspace managed resource group. -* `aad_admin` - (Optional) An `aad_admin` block as defined below. +* `aad_admin` - (Optional) An `aad_admin` block as defined below. Conflicts with `customer_managed_key`. * `azure_devops_repo` - (Optional) An `azure_devops_repo` block as defined below. * `github_repo` - (Optional) A `github_repo` block as defined below. -* `customer_managed_key` - (Optional) A `customer_managed_key` block as defined below. +* `customer_managed_key` - (Optional) A `customer_managed_key` block as defined below. Conflicts with `aad_admin`. * `sql_aad_admin` - (Optional) An `sql_aad_admin` block as defined below. 
diff --git a/website/docs/r/synapse_workspace_aad_admin.html.markdown b/website/docs/r/synapse_workspace_aad_admin.html.markdown
new file mode 100644
index 000000000000..e4c891a5e0fa
--- /dev/null
+++ b/website/docs/r/synapse_workspace_aad_admin.html.markdown
@@ -0,0 +1,119 @@
+---
+subcategory: "Synapse"
+layout: "azurerm"
+page_title: "Azure Resource Manager: azurerm_synapse_workspace_aad_admin"
+description: |-
+  Manages Synapse Workspace AAD Admin
+---
+
+# azurerm_synapse_workspace_aad_admin
+
+Manages an Azure Active Directory Administrator setting for a Synapse Workspace.
+
+## Example Usage
+
+```hcl
+resource "azurerm_resource_group" "example" {
+  name     = "example-resources"
+  location = "West Europe"
+}
+
+resource "azurerm_storage_account" "example" {
+  name                     = "examplestorageacc"
+  resource_group_name      = azurerm_resource_group.example.name
+  location                 = azurerm_resource_group.example.location
+  account_tier             = "Standard"
+  account_replication_type = "LRS"
+  account_kind             = "StorageV2"
+  is_hns_enabled           = "true"
+}
+
+resource "azurerm_storage_data_lake_gen2_filesystem" "example" {
+  name               = "example"
+  storage_account_id = azurerm_storage_account.example.id
+}
+
+data "azurerm_client_config" "current" {}
+
+resource "azurerm_key_vault" "example" {
+  name                     = "example"
+  location                 = azurerm_resource_group.example.location
+  resource_group_name      = azurerm_resource_group.example.name
+  tenant_id                = data.azurerm_client_config.current.tenant_id
+  sku_name                 = "standard"
+  purge_protection_enabled = true
+}
+
+resource "azurerm_key_vault_access_policy" "deployer" {
+  key_vault_id = azurerm_key_vault.example.id
+  tenant_id    = data.azurerm_client_config.current.tenant_id
+  object_id    = data.azurerm_client_config.current.object_id
+
+  key_permissions = [
+    "create", "get", "delete", "purge"
+  ]
+}
+
+resource "azurerm_key_vault_key" "example" {
+  name         = "workspaceencryptionkey"
+  key_vault_id = azurerm_key_vault.example.id
+  key_type     = "RSA"
+  key_size     = 2048
+  key_opts = [
+    "unwrapKey",
+    "wrapKey"
+  ]
+  depends_on = [
+    azurerm_key_vault_access_policy.deployer
+  ]
+}
+
+resource "azurerm_synapse_workspace" "example" {
+  name                                 = "example"
+  resource_group_name                  = azurerm_resource_group.example.name
+  location                             = azurerm_resource_group.example.location
+  storage_data_lake_gen2_filesystem_id = azurerm_storage_data_lake_gen2_filesystem.example.id
+  sql_administrator_login              = "sqladminuser"
+  sql_administrator_login_password     = "H@Sh1CoR3!"
+
+  tags = {
+    Env = "production"
+  }
+}
+
+resource "azurerm_synapse_workspace_aad_admin" "example" {
+  synapse_workspace_id = azurerm_synapse_workspace.example.id
+  login                = "AzureAD Admin"
+  object_id            = data.azurerm_client_config.current.object_id
+  tenant_id            = data.azurerm_client_config.current.tenant_id
+}
+```
+
+## Arguments Reference
+
+The following arguments are supported:
+
+* `synapse_workspace_id` - (Required) The ID of the Synapse Workspace where the Azure AD Administrator should be configured.
+
+* `login` - (Required) The login name of the Azure AD Administrator of this Synapse Workspace.
+
+* `object_id` - (Required) The object ID of the Azure AD Administrator of this Synapse Workspace.
+
+* `tenant_id` - (Required) The tenant ID of the Azure AD Administrator of this Synapse Workspace.
+
+## Timeouts
+
+The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions:
+
+* `create` - (Defaults to 30 minutes) Used when creating the Synapse Workspace Azure AD Administrator.
+* `read` - (Defaults to 5 minutes) Used when retrieving the Synapse Workspace Azure AD Administrator.
+* `update` - (Defaults to 30 minutes) Used when updating the Synapse Workspace Azure AD Administrator.
+* `delete` - (Defaults to 30 minutes) Used when deleting the Synapse Workspace Azure AD Administrator.
+
+## Import
+
+Synapse Workspace Azure AD Administrators can be imported using the `resource id`, e.g.
+
+```shell
+terraform import azurerm_synapse_workspace_aad_admin.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resourceGroup1/providers/Microsoft.Synapse/workspaces/workspace1/administrators/activeDirectory
+```
diff --git a/website/docs/r/virtual_network_gateway.html.markdown b/website/docs/r/virtual_network_gateway.html.markdown
index 888260490118..42e1e703e623 100644
--- a/website/docs/r/virtual_network_gateway.html.markdown
+++ b/website/docs/r/virtual_network_gateway.html.markdown
@@ -152,9 +152,10 @@ The following arguments are supported:
 
 -> **NOTE:** The available values depend on the `type` and `sku` arguments - where `Generation2` is only value for a `sku` larger than `VpnGw2` or `VpnGw2AZ`.
 
-* `ip_configuration` (Required) One or two `ip_configuration` blocks documented below.
-  An active-standby gateway requires exactly one `ip_configuration` block whereas
-  an active-active gateway requires exactly two `ip_configuration` blocks.
+* `ip_configuration` (Required) One, two or three `ip_configuration` blocks documented below.
+  An active-standby gateway requires exactly one `ip_configuration` block,
+  an active-active gateway requires exactly two `ip_configuration` blocks, whereas
+  an active-active zone-redundant gateway with P2S configuration requires exactly three `ip_configuration` blocks.
 
 * `vpn_client_configuration` (Optional) A `vpn_client_configuration` block which is documented below. In this block the Virtual Network Gateway can be configured