diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 000000000000..4e8d9293d05e --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,31 @@ +issues: + max-per-linter: 0 + max-same-issues: 0 + +linters: + disable-all: true + enable: + - deadcode + - errcheck + - gofmt + - gosimple + - govet + - ineffassign + - interfacer + - nakedret + - misspell + - staticcheck + - structcheck + - typecheck + - unused + - unconvert + - varcheck + - vet + - vetshadow + +linters-settings: + errcheck: + ignore: github.com/hashicorp/terraform/helper/schema:ForceNew|Set,fmt:.*,io:Close + +run: + modules-download-mode: vendor \ No newline at end of file diff --git a/.gometalinter.json b/.gometalinter.json deleted file mode 100644 index 961b91cb9da7..000000000000 --- a/.gometalinter.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "Deadline": "10m", - "Enable": [ - "errcheck", - "gofmt", - "goimports", - "gotype", - "ineffassign", - "interfacer", - "nakedret", - "misspell", - "staticcheck", - "structcheck", - "unparam", - "unconvert", - "varcheck", - "vet", - "vetshadow" - ], - "EnableGC": true, - "Linters": { - "errcheck": { - "Command": "errcheck -abspath {not_tests=-ignoretests} -ignore github.com/hashicorp/terraform/helper/schema:ForceNew|Set -ignore io:Close" - }, - "nakedret": { - "Command": "nakedret -l 1" - } - }, - "Sort": [ - "path", - "line" - ], - "Vendor": true, - "WarnUnmatchedDirective": false -} diff --git a/CHANGELOG.md b/CHANGELOG.md index f94a185b4b26..d844d834154f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,35 +1,122 @@ -## 1.22.0 (Unreleased) +## 1.23.0 (Unreleased) FEATURES: -* **New Resource:** `azurerm_ddos_protection_plan` [GH-2654] +* **New Data Source:** `azurerm_api_management_group` [GH-2809] +* **New Data Source:** `azurerm_api_management_product` [GH-2953] +* **New Data Source:** `azurerm_api_management_user` [GH-2954] +* **New Data Source:** `azurerm_availability_set` [GH-2850] +* **New Data Source:** `azurerm_network_watcher` [GH-2791] +* **New 
Resource:** `azurerm_api_management_group` [GH-2809] +* **New Resource:** `azurerm_api_management_product` [GH-2953] +* **New Resource:** `azurerm_api_management_user` [GH-2954] +* **New Resource:** `azurerm_eventgrid_domain` [GH-2884] +* **New Resource:** `azurerm_lb_outbound_rule` [GH-2912] +* **New Resource:** `azurerm_media_service_account` [GH-2711] +* **New Resource:** `azurerm_network_watcher` [GH-2791] IMPROVEMENTS: -* dependencies: switching to Go Modules [GH-2705] -* dependencies: upgrading to v11.3.2 of github.com/Azure/go-autorest [GH-2744] -* `azurerm_application_gateway` - support for the `http2` property [GH-2735] -* `azurerm_application_gateway` - support for the `file_upload_limit_mb` property [GH-2666] -* `azurerm_application_gateway` - Support for `pick_host_name_from_backend_address` and `pick_host_name_from_backend_http_settings` properties [GH-2658] -* `azurerm_cosmosdb_account` - support for the `EnableAggregationPipeline`, `MongoDBv3.4` and ` mongoEnableDocLevelTTL` capabilities [GH-2715] -* `azurerm_data_lake_store_file` - support file uploads greater then 4 megabytes [GH-2633] -* `azurerm_mssql_elasticpool` - support for setting `max_size_bytes` [GH-2346] -* `azurerm_signalr_service` - exporting `primary_access_key`, `secondary_access_key`, `primary_connection_string` and `secondary_connection_string` and secondary access keys and connection strings [GH-2655] -* `azurerm_subnet` - support for additional subnet delegation types [GH-2667] +* dependencies: upgrading to v25.1.0 of `github.com/Azure/azure-sdk-for-go` [GH-2886] +* dependencies: upgrading to v11.4.0 of `github.com/Azure/go-autorest` [GH-2886] +* `azurerm_application_gateway` - support for setting `path` within the `backend_http_settings` block [GH-2879] +* `azurerm_application_gateway` - support for setting `connection_draining` to the `backend_http_settings` [GH-2778] +* `azurerm_iothub` - support for the `fallback_route` property [GH-2764] +* `azurerm_redis_cache` - support for 
configuring the `maxfragmentationmemory_reserved` in the `redis_configuration` block [GH-2887] +* `azurerm_service_fabric_cluster` - support for setting `capacities` and `placement_properties` [GH-2936] +* `azurerm_storage_account` - exposing primary/secondary `_host` attributes [GH-2792] BUG FIXES: -* `azurerm_azuread_application` - fixing a bug where `reply_uris` was set incorrectly [GH-2729] -* `azurerm_batch_pool` - can now set multiple environment variables [GH-2685] -* `azurerm_cosmosdb_account` - prevent occasional error when deleting the resource [GH-2702] -* `azurerm_cosmosdb_account` - allow empty values for the `ip_range_filter` property [GH-2713] -* `azurerm_express_route_circuit` - added the `premium` SKU back to validation logic [GH-2692] -* `azurerm_firewall` - ensuring rules aren't removed during an update [GH-2663] -* `azurerm_notification_hub_namespace` - now polls on creation to handle eventual consistency [GH-2701] -* `azurerm_redis_cache` - locking on the Virtual Network/Subnet name to avoid a race condition [GH-2725] -* `azurerm_service_bus_subscription` - name's can now start with a digit [GH-2672] -* `azurerm_security_center` - increase the creation timeout to `30m` [GH-2724] -* `azurerm_subnet` - fixing a crash when service endpoints was nil [GH-2742] +* `azurerm_api_management` - switching to use API version `2018-01-01` rather than `2018-06-01-preview` [GH-2958] +* `azurerm_batch_pool` - updating `max_tasks_per_node` to be ForceNew [GH-2856] +* `azurerm_key_vault_access_policy` - no longer silently fails on creation if the `key_vault_id` property is invalid/doesn't exist [GH-2922] +* `azurerm_policy_definition` - making the `metadata` field computed [GH-2939] +* `azurerm_redis_firewall_rule` - allowing underscores in the `name` field [GH-2906] + +## 1.22.1 (February 14, 2019) + +BUG FIXES: + +* `azurerm_key_vault_access_policy` - will no longer fail to find the Key Vault if `key_vault_id` is empty 
([#2874](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2874)) +* `azurerm_key_vault_certificate` - will no longer fail to find the Key Vault if `key_vault_id` is empty ([#2874](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2874)) +* `azurerm_key_vault_key` - will no longer fail to find the Key Vault if `key_vault_id` is empty ([#2874](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2874)) +* `azurerm_key_vault_secret` - will no longer fail to find the Key Vault if `key_vault_id` is empty ([#2874](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2874)) +* `azurerm_storage_container` - support for large numbers of containers within a storage account ([#2873](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2873)) + +## 1.22.0 (February 11, 2019) + +UPGRADE NOTES: + +* The v1.22 release includes a few new resources which are duplicates of existing resources, the purpose of this is to correct some invalid naming so that we can remove the mis-named resources in the next major version of the Provider. Please see [the upgrade guide](https://www.terraform.io/docs/providers/azurerm/guides/migrating-between-renamed-resources.html) for more information on how to migrate between these resources. 
+* The `azurerm_builtin_role_definition` Data Source has been deprecated in favour of the `azurerm_role_definition` Data Source, which now provides the same functionality and will be removed in the next major version of the AzureRM Provider (2.0) ([#2798](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2798)) +* The `azurerm_log_analytics_workspace_linked_service` resource has been deprecated in favour of the (new) `azurerm_log_analytics_linked_service` resource and will be removed in the next major version of the AzureRM Provider (2.0) ([#2768](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2768)) +* The `azurerm_autoscale_setting` resource has been deprecated in favour of the (new) `azurerm_monitor_autoscale_setting` resource and will be removed in the next major version of the AzureRM Provider (2.0) ([#2768](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2768)) +* The `azurerm_metric_alertrule` resource has been deprecated in favour of the (new) `azurerm_monitor_metric_alertrule` resource and will be removed in the next major version of the AzureRM Provider (2.0) ([#2762](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2762)) + +FEATURES: + +* **New Data Source:** `azurerm_policy_definition` ([#2788](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2788)) +* **New Data Source:** `azurerm_servicebus_namespace` ([#2841](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2841)) +* **New Resource:** `azurerm_ddos_protection_plan` ([#2654](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2654)) +* **New Resource:** `azurerm_log_analytics_linked_service ` ([#2768](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2768)) +* **New Resource:** `azurerm_monitor_autoscale_setting` ([#2768](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2768)) +* **New 
Resource:** `azurerm_monitor_metric_alertrule` ([#2762](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2762)) +* **New Resource:** `azurerm_network_interface_application_security_group_association` ([#2789](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2789)) + +DEPRECATIONS: + +* Data Source `azurerm_key_vault_key` - deprecating the `vault_uri` property in favour of `key_vault_id` ([#2820](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2820)) +* Data Source `azurerm_key_vault_secret` - deprecating the `vault_uri` property in favour of `key_vault_id` ([#2820](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2820)) +* `azurerm_key_vault_certificate` - deprecating the `vault_uri` property in favour of `key_vault_id` ([#2820](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2820)) +* `azurerm_key_vault_key` - deprecating the `vault_uri` property in favour of `key_vault_id` ([#2820](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2820)) +* `azurerm_key_vault_access_policy` - deprecating the `vault_name` and `resource_group_name` properties in favour of `key_vault_id` ([#2820](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2820)) +* `azurerm_key_vault_secret` - deprecating the `vault_uri` property in favour of `key_vault_id` ([#2820](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2820)) +* `azurerm_application_gateway` - deprecating the `fqdn_list` field in favour of `fqdns` ([#2768](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2768)) +* `azurerm_application_gateway` - deprecating the `ip_address_list` field in favour of `ip_addresses` ([#2768](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2768)) +* `azurerm_builtin_role_definition` - deprecating in favour of the `azurerm_role_definition` data source, which now 
provides the same functionality ([#2798](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2798)) +* `azurerm_log_analytics_workspace_linked_service` - deprecating in favour of the (renamed) `azurerm_log_analytics_linked_service` resource ([#2768](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2768)) +* `azurerm_monitor_autoscale_setting` - deprecating in favour of the (renamed) `azurerm_autoscale_setting` resource ([#2768](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2768)) +* `azurerm_network_interface ` - deprecating the `application_security_group_ids` field in favour of the new `azurerm_network_interface_application_security_group_association` resource ([#2789](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2789)) + +IMPROVEMENTS: + +* dependencies: switching to Go Modules ([#2705](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2705)) +* dependencies: upgrading to v11.3.2 of github.com/Azure/go-autorest ([#2744](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2744)) +* Data Source: `azurerm_role_definition` - support for finding roles by name ([#2798](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2798)) +* `azurerm_application_gateway` - support for the `http2` property ([#2735](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2735)) +* `azurerm_application_gateway` - support for the `file_upload_limit_mb` property ([#2666](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2666)) +* `azurerm_application_gateway` - support for the `custom_error_configuration` property ([#2783](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2783)) +* `azurerm_application_gateway` - Support for `pick_host_name_from_backend_address` and `pick_host_name_from_backend_http_settings` properties 
([#2658](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2658)) +* `azurerm_app_service` - support for the `client_cert_enabled` property ([#2765](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2765)) +* `azurerm_autoscale_setting` - support values from `0` to `1000` for the `minimum`, `maximum` and `default` properties ([#2815](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2815)) +* `azurerm_batch_pool` - support for the `max_tasks_per_node` property ([#2805](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2805)) +* `azurerm_cognitive_account` - exporting `primary_access_key` and `secondary_access_key` ([#2825](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2825)) +* `azurerm_cosmosdb_account` - support for the `EnableAggregationPipeline`, `MongoDBv3.4` and ` mongoEnableDocLevelTTL` capabilities ([#2715](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2715)) +* `azurerm_data_lake_store_file` - support file uploads greater than 4 megabytes ([#2633](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2633)) +* `azurerm_function_app` - support for linux via the `linux_fx_version` property ([#2767](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2767)) +* `azurerm_mssql_elasticpool` - support for setting `max_size_bytes` ([#2346](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2346)) +* `azurerm_mssql_elasticpool` - support for setting `max_size_gb` ([#2695](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2695)) +* `azurerm_postgresql_server` - support for version `10` and `10.2` ([#2768](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2768)) +* `azurerm_kubernetes_cluster` - add additional validation ([#2772](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2772)) +* 
`azurerm_signalr_service` - exporting `primary_access_key`, `secondary_access_key`, `primary_connection_string` and `secondary_connection_string` and secondary access keys and connection strings ([#2655](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2655)) +* `azurerm_subnet` - support for additional subnet delegation types ([#2667](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2667)) + +BUG FIXES: + +* `azurerm_azuread_application` - fixing a bug where `reply_uris` was set incorrectly ([#2729](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2729)) +* `azurerm_batch_pool` - can now set multiple environment variables ([#2685](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2685)) +* `azurerm_cosmosdb_account` - prevent occasional error when deleting the resource ([#2702](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2702)) +* `azurerm_cosmosdb_account` - allow empty values for the `ip_range_filter` property ([#2713](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2713)) +* `azurerm_express_route_circuit` - added the `premium` SKU back to validation logic ([#2692](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2692)) +* `azurerm_firewall` - ensuring rules aren't removed during an update ([#2663](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2663)) +* `azurerm_notification_hub_namespace` - now polls on creation to handle eventual consistency ([#2701](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2701)) +* `azurerm_redis_cache` - locking on the Virtual Network/Subnet name to avoid a race condition ([#2725](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2725)) +* `azurerm_service_bus_subscription` - name's can now start with a digit 
([#2672](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2672)) +* `azurerm_security_center` - increase the creation timeout to `30m` ([#2724](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2724)) +* `azurerm_service_fabric_cluster` - no longer pass `reverse_proxy_endpoint_port` to the API when not specified ([#2747](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2747)) +* `azurerm_subnet` - fixing a crash when service endpoints was nil ([#2742](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2742)) +* `azurerm_subnet` - will no longer lose service endpoints during a virtual network update ([#2738](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2738)) ## 1.21.0 (January 11, 2019) diff --git a/GNUmakefile b/GNUmakefile index 4c16b86bc5d3..22b826ed1b59 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -45,13 +45,13 @@ goimport: lint: @echo "==> Checking source code against linters..." - @gometalinter ./... + golangci-lint run ./... tools: @echo "==> installing required tooling..." @sh "$(CURDIR)/scripts/gogetcookie.sh" - GO111MODULE=off go get -u github.com/alecthomas/gometalinter - GO111MODULE=off gometalinter --install + GO111MODULE=off go get -u github.com/client9/misspell/cmd/misspell + GO111MODULE=off go get -u github.com/golangci/golangci-lint/cmd/golangci-lint test-compile: @if [ "$(TEST)" = "./..." ]; then \ diff --git a/README.md b/README.md index 3d1c284058ee..5019d020a501 100644 --- a/README.md +++ b/README.md @@ -108,3 +108,9 @@ The following Environment Variables must be set in your shell prior to running a - `ARM_TEST_LOCATION_ALT` **Note:** Acceptance tests create real resources in Azure which often cost money to run. 
+ +Crosscompiling +-------------- +```sh +GOOS=windows GOARCH=amd64 make build +``` diff --git a/azurerm/config.go b/azurerm/config.go index 0664504fad9b..d6513f8145c9 100644 --- a/azurerm/config.go +++ b/azurerm/config.go @@ -10,6 +10,7 @@ import ( "time" resourcesprofile "github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/resources/mgmt/resources" + "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement" appinsights "github.com/Azure/azure-sdk-for-go/services/appinsights/mgmt/2015-05-01/insights" "github.com/Azure/azure-sdk-for-go/services/automation/mgmt/2015-10-31/automation" "github.com/Azure/azure-sdk-for-go/services/batch/mgmt/2017-09-01/batch" @@ -25,21 +26,21 @@ import ( "github.com/Azure/azure-sdk-for-go/services/datalake/store/2016-11-01/filesystem" storeAccount "github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account" "github.com/Azure/azure-sdk-for-go/services/devtestlabs/mgmt/2016-05-15/dtl" - "github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid" "github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub" "github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac" - "github.com/Azure/azure-sdk-for-go/services/iothub/mgmt/2018-04-01/devices" keyVault "github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault" "github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2018-02-14/keyvault" "github.com/Azure/azure-sdk-for-go/services/logic/mgmt/2016-06-01/logic" + "github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media" "github.com/Azure/azure-sdk-for-go/services/mysql/mgmt/2017-12-01/mysql" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network" "github.com/Azure/azure-sdk-for-go/services/notificationhubs/mgmt/2017-04-01/notificationhubs" "github.com/Azure/azure-sdk-for-go/services/postgresql/mgmt/2017-12-01/postgresql" - 
"github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement" "github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization" "github.com/Azure/azure-sdk-for-go/services/preview/devspaces/mgmt/2018-06-01-preview/devspaces" "github.com/Azure/azure-sdk-for-go/services/preview/dns/mgmt/2018-03-01-preview/dns" + "github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid" + "github.com/Azure/azure-sdk-for-go/services/preview/iothub/mgmt/2018-12-01-preview/devices" "github.com/Azure/azure-sdk-for-go/services/preview/mariadb/mgmt/2018-06-01-preview/mariadb" "github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights" "github.com/Azure/azure-sdk-for-go/services/preview/msi/mgmt/2015-08-31-preview/msi" @@ -112,6 +113,7 @@ type ArmClient struct { kubernetesClustersClient containerservice.ManagedClustersClient containerGroupsClient containerinstance.ContainerGroupsClient + eventGridDomainsClient eventgrid.DomainsClient eventGridTopicsClient eventgrid.TopicsClient eventHubClient eventhub.EventHubsClient eventHubConsumerGroupClient eventhub.ConsumerGroupsClient @@ -124,7 +126,10 @@ type ArmClient struct { redisPatchSchedulesClient redis.PatchSchedulesClient // API Management - apiManagementServiceClient apimanagement.ServiceClient + apiManagementGroupClient apimanagement.GroupClient + apiManagementProductsClient apimanagement.ProductClient + apiManagementServiceClient apimanagement.ServiceClient + apiManagementUsersClient apimanagement.UserClient // Application Insights appInsightsClient appinsights.ComponentsClient @@ -228,6 +233,9 @@ type ArmClient struct { managementGroupsClient managementgroups.Client managementGroupsSubscriptionClient managementgroups.SubscriptionsClient + // Media + mediaServicesClient media.MediaservicesClient + // Monitor monitorActionGroupsClient insights.ActionGroupsClient monitorActivityLogAlertsClient 
insights.ActivityLogAlertsClient @@ -244,6 +252,7 @@ type ArmClient struct { applicationGatewayClient network.ApplicationGatewaysClient applicationSecurityGroupsClient network.ApplicationSecurityGroupsClient azureFirewallsClient network.AzureFirewallsClient + connectionMonitorsClient network.ConnectionMonitorsClient ddosProtectionPlanClient network.DdosProtectionPlansClient expressRouteAuthsClient network.ExpressRouteCircuitAuthorizationsClient expressRouteCircuitClient network.ExpressRouteCircuitsClient @@ -456,6 +465,7 @@ func getArmClient(c *authentication.Config, skipProviderRegistration bool, partn client.registerEventHubClients(endpoint, c.SubscriptionID, auth) client.registerKeyVaultClients(endpoint, c.SubscriptionID, auth, keyVaultAuth) client.registerLogicClients(endpoint, c.SubscriptionID, auth) + client.registerMediaServiceClients(endpoint, c.SubscriptionID, auth) client.registerMonitorClients(endpoint, c.SubscriptionID, auth) client.registerNetworkingClients(endpoint, c.SubscriptionID, auth) client.registerNotificationHubsClient(endpoint, c.SubscriptionID, auth) @@ -480,9 +490,21 @@ func getArmClient(c *authentication.Config, skipProviderRegistration bool, partn } func (c *ArmClient) registerApiManagementServiceClients(endpoint, subscriptionId string, auth autorest.Authorizer) { - ams := apimanagement.NewServiceClientWithBaseURI(endpoint, subscriptionId) - c.configureClient(&ams.Client, auth) - c.apiManagementServiceClient = ams + groupsClient := apimanagement.NewGroupClientWithBaseURI(endpoint, subscriptionId) + c.configureClient(&groupsClient.Client, auth) + c.apiManagementGroupClient = groupsClient + + serviceClient := apimanagement.NewServiceClientWithBaseURI(endpoint, subscriptionId) + c.configureClient(&serviceClient.Client, auth) + c.apiManagementServiceClient = serviceClient + + productsClient := apimanagement.NewProductClientWithBaseURI(endpoint, subscriptionId) + c.configureClient(&productsClient.Client, auth) + c.apiManagementProductsClient = 
productsClient + + usersClient := apimanagement.NewUserClientWithBaseURI(endpoint, subscriptionId) + c.configureClient(&usersClient.Client, auth) + c.apiManagementUsersClient = usersClient } func (c *ArmClient) registerAppInsightsClients(endpoint, subscriptionId string, auth autorest.Authorizer) { @@ -587,6 +609,12 @@ func (c *ArmClient) registerCosmosDBClients(endpoint, subscriptionId string, aut c.cosmosDBClient = cdb } +func (c *ArmClient) registerMediaServiceClients(endpoint, subscriptionId string, auth autorest.Authorizer) { + mediaServicesClient := media.NewMediaservicesClientWithBaseURI(endpoint, subscriptionId) + c.configureClient(&mediaServicesClient.Client, auth) + c.mediaServicesClient = mediaServicesClient +} + func (c *ArmClient) registerComputeClients(endpoint, subscriptionId string, auth autorest.Authorizer) { availabilitySetsClient := compute.NewAvailabilitySetsClientWithBaseURI(endpoint, subscriptionId) c.configureClient(&availabilitySetsClient.Client, auth) @@ -830,6 +858,10 @@ func (c *ArmClient) registerEventGridClients(endpoint, subscriptionId string, au egtc := eventgrid.NewTopicsClientWithBaseURI(endpoint, subscriptionId) c.configureClient(&egtc.Client, auth) c.eventGridTopicsClient = egtc + + egdc := eventgrid.NewDomainsClientWithBaseURI(endpoint, subscriptionId) + c.configureClient(&egdc.Client, auth) + c.eventGridDomainsClient = egdc } func (c *ArmClient) registerEventHubClients(endpoint, subscriptionId string, auth autorest.Authorizer) { @@ -909,6 +941,10 @@ func (c *ArmClient) registerNetworkingClients(endpoint, subscriptionId string, a c.configureClient(&azureFirewallsClient.Client, auth) c.azureFirewallsClient = azureFirewallsClient + connectionMonitorsClient := network.NewConnectionMonitorsClientWithBaseURI(endpoint, subscriptionId) + c.configureClient(&connectionMonitorsClient.Client, auth) + c.connectionMonitorsClient = connectionMonitorsClient + ddosProtectionPlanClient := network.NewDdosProtectionPlansClientWithBaseURI(endpoint, 
subscriptionId) c.configureClient(&ddosProtectionPlanClient.Client, auth) c.ddosProtectionPlanClient = ddosProtectionPlanClient diff --git a/azurerm/data_source_api_management.go b/azurerm/data_source_api_management.go index 9ada4abce21c..c17e8e367071 100644 --- a/azurerm/data_source_api_management.go +++ b/azurerm/data_source_api_management.go @@ -4,9 +4,9 @@ import ( "fmt" "strings" - "github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement" + "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement" "github.com/hashicorp/terraform/helper/schema" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -15,11 +15,7 @@ func dataSourceApiManagementService() *schema.Resource { Read: dataSourceApiManagementRead, Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validate.ApiManagementServiceName, - }, + "name": azure.SchemaApiManagementDataSourceName(), "resource_group_name": resourceGroupNameForDataSourceSchema(), diff --git a/azurerm/data_source_api_management_group.go b/azurerm/data_source_api_management_group.go new file mode 100644 index 000000000000..9c296b389d27 --- /dev/null +++ b/azurerm/data_source_api_management_group.go @@ -0,0 +1,79 @@ +package azurerm + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func dataSourceApiManagementGroup() *schema.Resource { + return &schema.Resource{ + Read: dataSourceApiManagementGroupRead, + + Schema: map[string]*schema.Schema{ + "name": azure.SchemaApiManagementChildDataSourceName(), + + 
"resource_group_name": resourceGroupNameForDataSourceSchema(), + + "api_management_name": azure.SchemaApiManagementDataSourceName(), + + "display_name": { + Type: schema.TypeString, + Computed: true, + }, + + "description": { + Type: schema.TypeString, + Computed: true, + }, + + "external_id": { + Type: schema.TypeString, + Computed: true, + }, + + "type": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceApiManagementGroupRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).apiManagementGroupClient + ctx := meta.(*ArmClient).StopContext + + resourceGroup := d.Get("resource_group_name").(string) + serviceName := d.Get("api_management_name").(string) + name := d.Get("name").(string) + + resp, err := client.Get(ctx, resourceGroup, serviceName, name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[DEBUG] Group %q (Resource Group %q / API Management Service %q) was not found - removing from state!", name, resourceGroup, serviceName) + d.SetId("") + return nil + } + + return fmt.Errorf("Error making Read request for Group %q (Resource Group %q / API Management Service %q): %+v", name, resourceGroup, serviceName, err) + } + + d.SetId(*resp.ID) + + d.Set("name", resp.Name) + d.Set("resource_group_name", resourceGroup) + d.Set("api_management_name", serviceName) + + if properties := resp.GroupContractProperties; properties != nil { + d.Set("display_name", properties.DisplayName) + d.Set("description", properties.Description) + d.Set("external_id", properties.ExternalID) + d.Set("type", string(properties.Type)) + } + + return nil +} diff --git a/azurerm/data_source_api_management_group_test.go b/azurerm/data_source_api_management_group_test.go new file mode 100644 index 000000000000..dfc75814e3e9 --- /dev/null +++ b/azurerm/data_source_api_management_group_test.go @@ -0,0 +1,66 @@ +package azurerm + +import ( + "fmt" + "testing" + + 
"github.com/hashicorp/terraform/helper/resource" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" +) + +func TestAccDataSourceAzureRMApiManagementGroup_basic(t *testing.T) { + dataSourceName := "data.azurerm_api_management_group.test" + rInt := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceApiManagementGroup_basic(rInt, location), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, "display_name", "Test Group"), + resource.TestCheckResourceAttr(dataSourceName, "description", ""), + resource.TestCheckResourceAttr(dataSourceName, "external_id", ""), + resource.TestCheckResourceAttr(dataSourceName, "type", "custom"), + ), + }, + }, + }) +} + +func testAccDataSourceApiManagementGroup_basic(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_api_management" "test" { + name = "acctestAM-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + publisher_name = "pub1" + publisher_email = "pub1@email.com" + + sku { + name = "Developer" + capacity = 1 + } +} + +resource "azurerm_api_management_group" "test" { + name = "acctestAMGroup-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + api_management_name = "${azurerm_api_management.test.name}" + display_name = "Test Group" +} + +data "azurerm_api_management_group" "test" { + name = "${azurerm_api_management_group.test.name}" + api_management_name = "${azurerm_api_management_group.test.api_management_name}" + resource_group_name = "${azurerm_api_management_group.test.resource_group_name}" +} +`, rInt, location, rInt, rInt) +} diff --git 
a/azurerm/data_source_api_management_product.go b/azurerm/data_source_api_management_product.go new file mode 100644 index 000000000000..6cde7a277d94 --- /dev/null +++ b/azurerm/data_source_api_management_product.go @@ -0,0 +1,90 @@ +package azurerm + +import ( + "fmt" + + "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement" + "github.com/hashicorp/terraform/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func dataSourceApiManagementProduct() *schema.Resource { + return &schema.Resource{ + Read: dataSourceApiManagementProductRead, + + Schema: map[string]*schema.Schema{ + "product_id": azure.SchemaApiManagementChildDataSourceName(), + + "api_management_name": azure.SchemaApiManagementDataSourceName(), + + "resource_group_name": resourceGroupNameSchema(), + + "display_name": { + Type: schema.TypeString, + Computed: true, + }, + + "subscription_required": { + Type: schema.TypeBool, + Computed: true, + }, + + "approval_required": { + Type: schema.TypeBool, + Computed: true, + }, + + "published": { + Type: schema.TypeBool, + Computed: true, + }, + + "description": { + Type: schema.TypeString, + Computed: true, + }, + + "terms": { + Type: schema.TypeString, + Computed: true, + }, + + "subscriptions_limit": { + Type: schema.TypeInt, + Computed: true, + }, + }, + } +} +func dataSourceApiManagementProductRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).apiManagementProductsClient + ctx := meta.(*ArmClient).StopContext + + resourceGroup := d.Get("resource_group_name").(string) + serviceName := d.Get("api_management_name").(string) + productId := d.Get("product_id").(string) + + resp, err := client.Get(ctx, resourceGroup, serviceName, productId) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return fmt.Errorf("Product %q was not found in API Management 
Service %q / Resource Group %q", productId, serviceName, resourceGroup) + } + + return fmt.Errorf("Error making Read request on Product %q (API Management Service %q / Resource Group %q): %+v", productId, serviceName, resourceGroup, err) + } + + d.SetId(*resp.ID) + + if props := resp.ProductContractProperties; props != nil { + d.Set("approval_required", props.ApprovalRequired) + d.Set("description", props.Description) + d.Set("display_name", props.DisplayName) + d.Set("published", props.State == apimanagement.Published) + d.Set("subscriptions_limit", props.SubscriptionsLimit) + d.Set("subscription_required", props.SubscriptionRequired) + d.Set("terms", props.Terms) + } + + return nil +} diff --git a/azurerm/data_source_api_management_product_test.go b/azurerm/data_source_api_management_product_test.go new file mode 100644 index 000000000000..17293d2d300d --- /dev/null +++ b/azurerm/data_source_api_management_product_test.go @@ -0,0 +1,76 @@ +package azurerm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" +) + +func TestAccDataSourceAzureRMApiManagementProduct_basic(t *testing.T) { + dataSourceName := "data.azurerm_api_management_product.test" + rInt := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceApiManagementProduct_basic(rInt, location), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, "product_id", "test-product"), + resource.TestCheckResourceAttr(dataSourceName, "display_name", "Test Product"), + resource.TestCheckResourceAttr(dataSourceName, "subscription_required", "true"), + resource.TestCheckResourceAttr(dataSourceName, "approval_required", "true"), + resource.TestCheckResourceAttr(dataSourceName, "published", "true"), 
+ resource.TestCheckResourceAttr(dataSourceName, "description", "This is an example description"), + resource.TestCheckResourceAttr(dataSourceName, "terms", "These are some example terms and conditions"), + ), + }, + }, + }) +} + +func testAccDataSourceApiManagementProduct_basic(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "amtestRG-%d" + location = "%s" +} + +resource "azurerm_api_management" "test" { + name = "acctestAM-%d" + publisher_name = "pub1" + publisher_email = "pub1@email.com" + + sku { + name = "Developer" + capacity = 1 + } + + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" +} + + +resource "azurerm_api_management_product" "test" { + product_id = "test-product" + api_management_name = "${azurerm_api_management.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" + display_name = "Test Product" + subscription_required = true + approval_required = true + published = true + description = "This is an example description" + terms = "These are some example terms and conditions" +} + +data "azurerm_api_management_product" "test" { + product_id = "${azurerm_api_management_product.test.product_id}" + api_management_name = "${azurerm_api_management_product.test.api_management_name}" + resource_group_name = "${azurerm_api_management_product.test.resource_group_name}" +} +`, rInt, location, rInt) +} diff --git a/azurerm/data_source_api_management_user.go b/azurerm/data_source_api_management_user.go new file mode 100644 index 000000000000..350844ff2d65 --- /dev/null +++ b/azurerm/data_source_api_management_user.go @@ -0,0 +1,78 @@ +package azurerm + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func 
dataSourceArmApiManagementUser() *schema.Resource { + return &schema.Resource{ + Read: dataSourceArmApiManagementUserRead, + + Schema: map[string]*schema.Schema{ + "user_id": azure.SchemaApiManagementUserDataSourceName(), + + "api_management_name": azure.SchemaApiManagementDataSourceName(), + + "resource_group_name": resourceGroupNameForDataSourceSchema(), + + "first_name": { + Type: schema.TypeString, + Computed: true, + }, + + "email": { + Type: schema.TypeString, + Computed: true, + }, + + "last_name": { + Type: schema.TypeString, + Computed: true, + }, + + "note": { + Type: schema.TypeString, + Computed: true, + }, + + "state": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceArmApiManagementUserRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).apiManagementUsersClient + ctx := meta.(*ArmClient).StopContext + + resourceGroup := d.Get("resource_group_name").(string) + serviceName := d.Get("api_management_name").(string) + userId := d.Get("user_id").(string) + + resp, err := client.Get(ctx, resourceGroup, serviceName, userId) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return fmt.Errorf("User %q was not found in API Management Service %q / Resource Group %q", userId, serviceName, resourceGroup) + } + + return fmt.Errorf("Error making Read request on User %q (API Management Service %q / Resource Group %q): %+v", userId, serviceName, resourceGroup, err) + } + + d.SetId(*resp.ID) + + if props := resp.UserContractProperties; props != nil { + d.Set("first_name", props.FirstName) + d.Set("last_name", props.LastName) + d.Set("email", props.Email) + d.Set("note", props.Note) + d.Set("state", string(props.State)) + } + + return nil +} diff --git a/azurerm/data_source_api_management_user_test.go b/azurerm/data_source_api_management_user_test.go new file mode 100644 index 000000000000..c9c808566d82 --- /dev/null +++ b/azurerm/data_source_api_management_user_test.go @@ -0,0 +1,74 @@ 
+package azurerm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" +) + +func TestAccDataSourceAzureRMApiManagementUser_basic(t *testing.T) { + dataSourceName := "data.azurerm_api_management_user.test" + rInt := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceApiManagementUser_basic(rInt, location), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, "user_id", "test-user"), + resource.TestCheckResourceAttr(dataSourceName, "first_name", "Acceptance"), + resource.TestCheckResourceAttr(dataSourceName, "last_name", "Test"), + resource.TestCheckResourceAttr(dataSourceName, "email", fmt.Sprintf("azure-acctest%d@hashicorptest.com", rInt)), + resource.TestCheckResourceAttr(dataSourceName, "state", "active"), + resource.TestCheckResourceAttr(dataSourceName, "note", "Used for testing in dimension C-137."), + ), + }, + }, + }) +} + +func testAccDataSourceApiManagementUser_basic(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "amtestRG-%d" + location = "%s" +} + +resource "azurerm_api_management" "test" { + name = "acctestAM-%d" + publisher_name = "pub1" + publisher_email = "pub1@email.com" + + sku { + name = "Developer" + capacity = 1 + } + + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" +} + + +resource "azurerm_api_management_user" "test" { + user_id = "test-user" + api_management_name = "${azurerm_api_management.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" + first_name = "Acceptance" + last_name = "Test" + email = "azure-acctest%d@example.com" + state = "active" + note = "Used for 
testing in dimension C-137." +} + +data "azurerm_api_management_user" "test" { + user_id = "${azurerm_api_management_user.test.user_id}" + api_management_name = "${azurerm_api_management_user.test.api_management_name}" + resource_group_name = "${azurerm_api_management_user.test.resource_group_name}" +} +`, rInt, location, rInt, rInt) +} diff --git a/azurerm/data_source_app_service.go b/azurerm/data_source_app_service.go index d029eafdc3e5..445306507990 100644 --- a/azurerm/data_source_app_service.go +++ b/azurerm/data_source_app_service.go @@ -45,6 +45,11 @@ func dataSourceArmAppService() *schema.Resource { Computed: true, }, + "client_cert_enabled": { + Type: schema.TypeBool, + Computed: true, + }, + "app_settings": { Type: schema.TypeMap, Computed: true, @@ -187,6 +192,7 @@ func dataSourceArmAppServiceRead(d *schema.ResourceData, meta interface{}) error d.Set("client_affinity_enabled", props.ClientAffinityEnabled) d.Set("enabled", props.Enabled) d.Set("https_only", props.HTTPSOnly) + d.Set("client_cert_enabled", props.ClientCertEnabled) d.Set("default_site_hostname", props.DefaultHostName) d.Set("outbound_ip_addresses", props.OutboundIPAddresses) d.Set("possible_outbound_ip_addresses", props.PossibleOutboundIPAddresses) diff --git a/azurerm/data_source_app_service_plan_test.go b/azurerm/data_source_app_service_plan_test.go index 5cdd069b95ec..150ba6e05823 100644 --- a/azurerm/data_source_app_service_plan_test.go +++ b/azurerm/data_source_app_service_plan_test.go @@ -108,7 +108,7 @@ resource "azurerm_app_service_plan" "test" { per_site_scaling = true } - tags { + tags = { environment = "Test" } } diff --git a/azurerm/data_source_application_insights_test.go b/azurerm/data_source_application_insights_test.go index e5a3f40bdbf0..feac4cfebdd0 100644 --- a/azurerm/data_source_application_insights_test.go +++ b/azurerm/data_source_application_insights_test.go @@ -46,7 +46,7 @@ resource "azurerm_application_insights" "test" { resource_group_name = 
"${azurerm_resource_group.test.name}" application_type = "other" - tags { + tags = { "foo" = "bar" } } diff --git a/azurerm/data_source_application_security_group_test.go b/azurerm/data_source_application_security_group_test.go index ea0d5b96f176..23c561d0a5b6 100644 --- a/azurerm/data_source_application_security_group_test.go +++ b/azurerm/data_source_application_security_group_test.go @@ -81,7 +81,7 @@ resource "azurerm_application_security_group" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { "Hello" = "World" } } diff --git a/azurerm/data_source_availability_set.go b/azurerm/data_source_availability_set.go new file mode 100644 index 000000000000..0b2905d271a9 --- /dev/null +++ b/azurerm/data_source_availability_set.go @@ -0,0 +1,79 @@ +package azurerm + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func dataSourceArmAvailabilitySet() *schema.Resource { + return &schema.Resource{ + Read: dataSourceArmAvailabilitySetRead, + Schema: map[string]*schema.Schema{ + "resource_group_name": resourceGroupNameForDataSourceSchema(), + + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validate.NoEmptyStrings, + }, + + "location": { + Type: schema.TypeString, + Computed: true, + }, + + "platform_update_domain_count": { + Type: schema.TypeString, + Computed: true, + }, + + "platform_fault_domain_count": { + Type: schema.TypeString, + Computed: true, + }, + + "managed": { + Type: schema.TypeBool, + Computed: true, + }, + + "tags": tagsForDataSourceSchema(), + }, + } +} + +func dataSourceArmAvailabilitySetRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).availSetClient + ctx := meta.(*ArmClient).StopContext + + resGroup := 
d.Get("resource_group_name").(string) + name := d.Get("name").(string) + + resp, err := client.Get(ctx, resGroup, name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return fmt.Errorf("Error: Availability Set %q (Resource Group %q) was not found", name, resGroup) + } + + return fmt.Errorf("Error making Read request on Availability Set %q (Resource Group %q): %+v", name, resGroup, err) + } + + d.SetId(*resp.ID) + if location := resp.Location; location != nil { + d.Set("location", azureRMNormalizeLocation(*location)) + } + if resp.Sku != nil && resp.Sku.Name != nil { + d.Set("managed", strings.EqualFold(*resp.Sku.Name, "Aligned")) + } + if props := resp.AvailabilitySetProperties; props != nil { + d.Set("platform_update_domain_count", props.PlatformUpdateDomainCount) + d.Set("platform_fault_domain_count", props.PlatformFaultDomainCount) + } + flattenAndSetTags(d, resp.Tags) + + return nil +} diff --git a/azurerm/data_source_availability_set_test.go b/azurerm/data_source_availability_set_test.go new file mode 100644 index 000000000000..12d4893443ad --- /dev/null +++ b/azurerm/data_source_availability_set_test.go @@ -0,0 +1,56 @@ +package azurerm + +import ( + "fmt" + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccDataSourceAvailabilitySet_basic(t *testing.T) { + dataSourceName := "data.azurerm_availability_set.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAvailabilitySet_basic(ri, location), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(dataSourceName, "location"), + resource.TestCheckResourceAttrSet(dataSourceName, "name"), + resource.TestCheckResourceAttrSet(dataSourceName, "resource_group_name"), + 
resource.TestCheckResourceAttr(dataSourceName, "tags.%", "1"), + ), + }, + }, + }) +} + +func testAccDataSourceAvailabilitySet_basic(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%[1]d" + location = "%[2]s" +} + +resource "azurerm_availability_set" "test" { + name = "acctestavset-%[1]d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + tags = { + "foo" = "bar" + } +} + +data "azurerm_availability_set" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + name = "${azurerm_availability_set.test.name}" +} +`, rInt, location) +} diff --git a/azurerm/data_source_batch_account.go b/azurerm/data_source_batch_account.go index 2b2e738e66bd..f3de32673217 100644 --- a/azurerm/data_source_batch_account.go +++ b/azurerm/data_source_batch_account.go @@ -41,7 +41,6 @@ func dataSourceArmBatchAccountRead(d *schema.ResourceData, meta interface{}) err ctx := meta.(*ArmClient).StopContext resp, err := client.Get(ctx, resourceGroup, name) - if err != nil { if utils.ResponseWasNotFound(resp.Response) { return fmt.Errorf("Error: Batch account %q (Resource Group %q) was not found", name, resourceGroup) diff --git a/azurerm/data_source_batch_account_test.go b/azurerm/data_source_batch_account_test.go index 975888caff46..3e6a110538e7 100644 --- a/azurerm/data_source_batch_account_test.go +++ b/azurerm/data_source_batch_account_test.go @@ -100,7 +100,7 @@ resource "azurerm_batch_account" "test" { pool_allocation_mode = "BatchService" storage_account_id = "${azurerm_storage_account.test.id}" - tags { + tags = { env = "test" } } diff --git a/azurerm/data_source_batch_pool.go b/azurerm/data_source_batch_pool.go index ac0dfef6dbca..32204d397570 100644 --- a/azurerm/data_source_batch_pool.go +++ b/azurerm/data_source_batch_pool.go @@ -32,6 +32,10 @@ func dataSourceArmBatchPool() *schema.Resource { Type: schema.TypeString, Computed: 
true, }, + "max_tasks_per_node": { + Type: schema.TypeInt, + Computed: true, + }, "fixed_scale": { Type: schema.TypeList, Computed: true, @@ -191,6 +195,7 @@ func dataSourceArmBatchPoolRead(d *schema.ResourceData, meta interface{}) error if props := resp.PoolProperties; props != nil { d.Set("vm_size", props.VMSize) + d.Set("max_tasks_per_node", props.MaxTasksPerNode) if scaleSettings := props.ScaleSettings; scaleSettings != nil { if err := d.Set("auto_scale", azure.FlattenBatchPoolAutoScaleSettings(scaleSettings.AutoScale)); err != nil { diff --git a/azurerm/data_source_batch_pool_test.go b/azurerm/data_source_batch_pool_test.go index 463902acce27..06cb3214d8de 100644 --- a/azurerm/data_source_batch_pool_test.go +++ b/azurerm/data_source_batch_pool_test.go @@ -37,6 +37,7 @@ func TestAccDataSourceAzureRMBatchPool_complete(t *testing.T) { resource.TestCheckResourceAttr(dataSourceName, "fixed_scale.0.resize_timeout", "PT15M"), resource.TestCheckResourceAttr(dataSourceName, "fixed_scale.0.target_low_priority_nodes", "0"), resource.TestCheckResourceAttr(dataSourceName, "node_agent_sku_id", "batch.node.ubuntu 16.04"), + resource.TestCheckResourceAttr(dataSourceName, "max_tasks_per_node", "2"), resource.TestCheckResourceAttr(dataSourceName, "start_task.#", "1"), resource.TestCheckResourceAttr(dataSourceName, "start_task.0.max_task_retry_count", "1"), resource.TestCheckResourceAttr(dataSourceName, "start_task.0.environment.%", "1"), @@ -73,7 +74,7 @@ resource "azurerm_batch_account" "test" { pool_allocation_mode = "BatchService" storage_account_id = "${azurerm_storage_account.test.id}" - tags { + tags = { env = "test" } } @@ -85,6 +86,7 @@ resource "azurerm_batch_pool" "test" { display_name = "Test Acc Pool" vm_size = "Standard_A1" node_agent_sku_id = "batch.node.ubuntu 16.04" + max_tasks_per_node = 2 fixed_scale { target_dedicated_nodes = 2 @@ -103,7 +105,7 @@ resource "azurerm_batch_pool" "test" { max_task_retry_count = 1 wait_for_success = true - environment { + 
environment = { env = "TEST" } diff --git a/azurerm/data_source_builtin_role_definition.go b/azurerm/data_source_builtin_role_definition.go index b9cfeb1e07ba..0b6af0a8a1af 100644 --- a/azurerm/data_source_builtin_role_definition.go +++ b/azurerm/data_source_builtin_role_definition.go @@ -10,6 +10,12 @@ import ( func dataSourceArmBuiltInRoleDefinition() *schema.Resource { return &schema.Resource{ Read: dataSourceArmBuiltInRoleDefinitionRead, + + DeprecationMessage: `This Data Source has been deprecated in favour of the 'azurerm_role_definition' resource that now can look up role definitions by names. + +As such this Data Source will be removed in v2.0 of the AzureRM Provider. +`, + Schema: map[string]*schema.Schema{ "name": { Type: schema.TypeString, diff --git a/azurerm/data_source_cdn_profile_test.go b/azurerm/data_source_cdn_profile_test.go index 092a69d77d45..ace235b9d5f6 100644 --- a/azurerm/data_source_cdn_profile_test.go +++ b/azurerm/data_source_cdn_profile_test.go @@ -86,7 +86,7 @@ resource "azurerm_cdn_profile" "test" { resource_group_name = "${azurerm_resource_group.test.name}" sku = "Standard_Verizon" - tags { + tags = { environment = "Production" cost_center = "MSFT" } diff --git a/azurerm/data_source_data_lake_store_test.go b/azurerm/data_source_data_lake_store_test.go index 34f822d47697..05d545466a13 100644 --- a/azurerm/data_source_data_lake_store_test.go +++ b/azurerm/data_source_data_lake_store_test.go @@ -85,7 +85,7 @@ resource "azurerm_data_lake_store" "test" { tier = "Commitment_1TB" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { hello = "world" } } diff --git a/azurerm/data_source_dev_test_lab_test.go b/azurerm/data_source_dev_test_lab_test.go index 1400f8977568..fd54694e6d2a 100644 --- a/azurerm/data_source_dev_test_lab_test.go +++ b/azurerm/data_source_dev_test_lab_test.go @@ -82,7 +82,7 @@ resource "azurerm_dev_test_lab" "test" { resource_group_name = "${azurerm_resource_group.test.name}" storage_type = 
"Standard" - tags { + tags = { "Hello" = "World" } } diff --git a/azurerm/data_source_dns_zone_test.go b/azurerm/data_source_dns_zone_test.go index a1a98d004601..09ce1d2df2e4 100644 --- a/azurerm/data_source_dns_zone_test.go +++ b/azurerm/data_source_dns_zone_test.go @@ -100,7 +100,7 @@ resource "azurerm_dns_zone" "test" { name = "acctestzone%d.com" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { hello = "world" } } diff --git a/azurerm/data_source_image_test.go b/azurerm/data_source_image_test.go index 230d0af16436..e33945e10393 100644 --- a/azurerm/data_source_image_test.go +++ b/azurerm/data_source_image_test.go @@ -115,7 +115,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "Dev" } } @@ -159,7 +159,7 @@ resource "azurerm_virtual_machine" "testsource" { disable_password_authentication = false } - tags { + tags = { environment = "Dev" cost-center = "Ops" } @@ -178,7 +178,7 @@ resource "azurerm_image" "test" { caching = "None" } - tags { + tags = { environment = "Dev" cost-center = "Ops" } @@ -244,7 +244,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "Dev" } } @@ -288,7 +288,7 @@ resource "azurerm_virtual_machine" "testsource" { disable_password_authentication = false } - tags { + tags = { environment = "Dev" cost-center = "Ops" } @@ -307,7 +307,7 @@ resource "azurerm_image" "abc" { caching = "None" } - tags { + tags = { environment = "Dev" cost-center = "Ops" } @@ -326,7 +326,7 @@ resource "azurerm_image" "def" { caching = "None" } - tags { + tags = { environment = "Dev" cost-center = "Ops" } diff --git a/azurerm/data_source_key_vault_key.go b/azurerm/data_source_key_vault_key.go index a834102b6813..f7b39e9d16be 100644 --- a/azurerm/data_source_key_vault_key.go +++ b/azurerm/data_source_key_vault_key.go @@ -20,10 +20,24 @@ func 
dataSourceArmKeyVaultKey() *schema.Resource { ValidateFunc: azure.ValidateKeyVaultChildName, }, + "key_vault_id": { + Type: schema.TypeString, + Optional: true, //todo required in 2.0 + Computed: true, //todo removed in 2.0 + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + ConflictsWith: []string{"vault_uri"}, + }, + + //todo remove in 2.0 "vault_uri": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validate.URLIsHTTPS, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Deprecated: "This property has been deprecated in favour of the key_vault_id property. This will prevent a class of bugs as described in https://github.com/terraform-providers/terraform-provider-azurerm/issues/2396 and will be removed in version 2.0 of the provider", + ValidateFunc: validate.URLIsHTTPS, + ConflictsWith: []string{"key_vault_id"}, }, "key_type": { @@ -65,16 +79,41 @@ func dataSourceArmKeyVaultKey() *schema.Resource { } func dataSourceArmKeyVaultKeyRead(d *schema.ResourceData, meta interface{}) error { + vaultClient := meta.(*ArmClient).keyVaultClient client := meta.(*ArmClient).keyVaultManagementClient ctx := meta.(*ArmClient).StopContext - vaultUri := d.Get("vault_uri").(string) + keyVaultBaseUri := d.Get("vault_uri").(string) name := d.Get("name").(string) + keyVaultId := d.Get("key_vault_id").(string) + + if keyVaultBaseUri == "" { + if keyVaultId == "" { + return fmt.Errorf("one of `key_vault_id` or `vault_uri` must be set") + } + + pKeyVaultBaseUrl, err := azure.GetKeyVaultBaseUrlFromID(ctx, vaultClient, keyVaultId) + if err != nil { + return fmt.Errorf("Error looking up Key %q vault url from id %q: %+v", name, keyVaultId, err) + } + + keyVaultBaseUri = pKeyVaultBaseUrl + d.Set("vault_uri", keyVaultBaseUri) + } else { + id, err := azure.GetKeyVaultIDFromBaseUrl(ctx, vaultClient, keyVaultBaseUri) + if err != nil { + return fmt.Errorf("Error retrieving the Resource ID the Key Vault at URL %q: %s", keyVaultBaseUri, err) + } 
+ if id == nil { + return fmt.Errorf("Unable to locate the Resource ID for the Key Vault at URL %q: %s", keyVaultBaseUri, err) + } + d.Set("key_vault_id", id) + } - resp, err := client.GetKey(ctx, vaultUri, name, "") + resp, err := client.GetKey(ctx, keyVaultBaseUri, name, "") if err != nil { if utils.ResponseWasNotFound(resp.Response) { - return fmt.Errorf("Key %q was not found in Key Vault at URI %q", name, vaultUri) + return fmt.Errorf("Key %q was not found in Key Vault at URI %q", name, keyVaultBaseUri) } return err diff --git a/azurerm/data_source_key_vault_key_test.go b/azurerm/data_source_key_vault_key_test.go index 9208b711e164..4bd658a55c94 100644 --- a/azurerm/data_source_key_vault_key_test.go +++ b/azurerm/data_source_key_vault_key_test.go @@ -31,12 +31,13 @@ func TestAccDataSourceAzureRMKeyVaultKey_complete(t *testing.T) { } func testAccDataSourceKeyVaultKey_complete(rString string, location string) string { + t := testAccAzureRMKeyVaultKey_complete(rString, location) return fmt.Sprintf(` %s data "azurerm_key_vault_key" "test" { - name = "${azurerm_key_vault_key.test.name}" - vault_uri = "${azurerm_key_vault_key.test.vault_uri}" + name = "${azurerm_key_vault_key.test.name}" + key_vault_id = "${azurerm_key_vault.test.id}" } -`, testAccAzureRMKeyVaultKey_complete(rString, location)) +`, t) } diff --git a/azurerm/data_source_key_vault_secret.go b/azurerm/data_source_key_vault_secret.go index 3d5bc9ec8c11..98ab090d01ca 100644 --- a/azurerm/data_source_key_vault_secret.go +++ b/azurerm/data_source_key_vault_secret.go @@ -21,11 +21,24 @@ func dataSourceArmKeyVaultSecret() *schema.Resource { ValidateFunc: azure.ValidateKeyVaultChildName, }, + "key_vault_id": { + Type: schema.TypeString, + Optional: true, //todo required in 2.0 + Computed: true, //todo removed in 2.0 + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + ConflictsWith: []string{"vault_uri"}, + }, + + //todo remove in 2.0 "vault_uri": { - Type: schema.TypeString, - Required: true, - 
ForceNew: true, - ValidateFunc: validate.URLIsHTTPS, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Deprecated: "This property has been deprecated in favour of the key_vault_id property. This will prevent a class of bugs as described in https://github.com/terraform-providers/terraform-provider-azurerm/issues/2396 and will be removed in version 2.0 of the provider", + ValidateFunc: validate.URLIsHTTPS, + ConflictsWith: []string{"key_vault_id"}, }, "value": { @@ -50,17 +63,43 @@ func dataSourceArmKeyVaultSecret() *schema.Resource { } func dataSourceArmKeyVaultSecretRead(d *schema.ResourceData, meta interface{}) error { + vaultClient := meta.(*ArmClient).keyVaultClient client := meta.(*ArmClient).keyVaultManagementClient ctx := meta.(*ArmClient).StopContext name := d.Get("name").(string) - vaultUri := d.Get("vault_uri").(string) + keyVaultBaseUri := d.Get("vault_uri").(string) + keyVaultId := d.Get("key_vault_id").(string) + + if keyVaultBaseUri == "" { + if keyVaultId == "" { + return fmt.Errorf("one of `key_vault_id` or `vault_uri` must be set") + } + + pKeyVaultBaseUrl, err := azure.GetKeyVaultBaseUrlFromID(ctx, vaultClient, keyVaultId) + if err != nil { + return fmt.Errorf("Error looking up Secret %q vault url form id %q: %+v", name, keyVaultId, err) + } + + keyVaultBaseUri = pKeyVaultBaseUrl + d.Set("vault_uri", keyVaultBaseUri) + } else { + id, err := azure.GetKeyVaultIDFromBaseUrl(ctx, vaultClient, keyVaultBaseUri) + if err != nil { + return fmt.Errorf("Error retrieving the Resource ID the Key Vault at URL %q: %s", keyVaultBaseUri, err) + } + if id == nil { + return fmt.Errorf("Unable to locate the Resource ID for the Key Vault at URL %q: %s", keyVaultBaseUri, err) + } + + d.Set("key_vault_id", id) + } // we always want to get the latest version - resp, err := client.GetSecret(ctx, vaultUri, name, "") + resp, err := client.GetSecret(ctx, keyVaultBaseUri, name, "") if err != nil { if utils.ResponseWasNotFound(resp.Response) { 
- return fmt.Errorf("KeyVault Secret %q (KeyVault URI %q) does not exist", name, vaultUri) + return fmt.Errorf("KeyVault Secret %q (KeyVault URI %q) does not exist", name, keyVaultBaseUri) } return fmt.Errorf("Error making Read request on Azure KeyVault Secret %s: %+v", name, err) } diff --git a/azurerm/data_source_key_vault_secret_test.go b/azurerm/data_source_key_vault_secret_test.go index fda4d5b58896..58f1e77305ff 100644 --- a/azurerm/data_source_key_vault_secret_test.go +++ b/azurerm/data_source_key_vault_secret_test.go @@ -59,8 +59,8 @@ func testAccDataSourceKeyVaultSecret_basic(rString string, location string) stri %s data "azurerm_key_vault_secret" "test" { - name = "${azurerm_key_vault_secret.test.name}" - vault_uri = "${azurerm_key_vault_secret.test.vault_uri}" + name = "${azurerm_key_vault_secret.test.name}" + key_vault_id = "${azurerm_key_vault.test.id}" } `, r) } @@ -71,8 +71,8 @@ func testAccDataSourceKeyVaultSecret_complete(rString string, location string) s %s data "azurerm_key_vault_secret" "test" { - name = "${azurerm_key_vault_secret.test.name}" - vault_uri = "${azurerm_key_vault_secret.test.vault_uri}" + name = "${azurerm_key_vault_secret.test.name}" + key_vault_id = "${azurerm_key_vault.test.id}" } `, r) } diff --git a/azurerm/data_source_managed_disk_test.go b/azurerm/data_source_managed_disk_test.go index 5e31a9da4328..edfae14365c1 100644 --- a/azurerm/data_source_managed_disk_test.go +++ b/azurerm/data_source_managed_disk_test.go @@ -54,7 +54,7 @@ resource "azurerm_managed_disk" "test" { disk_size_gb = "10" zones = ["2"] - tags { + tags = { environment = "acctest" } } diff --git a/azurerm/data_source_monitor_diagnostic_categories_test.go b/azurerm/data_source_monitor_diagnostic_categories_test.go index 95db129c0207..6ac94c0598d9 100644 --- a/azurerm/data_source_monitor_diagnostic_categories_test.go +++ b/azurerm/data_source_monitor_diagnostic_categories_test.go @@ -94,7 +94,7 @@ resource "azurerm_storage_account" "test" { account_tier = 
"Standard" account_replication_type = "GRS" - tags { + tags = { environment = "staging" } } diff --git a/azurerm/data_source_network_interface_test.go b/azurerm/data_source_network_interface_test.go index 83e6268eaff6..b4e86bc1b85d 100644 --- a/azurerm/data_source_network_interface_test.go +++ b/azurerm/data_source_network_interface_test.go @@ -73,7 +73,7 @@ resource "azurerm_network_interface" "test" { private_ip_address_allocation = "Dynamic" } - tags { + tags = { environment = "staging" } } @@ -119,7 +119,7 @@ resource "azurerm_network_interface" "test" { private_ip_address_allocation = "Dynamic" } - tags { + tags = { environment = "staging" } } diff --git a/azurerm/data_source_network_security_group_test.go b/azurerm/data_source_network_security_group_test.go index 0113f604e05f..87597cec0770 100644 --- a/azurerm/data_source_network_security_group_test.go +++ b/azurerm/data_source_network_security_group_test.go @@ -151,7 +151,7 @@ resource "azurerm_network_security_group" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { environment = "staging" } } diff --git a/azurerm/data_source_network_watcher.go b/azurerm/data_source_network_watcher.go new file mode 100644 index 000000000000..5cfb25b36686 --- /dev/null +++ b/azurerm/data_source_network_watcher.go @@ -0,0 +1,54 @@ +package azurerm + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func dataSourceArmNetworkWatcher() *schema.Resource { + return &schema.Resource{ + Read: dataSourceArmNetworkWatcherRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validate.NoEmptyStrings, + }, + + "resource_group_name": resourceGroupNameForDataSourceSchema(), + "location": 
locationForDataSourceSchema(), + "tags": tagsForDataSourceSchema(), + }, + } +} + +func dataSourceArmNetworkWatcherRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).watcherClient + + name := d.Get("name").(string) + resourceGroup := d.Get("resource_group_name").(string) + + ctx := meta.(*ArmClient).StopContext + resp, err := client.Get(ctx, resourceGroup, name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return fmt.Errorf("Error: Network Watcher %q (Resource Group %q) was not found", name, resourceGroup) + } + return fmt.Errorf("Error making Read request on Network Watcher %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + d.SetId(*resp.ID) + + d.Set("name", name) + d.Set("resource_group_name", resourceGroup) + if location := resp.Location; location != nil { + d.Set("location", azureRMNormalizeLocation(*location)) + } + flattenAndSetTags(d, resp.Tags) + + return nil +} diff --git a/azurerm/data_source_network_watcher_test.go b/azurerm/data_source_network_watcher_test.go new file mode 100644 index 000000000000..ea60cbfd8938 --- /dev/null +++ b/azurerm/data_source_network_watcher_test.go @@ -0,0 +1,59 @@ +package azurerm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" +) + +func testAccDataSourceAzureRMNetworkWatcher_basic(t *testing.T) { + dataSourceName := "data.azurerm_network_watcher.test" + + ri := tf.AccRandTimeInt() + name := fmt.Sprintf("acctestnw-%d", ri) + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAzureRMNetworkWatcher_basicConfig(ri, location), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(dataSourceName, "id"), + resource.TestCheckResourceAttr(dataSourceName, "name", name), 
+ resource.TestCheckResourceAttrSet(dataSourceName, "resource_group_name"), + resource.TestCheckResourceAttr(dataSourceName, "location", azureRMNormalizeLocation(location)), + resource.TestCheckResourceAttr(dataSourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(dataSourceName, "tags.env", "test"), + ), + }, + }, + }) +} + +func testAccDataSourceAzureRMNetworkWatcher_basicConfig(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "%s" +} + +resource "azurerm_network_watcher" "test" { + name = "acctestnw-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + tags = { + env = "test" + } +} + +data "azurerm_network_watcher" "test" { + name = "${azurerm_network_watcher.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" +} +`, rInt, location, rInt) +} diff --git a/azurerm/data_source_policy_definition.go b/azurerm/data_source_policy_definition.go new file mode 100644 index 000000000000..006a91617f6d --- /dev/null +++ b/azurerm/data_source_policy_definition.go @@ -0,0 +1,117 @@ +package azurerm + +import ( + "fmt" + + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/policy" + "github.com/hashicorp/terraform/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" +) + +func dataSourceArmPolicyDefinition() *schema.Resource { + return &schema.Resource{ + Read: dataSourceArmPolicyDefinitionRead, + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validate.NoEmptyStrings, + }, + "management_group_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: azure.ValidateResourceIDOrEmpty, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + 
Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "policy_type": { + Type: schema.TypeString, + Computed: true, + }, + "policy_rule": { + Type: schema.TypeString, + Computed: true, + }, + "parameters": { + Type: schema.TypeString, + Computed: true, + }, + "metadata": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceArmPolicyDefinitionRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).policyDefinitionsClient + ctx := meta.(*ArmClient).StopContext + + name := d.Get("display_name").(string) + managementGroupID := d.Get("management_group_id").(string) + + var policyDefinitions policy.DefinitionListResultIterator + var err error + + if managementGroupID != "" { + policyDefinitions, err = client.ListByManagementGroupComplete(ctx, managementGroupID) + } else { + policyDefinitions, err = client.ListComplete(ctx) + } + + if err != nil { + return fmt.Errorf("Error loading Policy Definition List: %+v", err) + } + + var policyDefinition policy.Definition + + for policyDefinitions.NotDone() { + def := policyDefinitions.Value() + if def.DisplayName != nil && *def.DisplayName == name { + policyDefinition = def + break + } + + err = policyDefinitions.NextWithContext(ctx) + if err != nil { + return fmt.Errorf("Error loading Policy Definition List: %s", err) + } + } + + if policyDefinition.ID == nil { + return fmt.Errorf("Error loading Policy Definition List: could not find policy '%s'", name) + } + + d.SetId(*policyDefinition.ID) + d.Set("name", policyDefinition.Name) + d.Set("display_name", policyDefinition.DisplayName) + d.Set("description", policyDefinition.Description) + d.Set("type", policyDefinition.Type) + d.Set("policy_type", policyDefinition.PolicyType) + + if policyRuleStr := flattenJSON(policyDefinition.PolicyRule); policyRuleStr != "" { + d.Set("policy_rule", policyRuleStr) + } + + if metadataStr := 
flattenJSON(policyDefinition.Metadata); metadataStr != "" { + d.Set("metadata", metadataStr) + } + + if parametersStr := flattenJSON(policyDefinition.Parameters); parametersStr != "" { + d.Set("parameters", parametersStr) + } + + return nil +} diff --git a/azurerm/data_source_policy_definition_test.go b/azurerm/data_source_policy_definition_test.go new file mode 100644 index 000000000000..295eb16b993b --- /dev/null +++ b/azurerm/data_source_policy_definition_test.go @@ -0,0 +1,145 @@ +package azurerm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" +) + +func TestAccDataSourceAzureRMPolicyDefinition_builtIn(t *testing.T) { + dataSourceName := "data.azurerm_policy_definition.test" + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceBuiltInPolicyDefinition("Allowed resource types"), + Check: resource.ComposeTestCheckFunc( + testAzureRMClientConfigAttr(dataSourceName, "id", "/providers/Microsoft.Authorization/policyDefinitions/a08ec900-254a-4555-9bf5-e42af04b5c5c"), + testAzureRMClientConfigAttr(dataSourceName, "name", "a08ec900-254a-4555-9bf5-e42af04b5c5c"), + testAzureRMClientConfigAttr(dataSourceName, "display_name", "Allowed resource types"), + testAzureRMClientConfigAttr(dataSourceName, "type", "Microsoft.Authorization/policyDefinitions"), + testAzureRMClientConfigAttr(dataSourceName, "description", "This policy enables you to specify the resource types that your organization can deploy."), + ), + }, + }, + }) +} + +func TestAccDataSourceAzureRMPolicyDefinition_builtIn_AtManagementGroup(t *testing.T) { + dataSourceName := "data.azurerm_policy_definition.test" + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, 
+ Steps: []resource.TestStep{ + { + Config: testAccDataSourceBuiltInPolicyDefinitionAtManagementGroup("Allowed resource types"), + Check: resource.ComposeTestCheckFunc( + testAzureRMClientConfigAttr(dataSourceName, "id", "/providers/Microsoft.Authorization/policyDefinitions/a08ec900-254a-4555-9bf5-e42af04b5c5c"), + ), + }, + }, + }) +} + +func TestAccDataSourceAzureRMPolicyDefinition_custom(t *testing.T) { + ri := tf.AccRandTimeInt() + dataSourceName := "data.azurerm_policy_definition.test" + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceCustomPolicyDefinition(ri), + Check: resource.ComposeTestCheckFunc( + testAzureRMAttrExists(dataSourceName, "id"), + testAzureRMClientConfigAttr(dataSourceName, "name", fmt.Sprintf("acctestpol-%d", ri)), + testAzureRMClientConfigAttr(dataSourceName, "display_name", fmt.Sprintf("acctestpol-%d", ri)), + testAzureRMClientConfigAttr(dataSourceName, "type", "Microsoft.Authorization/policyDefinitions"), + testAzureRMClientConfigAttr(dataSourceName, "policy_type", "Custom"), + testAzureRMClientConfigAttr(dataSourceName, "policy_rule", "{\"if\":{\"not\":{\"field\":\"location\",\"in\":\"[parameters('allowedLocations')]\"}},\"then\":{\"effect\":\"audit\"}}"), + testAzureRMClientConfigAttr(dataSourceName, "parameters", "{\"allowedLocations\":{\"metadata\":{\"description\":\"The list of allowed locations for resources.\",\"displayName\":\"Allowed locations\",\"strongType\":\"location\"},\"type\":\"Array\"}}"), + testAzureRMClientConfigAttr(dataSourceName, "metadata", "{\"note\":\"azurerm acceptance test\"}"), + ), + }, + }, + }) +} + +func testAccDataSourceBuiltInPolicyDefinition(name string) string { + return fmt.Sprintf(` +data "azurerm_policy_definition" "test" { + display_name = "%s" +} +`, name) +} + +func testAccDataSourceBuiltInPolicyDefinitionAtManagementGroup(name string) string { + return fmt.Sprintf(` 
+ +data "azurerm_client_config" "current" {} + +data "azurerm_policy_definition" "test" { + display_name = "%s" + management_group_id = "${data.azurerm_client_config.current.tenant_id}" +} +`, name) +} + +func testAccDataSourceCustomPolicyDefinition(ri int) string { + return fmt.Sprintf(` +resource "azurerm_policy_definition" "test_policy" { + name = "acctestpol-%d" + policy_type = "Custom" + mode = "All" + display_name = "acctestpol-%d" + + policy_rule = < s.MaxAllowedGB { + return fmt.Errorf("service tier '%s' with a 'capacity' of %d must have a 'max_size_gb' no greater than %d GB, got %d GB", s.Tier, s.Capacity, int(s.MaxAllowedGB), int(s.MaxSizeGb)) + } + + if int(s.MaxSizeGb) < 50 { + return fmt.Errorf("service tier '%s', must have a 'max_size_gb' value equal to or greater than 50 GB, got %d GB", s.Tier, int(s.MaxSizeGb)) + } + + // Check to see if the max_size_gb value is valid for this SKU type and capacity + if supportedDTUMaxGBValues[int(s.MaxSizeGb)] != 1 { + return fmt.Errorf(getDTUNotValidSizeErrorMsg(s)) + } + } + + // All Other DTU based SKU Checks + if s.MinCapacity != math.Trunc(s.MinCapacity) { + return fmt.Errorf("service tier '%s' must have whole numbers as their 'minCapacity'", s.Tier) + } + + if s.MaxCapacity != math.Trunc(s.MaxCapacity) { + return fmt.Errorf("service tier '%s' must have whole numbers as their 'maxCapacity'", s.Tier) + } + + return nil +} + +func doVCoreSKUValidation(s sku) error { + + if s.MaxAllowedGB == 0 { + return fmt.Errorf(getVCoreCapacityErrorMsg(s)) + } + + if s.MaxSizeGb > s.MaxAllowedGB { + return fmt.Errorf("service tier '%s' %s with a 'capacity' of %d vCores must have a 'max_size_gb' between 5 GB and %d GB, got %d GB", s.Tier, s.Family, s.Capacity, int(s.MaxAllowedGB), int(s.MaxSizeGb)) + } + + if int(s.MaxSizeGb) < 5 { + return fmt.Errorf("service tier '%s' must have a 'max_size_gb' value equal to or greater than 5 GB, got %d GB", s.Tier, int(s.MaxSizeGb)) + } + + if s.MaxSizeGb != math.Trunc(s.MaxSizeGb) { + 
return fmt.Errorf("'max_size_gb' must be a whole number, got %f GB", s.MaxSizeGb) + } + + if s.MaxCapacity > float64(s.Capacity) { + return fmt.Errorf("service tier '%s' perDatabaseSettings 'maxCapacity'(%d) must not be higher than the SKUs 'capacity'(%d) value", s.Tier, int(s.MaxCapacity), s.Capacity) + } + + if s.MinCapacity > s.MaxCapacity { + return fmt.Errorf("perDatabaseSettings 'maxCapacity'(%d) must be greater than or equal to the perDatabaseSettings 'minCapacity'(%d) value", int(s.MaxCapacity), int(s.MinCapacity)) + } + + return nil +} diff --git a/azurerm/helpers/azure/key_vault.go b/azurerm/helpers/azure/key_vault.go new file mode 100644 index 000000000000..abdb28865c15 --- /dev/null +++ b/azurerm/helpers/azure/key_vault.go @@ -0,0 +1,121 @@ +package azure + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2018-02-14/keyvault" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func GetKeyVaultBaseUrlFromID(ctx context.Context, client keyvault.VaultsClient, keyVaultId string) (string, error) { + + if keyVaultId == "" { + return "", fmt.Errorf("keyVaultId is empty") + } + + id, err := ParseAzureResourceID(keyVaultId) + if err != nil { + return "", err + } + resourceGroup := id.ResourceGroup + + vaultName, ok := id.Path["vaults"] + if !ok { + return "", fmt.Errorf("resource id does not contain `vaults`: %q", keyVaultId) + } + + resp, err := client.Get(ctx, resourceGroup, vaultName) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return "", fmt.Errorf("Error unable to find KeyVault %q (Resource Group %q): %+v", vaultName, resourceGroup, err) + } + return "", fmt.Errorf("Error making Read request on KeyVault %q (Resource Group %q): %+v", vaultName, resourceGroup, err) + } + + if resp.Properties == nil || resp.Properties.VaultURI == nil { + return "", fmt.Errorf("vault (%s) response properties or VaultURI is nil", keyVaultId) + } + + return *resp.Properties.VaultURI, 
nil +} + +func GetKeyVaultIDFromBaseUrl(ctx context.Context, client keyvault.VaultsClient, keyVaultUrl string) (*string, error) { + list, err := client.ListComplete(ctx, utils.Int32(1000)) + if err != nil { + return nil, fmt.Errorf("Error GetKeyVaultId unable to list Key Vaults %v", err) + } + + for list.NotDone() { + v := list.Value() + + if v.ID == nil { + return nil, fmt.Errorf("v.ID was nil") + } + + vid, err := ParseAzureResourceID(*v.ID) + if err != nil { + return nil, fmt.Errorf("Error parsing ID for Key Vault URI %q: %s", *v.ID, err) + } + resourceGroup := vid.ResourceGroup + name := vid.Path["vaults"] + + //resp does not appear to contain the vault properties, so lets fetch them + get, err := client.Get(ctx, resourceGroup, name) + if err != nil { + if utils.ResponseWasNotFound(get.Response) { + if e := list.NextWithContext(ctx); e != nil { + return nil, fmt.Errorf("Error getting next vault on KeyVault url %q : %+v", keyVaultUrl, err) + } + continue + } + return nil, fmt.Errorf("Error making Read request on KeyVault %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + if get.ID == nil || get.Properties == nil || get.Properties.VaultURI == nil { + return nil, fmt.Errorf("KeyVault %q (Resource Group %q) has nil ID, properties or vault URI", name, resourceGroup) + } + + if keyVaultUrl == *get.Properties.VaultURI { + return get.ID, nil + } + + if e := list.NextWithContext(ctx); e != nil { + return nil, fmt.Errorf("Error getting next vault on KeyVault url %q : %+v", keyVaultUrl, err) + } + } + + // we haven't found it, but Data Sources and Resources need to handle this error separately + return nil, nil +} + +func KeyVaultExists(ctx context.Context, client keyvault.VaultsClient, keyVaultId string) (bool, error) { + + if keyVaultId == "" { + return false, fmt.Errorf("keyVaultId is empty") + } + + id, err := ParseAzureResourceID(keyVaultId) + if err != nil { + return false, err + } + resourceGroup := id.ResourceGroup + + vaultName, ok := 
id.Path["vaults"] + if !ok { + return false, fmt.Errorf("resource id does not contain `vaults`: %q", keyVaultId) + } + + resp, err := client.Get(ctx, resourceGroup, vaultName) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return false, nil + } + return false, fmt.Errorf("Error making Read request on KeyVault %q (Resource Group %q): %+v", vaultName, resourceGroup, err) + } + + if resp.Properties == nil || resp.Properties.VaultURI == nil { + return false, fmt.Errorf("vault (%s) response properties or VaultURI is nil", keyVaultId) + } + + return true, nil +} diff --git a/azurerm/helpers/azure/key_vault_access_policy.go b/azurerm/helpers/azure/key_vault_access_policy.go index bb14984191e1..270b72773910 100644 --- a/azurerm/helpers/azure/key_vault_access_policy.go +++ b/azurerm/helpers/azure/key_vault_access_policy.go @@ -4,7 +4,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2018-02-14/keyvault" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" - "github.com/satori/go.uuid" + uuid "github.com/satori/go.uuid" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/suppress" ) diff --git a/azurerm/helpers/validate/api_management.go b/azurerm/helpers/validate/api_management.go index b0780799a52c..d75d4208133a 100644 --- a/azurerm/helpers/validate/api_management.go +++ b/azurerm/helpers/validate/api_management.go @@ -5,6 +5,17 @@ import ( "regexp" ) +func ApiManagementChildName(v interface{}, k string) (warnings []string, errors []error) { + value := v.(string) + + // from the portal: `The field may contain only numbers, letters, and dash (-) sign when preceded and followed by number or a letter.` + if matched := regexp.MustCompile(`(^[a-zA-Z0-9])([a-zA-Z0-9-]{1,78})([a-zA-Z0-9]$)`).Match([]byte(value)); !matched { + errors = append(errors, fmt.Errorf("%q may only contain alphanumeric characters and dashes up to 80 characters in length", k)) + } + + return 
warnings, errors +} + func ApiManagementServiceName(v interface{}, k string) (warnings []string, errors []error) { value := v.(string) @@ -15,6 +26,19 @@ func ApiManagementServiceName(v interface{}, k string) (warnings []string, error return warnings, errors } +func ApiManagementUserName(v interface{}, k string) (warnings []string, errors []error) { + value := v.(string) + + // TODO: confirm this + + // from the portal: `The field may contain only numbers, letters, and dash (-) sign when preceded and followed by number or a letter.` + if matched := regexp.MustCompile(`(^[a-zA-Z0-9])([a-zA-Z0-9-]{1,78})([a-zA-Z0-9]$)`).Match([]byte(value)); !matched { + errors = append(errors, fmt.Errorf("%q may only contain alphanumeric characters and dashes up to 80 characters in length", k)) + } + + return warnings, errors +} + func ApiManagementServicePublisherName(v interface{}, k string) (warnings []string, errors []error) { value := v.(string) diff --git a/azurerm/helpers/validate/kubernetes.go b/azurerm/helpers/validate/kubernetes.go new file mode 100644 index 000000000000..fd7c7349b935 --- /dev/null +++ b/azurerm/helpers/validate/kubernetes.go @@ -0,0 +1,39 @@ +package validate + +import ( + "fmt" + "regexp" +) + +func KubernetesAdminUserName(i interface{}, k string) (warnings []string, errors []error) { + adminUserName := i.(string) + + re := regexp.MustCompile(`^[A-Za-z][-A-Za-z0-9_]*$`) + if re != nil && !re.MatchString(adminUserName) { + errors = append(errors, fmt.Errorf("%s must start with alphabet and/or continue with alphanumeric characters, underscores, hyphens. 
Got %q.", k, adminUserName)) + } + + return warnings, errors +} + +func KubernetesAgentPoolName(i interface{}, k string) (warnings []string, errors []error) { + agentPoolName := i.(string) + + re := regexp.MustCompile(`^[a-z]{1}[a-z0-9]{0,11}$`) + if re != nil && !re.MatchString(agentPoolName) { + errors = append(errors, fmt.Errorf("%s must start with a lowercase letter, have max length of 12, and only have characters a-z0-9. Got %q.", k, agentPoolName)) + } + + return warnings, errors +} + +func KubernetesDNSPrefix(i interface{}, k string) (warnings []string, errors []error) { + dnsPrefix := i.(string) + + re := regexp.MustCompile(`^[a-zA-Z][-a-zA-Z0-9]{0,43}[a-zA-Z0-9]$`) + if re != nil && !re.MatchString(dnsPrefix) { + errors = append(errors, fmt.Errorf("%s must contain between 2 and 45 characters. The name can contain only letters, numbers, and hyphens. The name must start with a letter and must end with an alphanumeric character.. Got %q.", k, dnsPrefix)) + } + + return warnings, errors +} diff --git a/azurerm/helpers/validate/kubernetes_test.go b/azurerm/helpers/validate/kubernetes_test.go new file mode 100644 index 000000000000..cd7d7669c9a6 --- /dev/null +++ b/azurerm/helpers/validate/kubernetes_test.go @@ -0,0 +1,135 @@ +package validate + +import ( + "testing" +) + +func TestKubernetesAdminUserName(t *testing.T) { + cases := []struct { + AdminUserName string + Errors int + }{ + { + AdminUserName: "", + Errors: 1, + }, + { + AdminUserName: "Abc-123_abc", + Errors: 0, + }, + { + AdminUserName: "123abc", + Errors: 1, + }, + } + + for _, tc := range cases { + t.Run(tc.AdminUserName, func(t *testing.T) { + _, errors := KubernetesAdminUserName(tc.AdminUserName, "test") + + if len(errors) != tc.Errors { + t.Fatalf("Expected AdminUserName to return %d error(s) not %d", tc.Errors, len(errors)) + } + }) + } +} + +func TestKubernetesAgentPoolName(t *testing.T) { + cases := []struct { + AgentPoolName string + Errors int + }{ + { + AgentPoolName: "", + Errors: 1, + }, 
+ { + AgentPoolName: "ABC123", + Errors: 1, + }, + { + AgentPoolName: "abc123", + Errors: 0, + }, + { + AgentPoolName: "123abc", + Errors: 1, + }, + { + AgentPoolName: "hi", + Errors: 0, + }, + { + AgentPoolName: "hello", + Errors: 0, + }, + { + AgentPoolName: "hello-world", + Errors: 1, + }, + { + AgentPoolName: "helloworld123", + Errors: 1, + }, + { + AgentPoolName: "hello_world", + Errors: 1, + }, + { + AgentPoolName: "Hello-World", + Errors: 1, + }, + { + AgentPoolName: "20202020", + Errors: 1, + }, + { + AgentPoolName: "h20202020", + Errors: 0, + }, + { + AgentPoolName: "ABC123!@£", + Errors: 1, + }, + } + + for _, tc := range cases { + t.Run(tc.AgentPoolName, func(t *testing.T) { + _, errors := KubernetesAgentPoolName(tc.AgentPoolName, "test") + + if len(errors) != tc.Errors { + t.Fatalf("Expected AgentPoolName to return %d error(s) not %d", tc.Errors, len(errors)) + } + }) + } +} + +func TestKubernetesDNSPrefix(t *testing.T) { + cases := []struct { + DNSPrefix string + Errors int + }{ + { + DNSPrefix: "", + Errors: 1, + }, + { + DNSPrefix: "a", + Errors: 1, + }, + { + DNSPrefix: "aBc-123abc", + Errors: 0, + }, + } + + for _, tc := range cases { + t.Run(tc.DNSPrefix, func(t *testing.T) { + _, errors := KubernetesDNSPrefix(tc.DNSPrefix, "test") + + if len(errors) != tc.Errors { + t.Fatalf("Expected DNSPrefix to return %d error(s) not %d", tc.Errors, len(errors)) + } + }) + } +} diff --git a/azurerm/helpers/validate/network.go b/azurerm/helpers/validate/network.go index 1b5841a379e0..fac8c52d1e6e 100644 --- a/azurerm/helpers/validate/network.go +++ b/azurerm/helpers/validate/network.go @@ -3,6 +3,7 @@ package validate import ( "fmt" "net" + "regexp" ) func IPv6Address(i interface{}, k string) (warnings []string, errors []error) { @@ -29,6 +30,17 @@ func validateIpv6Address(i interface{}, k string, allowEmpty bool) (warnings []s } +func CIDR(i interface{}, k string) (warnings []string, errors []error) { + cidr := i.(string) + + re := 
regexp.MustCompile(`^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$`) + if re != nil && !re.MatchString(cidr) { + errors = append(errors, fmt.Errorf("%s must start with IPV4 address and/or slash, number of bits (0-32) as prefix. Example: 127.0.0.1/8. Got %q.", k, cidr)) + } + + return warnings, errors +} + func IPv4Address(i interface{}, k string) (warnings []string, errors []error) { return validateIpv4Address(i, k, false) } diff --git a/azurerm/helpers/validate/network_test.go b/azurerm/helpers/validate/network_test.go index 0c59bb15c756..263b288aaad1 100644 --- a/azurerm/helpers/validate/network_test.go +++ b/azurerm/helpers/validate/network_test.go @@ -5,6 +5,44 @@ import ( "testing" ) +func TestCIDR(t *testing.T) { + cases := []struct { + CIDR string + Errors int + }{ + { + CIDR: "", + Errors: 1, + }, + { + CIDR: "0.0.0.0", + Errors: 0, + }, + { + CIDR: "127.0.0.1/8", + Errors: 0, + }, + { + CIDR: "127.0.0.1/33", + Errors: 1, + }, + { + CIDR: "127.0.0.1/-1", + Errors: 1, + }, + } + + for _, tc := range cases { + t.Run(tc.CIDR, func(t *testing.T) { + _, errors := CIDR(tc.CIDR, "test") + + if len(errors) != tc.Errors { + t.Fatalf("Expected CIDR to return %d error(s) not %d", tc.Errors, len(errors)) + } + }) + } +} + func TestIPv6Address(t *testing.T) { cases := []struct { IP string diff --git a/azurerm/loadbalancer.go b/azurerm/loadbalancer.go index 1a5dec709790..d2c4cef90d2c 100644 --- a/azurerm/loadbalancer.go +++ b/azurerm/loadbalancer.go @@ -83,6 +83,20 @@ func findLoadBalancerRuleByName(lb *network.LoadBalancer, name string) (*network return nil, -1, false } +func findLoadBalancerOutboundRuleByName(lb *network.LoadBalancer, name string) (*network.OutboundRule, int, bool) { + if lb == nil || lb.LoadBalancerPropertiesFormat == nil || lb.LoadBalancerPropertiesFormat.OutboundRules == nil { + return nil, -1, false + } + + for i, or := range *lb.LoadBalancerPropertiesFormat.OutboundRules { + if or.Name != nil && *or.Name == name { + return &or, i, 
true + } + } + + return nil, -1, false +} + func findLoadBalancerNatRuleByName(lb *network.LoadBalancer, name string) (*network.InboundNatRule, int, bool) { if lb == nil || lb.LoadBalancerPropertiesFormat == nil || lb.LoadBalancerPropertiesFormat.InboundNatRules == nil { return nil, -1, false diff --git a/azurerm/provider.go b/azurerm/provider.go index a39fb930abc0..9429b84147b4 100644 --- a/azurerm/provider.go +++ b/azurerm/provider.go @@ -99,10 +99,14 @@ func Provider() terraform.ResourceProvider { DataSourcesMap: map[string]*schema.Resource{ "azurerm_api_management": dataSourceApiManagementService(), + "azurerm_api_management_group": dataSourceApiManagementGroup(), + "azurerm_api_management_product": dataSourceApiManagementProduct(), + "azurerm_api_management_user": dataSourceArmApiManagementUser(), "azurerm_app_service_plan": dataSourceAppServicePlan(), "azurerm_app_service": dataSourceArmAppService(), "azurerm_application_insights": dataSourceArmApplicationInsights(), "azurerm_application_security_group": dataSourceArmApplicationSecurityGroup(), + "azurerm_availability_set": dataSourceArmAvailabilitySet(), "azurerm_azuread_application": dataSourceArmAzureADApplication(), "azurerm_azuread_service_principal": dataSourceArmActiveDirectoryServicePrincipal(), "azurerm_batch_account": dataSourceArmBatchAccount(), @@ -133,9 +137,11 @@ func Provider() terraform.ResourceProvider { "azurerm_monitor_log_profile": dataSourceArmMonitorLogProfile(), "azurerm_network_interface": dataSourceArmNetworkInterface(), "azurerm_network_security_group": dataSourceArmNetworkSecurityGroup(), + "azurerm_network_watcher": dataSourceArmNetworkWatcher(), "azurerm_notification_hub_namespace": dataSourceNotificationHubNamespace(), "azurerm_notification_hub": dataSourceNotificationHub(), "azurerm_platform_image": dataSourceArmPlatformImage(), + "azurerm_policy_definition": dataSourceArmPolicyDefinition(), "azurerm_public_ip": dataSourceArmPublicIP(), "azurerm_public_ips": 
dataSourceArmPublicIPs(), "azurerm_recovery_services_vault": dataSourceArmRecoveryServicesVault(), @@ -143,6 +149,7 @@ func Provider() terraform.ResourceProvider { "azurerm_role_definition": dataSourceArmRoleDefinition(), "azurerm_route_table": dataSourceArmRouteTable(), "azurerm_scheduler_job_collection": dataSourceArmSchedulerJobCollection(), + "azurerm_servicebus_namespace": dataSourceArmServiceBusNamespace(), "azurerm_shared_image_gallery": dataSourceArmSharedImageGallery(), "azurerm_shared_image_version": dataSourceArmSharedImageVersion(), "azurerm_shared_image": dataSourceArmSharedImage(), @@ -160,6 +167,9 @@ func Provider() terraform.ResourceProvider { ResourcesMap: map[string]*schema.Resource{ "azurerm_api_management": resourceArmApiManagementService(), + "azurerm_api_management_group": resourceArmApiManagementGroup(), + "azurerm_api_management_product": resourceArmApiManagementProduct(), + "azurerm_api_management_user": resourceArmApiManagementUser(), "azurerm_app_service_active_slot": resourceArmAppServiceActiveSlot(), "azurerm_app_service_custom_hostname_binding": resourceArmAppServiceCustomHostnameBinding(), "azurerm_app_service_plan": resourceArmAppServicePlan(), @@ -186,6 +196,7 @@ func Provider() terraform.ResourceProvider { "azurerm_cdn_endpoint": resourceArmCdnEndpoint(), "azurerm_cdn_profile": resourceArmCdnProfile(), "azurerm_cognitive_account": resourceArmCognitiveAccount(), + "azurerm_connection_monitor": resourceArmConnectionMonitor(), "azurerm_container_group": resourceArmContainerGroup(), "azurerm_container_registry": resourceArmContainerRegistry(), "azurerm_container_service": resourceArmContainerService(), @@ -213,6 +224,7 @@ func Provider() terraform.ResourceProvider { "azurerm_dns_srv_record": resourceArmDnsSrvRecord(), "azurerm_dns_txt_record": resourceArmDnsTxtRecord(), "azurerm_dns_zone": resourceArmDnsZone(), + "azurerm_eventgrid_domain": resourceArmEventGridDomain(), "azurerm_eventgrid_topic": resourceArmEventGridTopic(), 
"azurerm_eventhub_authorization_rule": resourceArmEventHubAuthorizationRule(), "azurerm_eventhub_consumer_group": resourceArmEventHubConsumerGroup(), @@ -239,10 +251,12 @@ func Provider() terraform.ResourceProvider { "azurerm_lb_nat_pool": resourceArmLoadBalancerNatPool(), "azurerm_lb_nat_rule": resourceArmLoadBalancerNatRule(), "azurerm_lb_probe": resourceArmLoadBalancerProbe(), + "azurerm_lb_outbound_rule": resourceArmLoadBalancerOutboundRule(), "azurerm_lb_rule": resourceArmLoadBalancerRule(), "azurerm_lb": resourceArmLoadBalancer(), "azurerm_local_network_gateway": resourceArmLocalNetworkGateway(), "azurerm_log_analytics_solution": resourceArmLogAnalyticsSolution(), + "azurerm_log_analytics_linked_service": resourceArmLogAnalyticsLinkedService(), "azurerm_log_analytics_workspace_linked_service": resourceArmLogAnalyticsWorkspaceLinkedService(), "azurerm_log_analytics_workspace": resourceArmLogAnalyticsWorkspace(), "azurerm_logic_app_action_custom": resourceArmLogicAppActionCustom(), @@ -256,12 +270,15 @@ func Provider() terraform.ResourceProvider { "azurerm_management_lock": resourceArmManagementLock(), "azurerm_mariadb_database": resourceArmMariaDbDatabase(), "azurerm_mariadb_server": resourceArmMariaDbServer(), + "azurerm_media_services_account": resourceArmMediaServicesAccount(), "azurerm_metric_alertrule": resourceArmMetricAlertRule(), + "azurerm_monitor_autoscale_setting": resourceArmMonitorAutoScaleSetting(), "azurerm_monitor_action_group": resourceArmMonitorActionGroup(), "azurerm_monitor_activity_log_alert": resourceArmMonitorActivityLogAlert(), "azurerm_monitor_diagnostic_setting": resourceArmMonitorDiagnosticSetting(), "azurerm_monitor_log_profile": resourceArmMonitorLogProfile(), "azurerm_monitor_metric_alert": resourceArmMonitorMetricAlert(), + "azurerm_monitor_metric_alertrule": resourceArmMonitorMetricAlertRule(), "azurerm_mssql_elasticpool": resourceArmMsSqlElasticPool(), "azurerm_mysql_configuration": resourceArmMySQLConfiguration(), 
"azurerm_mysql_database": resourceArmMySqlDatabase(), @@ -269,6 +286,7 @@ func Provider() terraform.ResourceProvider { "azurerm_mysql_server": resourceArmMySqlServer(), "azurerm_mysql_virtual_network_rule": resourceArmMySqlVirtualNetworkRule(), "azurerm_network_interface_application_gateway_backend_address_pool_association": resourceArmNetworkInterfaceApplicationGatewayBackendAddressPoolAssociation(), + "azurerm_network_interface_application_security_group_association": resourceArmNetworkInterfaceApplicationSecurityGroupAssociation(), "azurerm_network_interface_backend_address_pool_association": resourceArmNetworkInterfaceBackendAddressPoolAssociation(), "azurerm_network_interface_nat_rule_association": resourceArmNetworkInterfaceNatRuleAssociation(), "azurerm_network_interface": resourceArmNetworkInterface(), diff --git a/azurerm/required_resource_providers.go b/azurerm/required_resource_providers.go index abfe96e74654..e159b46a887d 100644 --- a/azurerm/required_resource_providers.go +++ b/azurerm/required_resource_providers.go @@ -42,6 +42,7 @@ func requiredResourceProviders() map[string]struct{} { "Microsoft.Logic": {}, "Microsoft.ManagedIdentity": {}, "Microsoft.Management": {}, + "Microsoft.Media": {}, "Microsoft.Network": {}, "Microsoft.NotificationHubs": {}, "Microsoft.OperationalInsights": {}, diff --git a/azurerm/resource_arm_api_management.go b/azurerm/resource_arm_api_management.go index 64eccaa010bb..130b1fd23146 100644 --- a/azurerm/resource_arm_api_management.go +++ b/azurerm/resource_arm_api_management.go @@ -6,7 +6,7 @@ import ( "strconv" "strings" - "github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement" + "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" @@ -34,12 +34,7 @@ func 
resourceArmApiManagementService() *schema.Resource { }, Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validate.ApiManagementServiceName, - }, + "name": azure.SchemaApiManagementName(), "resource_group_name": resourceGroupNameSchema(), diff --git a/azurerm/resource_arm_api_management_group.go b/azurerm/resource_arm_api_management_group.go new file mode 100644 index 000000000000..6d483c3baf41 --- /dev/null +++ b/azurerm/resource_arm_api_management_group.go @@ -0,0 +1,157 @@ +package azurerm + +import ( + "fmt" + "log" + + "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceArmApiManagementGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceArmApiManagementGroupCreateUpdate, + Read: resourceArmApiManagementGroupRead, + Update: resourceArmApiManagementGroupCreateUpdate, + Delete: resourceArmApiManagementGroupDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": azure.SchemaApiManagementChildName(), + + "resource_group_name": resourceGroupNameSchema(), + + "api_management_name": azure.SchemaApiManagementName(), + + "display_name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validate.NoEmptyStrings, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + }, + + "external_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: 
string(apimanagement.Custom), + ValidateFunc: validation.StringInSlice([]string{ + string(apimanagement.Custom), + string(apimanagement.External), + }, false), + }, + }, + } +} + +func resourceArmApiManagementGroupCreateUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).apiManagementGroupClient + ctx := meta.(*ArmClient).StopContext + + name := d.Get("name").(string) + resourceGroup := d.Get("resource_group_name").(string) + serviceName := d.Get("api_management_name").(string) + + displayName := d.Get("display_name").(string) + description := d.Get("description").(string) + externalID := d.Get("external_id").(string) + groupType := d.Get("type").(string) + + parameters := apimanagement.GroupCreateParameters{ + GroupCreateParametersProperties: &apimanagement.GroupCreateParametersProperties{ + DisplayName: utils.String(displayName), + Description: utils.String(description), + ExternalID: utils.String(externalID), + Type: apimanagement.GroupType(groupType), + }, + } + + if _, err := client.CreateOrUpdate(ctx, resourceGroup, serviceName, name, parameters, ""); err != nil { + return fmt.Errorf("Error creating or updating Group %q (Resource Group %q / API Management Service %q): %+v", name, resourceGroup, serviceName, err) + } + + resp, err := client.Get(ctx, resourceGroup, serviceName, name) + if err != nil { + return fmt.Errorf("Error retrieving Group %q (Resource Group %q / API Management Service %q): %+v", name, resourceGroup, serviceName, err) + } + if resp.ID == nil { + return fmt.Errorf("Cannot read ID for Group %q (Resource Group %q / API Management Service %q)", name, resourceGroup, serviceName) + } + d.SetId(*resp.ID) + + return resourceArmApiManagementGroupRead(d, meta) +} + +func resourceArmApiManagementGroupRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).apiManagementGroupClient + ctx := meta.(*ArmClient).StopContext + + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return 
err + } + resourceGroup := id.ResourceGroup + serviceName := id.Path["service"] + name := id.Path["groups"] + + resp, err := client.Get(ctx, resourceGroup, serviceName, name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[DEBUG] Group %q (Resource Group %q / API Management Service %q) was not found - removing from state!", name, resourceGroup, serviceName) + d.SetId("") + return nil + } + + return fmt.Errorf("Error making Read request for Group %q (Resource Group %q / API Management Service %q): %+v", name, resourceGroup, serviceName, err) + } + + d.Set("name", resp.Name) + d.Set("resource_group_name", resourceGroup) + d.Set("api_management_name", serviceName) + + if properties := resp.GroupContractProperties; properties != nil { + d.Set("display_name", properties.DisplayName) + d.Set("description", properties.Description) + d.Set("external_id", properties.ExternalID) + d.Set("type", string(properties.Type)) + } + + return nil +} + +func resourceArmApiManagementGroupDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).apiManagementGroupClient + ctx := meta.(*ArmClient).StopContext + + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return err + } + resourceGroup := id.ResourceGroup + serviceName := id.Path["service"] + name := id.Path["groups"] + + if resp, err := client.Delete(ctx, resourceGroup, serviceName, name, ""); err != nil { + if !utils.ResponseWasNotFound(resp) { + return fmt.Errorf("Error deleting Group %q (Resource Group %q / API Management Service %q): %+v", name, resourceGroup, serviceName, err) + } + } + + return nil +} diff --git a/azurerm/resource_arm_api_management_group_test.go b/azurerm/resource_arm_api_management_group_test.go new file mode 100644 index 000000000000..fa48a8b5e061 --- /dev/null +++ b/azurerm/resource_arm_api_management_group_test.go @@ -0,0 +1,218 @@ +package azurerm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" 
+ "github.com/hashicorp/terraform/terraform" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func TestAccAzureRMAPIManagementGroup_basic(t *testing.T) { + resourceName := "azurerm_api_management_group.test" + ri := tf.AccRandTimeInt() + config := testAccAzureRMAPIManagementGroup_basic(ri, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMAPIManagementGroupDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMAPIManagementGroupExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "display_name", "Test Group"), + resource.TestCheckResourceAttr(resourceName, "type", "custom"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMAPIManagementGroup_complete(t *testing.T) { + resourceName := "azurerm_api_management_group.test" + ri := tf.AccRandTimeInt() + config := testAccAzureRMAPIManagementGroup_complete(ri, testLocation(), "Test Group", "A test description.") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMAPIManagementGroupDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMAPIManagementGroupExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "display_name", "Test Group"), + resource.TestCheckResourceAttr(resourceName, "description", "A test description."), + resource.TestCheckResourceAttr(resourceName, "type", "external"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func 
TestAccAzureRMAPIManagementGroup_descriptionDisplayNameUpdate(t *testing.T) { + resourceName := "azurerm_api_management_group.test" + ri := tf.AccRandTimeInt() + preConfig := testAccAzureRMAPIManagementGroup_complete(ri, testLocation(), "Original Group", "The original description.") + postConfig := testAccAzureRMAPIManagementGroup_complete(ri, testLocation(), "Modified Group", "A modified description.") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMAPIManagementGroupDestroy, + Steps: []resource.TestStep{ + { + Config: preConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMAPIManagementGroupExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "display_name", "Original Group"), + resource.TestCheckResourceAttr(resourceName, "description", "The original description."), + resource.TestCheckResourceAttr(resourceName, "type", "external"), + ), + }, + { + Config: postConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMAPIManagementGroupExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "display_name", "Modified Group"), + resource.TestCheckResourceAttr(resourceName, "description", "A modified description."), + resource.TestCheckResourceAttr(resourceName, "type", "external"), + ), + }, + { + Config: preConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMAPIManagementGroupExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "display_name", "Original Group"), + resource.TestCheckResourceAttr(resourceName, "description", "The original description."), + resource.TestCheckResourceAttr(resourceName, "type", "external"), + ), + }, + }, + }) +} + +func testCheckAzureRMAPIManagementGroupDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*ArmClient).apiManagementGroupClient + for _, rs := range s.RootModule().Resources { + if rs.Type != 
"azurerm_api_management_group" { + continue + } + + name := rs.Primary.Attributes["name"] + resourceGroup := rs.Primary.Attributes["resource_group_name"] + serviceName := rs.Primary.Attributes["api_management_name"] + + ctx := testAccProvider.Meta().(*ArmClient).StopContext + resp, err := client.Get(ctx, resourceGroup, serviceName, name) + + if err != nil { + if !utils.ResponseWasNotFound(resp.Response) { + return err + } + } + + return nil + } + return nil +} + +func testCheckAzureRMAPIManagementGroupExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + name := rs.Primary.Attributes["name"] + resourceGroup := rs.Primary.Attributes["resource_group_name"] + serviceName := rs.Primary.Attributes["api_management_name"] + + client := testAccProvider.Meta().(*ArmClient).apiManagementGroupClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + resp, err := client.Get(ctx, resourceGroup, serviceName, name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return fmt.Errorf("Bad: API Management Group %q (Resource Group %q / API Management Service %q) does not exist", name, resourceGroup, serviceName) + } + return fmt.Errorf("Bad: Get on apiManagementGroupClient: %+v", err) + } + + return nil + } +} + +func testAccAzureRMAPIManagementGroup_basic(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_api_management" "test" { + name = "acctestAM-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + publisher_name = "pub1" + publisher_email = "pub1@email.com" + + sku { + name = "Developer" + capacity = 1 + } +} + +resource "azurerm_api_management_group" "test" { + name = "acctestAMGroup-%d" + resource_group_name 
= "${azurerm_resource_group.test.name}" + api_management_name = "${azurerm_api_management.test.name}" + display_name = "Test Group" +} +`, rInt, location, rInt, rInt) +} + +func testAccAzureRMAPIManagementGroup_complete(rInt int, location string, displayName, description string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_api_management" "test" { + name = "acctestAM-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + publisher_name = "pub1" + publisher_email = "pub1@email.com" + + sku { + name = "Developer" + capacity = 1 + } +} + +resource "azurerm_api_management_group" "test" { + name = "acctestAMGroup-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + api_management_name = "${azurerm_api_management.test.name}" + display_name = "%s" + description = "%s" + type = "external" +} +`, rInt, location, rInt, rInt, displayName, description) +} diff --git a/azurerm/resource_arm_api_management_product.go b/azurerm/resource_arm_api_management_product.go new file mode 100644 index 000000000000..19b6faecca67 --- /dev/null +++ b/azurerm/resource_arm_api_management_product.go @@ -0,0 +1,204 @@ +package azurerm + +import ( + "fmt" + "log" + + "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement" + "github.com/hashicorp/terraform/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceArmApiManagementProduct() *schema.Resource { + return &schema.Resource{ + Create: resourceArmApiManagementProductCreateUpdate, + Read: resourceArmApiManagementProductRead, + Update: 
resourceArmApiManagementProductCreateUpdate, + Delete: resourceArmApiManagementProductDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "product_id": azure.SchemaApiManagementChildName(), + + "api_management_name": azure.SchemaApiManagementName(), + + "resource_group_name": resourceGroupNameSchema(), + + "display_name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validate.NoEmptyStrings, + }, + + "subscription_required": { + Type: schema.TypeBool, + Required: true, + }, + + "published": { + Type: schema.TypeBool, + Required: true, + }, + + "approval_required": { + Type: schema.TypeBool, + Optional: true, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + }, + + "terms": { + Type: schema.TypeString, + Optional: true, + }, + + "subscriptions_limit": { + Type: schema.TypeInt, + Optional: true, + }, + }, + } +} + +func resourceArmApiManagementProductCreateUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).apiManagementProductsClient + ctx := meta.(*ArmClient).StopContext + + log.Printf("[INFO] preparing arguments for API Management Product creation.") + + resourceGroup := d.Get("resource_group_name").(string) + serviceName := d.Get("api_management_name").(string) + productId := d.Get("product_id").(string) + + displayName := d.Get("display_name").(string) + description := d.Get("description").(string) + terms := d.Get("terms").(string) + subscriptionRequired := d.Get("subscription_required").(bool) + approvalRequired := d.Get("approval_required").(bool) + subscriptionsLimit := d.Get("subscriptions_limit").(int) + published := d.Get("published").(bool) + + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, serviceName, productId) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing 
Product %q (API Management Service %q / Resource Group %q): %s", productId, serviceName, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_api_management_product", *existing.ID) + } + } + publishedVal := apimanagement.NotPublished + if published { + publishedVal = apimanagement.Published + } + + properties := apimanagement.ProductContract{ + ProductContractProperties: &apimanagement.ProductContractProperties{ + Description: utils.String(description), + DisplayName: utils.String(displayName), + State: publishedVal, + SubscriptionRequired: utils.Bool(subscriptionRequired), + Terms: utils.String(terms), + }, + } + + // Swagger says: Can be present only if subscriptionRequired property is present and has a value of false. + // API/Portal says: Cannot provide values for approvalRequired and subscriptionsLimit when subscriptionRequired is set to false in the request payload + if subscriptionRequired && subscriptionsLimit > 0 { + properties.ProductContractProperties.ApprovalRequired = utils.Bool(approvalRequired) + properties.ProductContractProperties.SubscriptionsLimit = utils.Int32(int32(subscriptionsLimit)) + } + + if _, err := client.CreateOrUpdate(ctx, resourceGroup, serviceName, productId, properties, ""); err != nil { + return fmt.Errorf("Error creating/updating Product %q (API Management Service %q / Resource Group %q): %+v", productId, serviceName, resourceGroup, err) + } + + read, err := client.Get(ctx, resourceGroup, serviceName, productId) + if err != nil { + return fmt.Errorf("Error retrieving Product %q (API Management Service %q / Resource Group %q): %+v", productId, serviceName, resourceGroup, err) + } + + if read.ID == nil { + return fmt.Errorf("Cannot read ID for Product %q (API Management Service %q / Resource Group %q)", productId, serviceName, resourceGroup) + } + + d.SetId(*read.ID) + + return resourceArmApiManagementProductRead(d, meta) +} + +func 
resourceArmApiManagementProductRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).apiManagementProductsClient + ctx := meta.(*ArmClient).StopContext + + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return err + } + + resourceGroup := id.ResourceGroup + serviceName := id.Path["service"] + productId := id.Path["products"] + + resp, err := client.Get(ctx, resourceGroup, serviceName, productId) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("Product %q was not found in API Management Service %q / Resource Group %q - removing from state!", productId, serviceName, resourceGroup) + d.SetId("") + return nil + } + + return fmt.Errorf("Error making Read request on Product %q (API Management Service %q / Resource Group %q): %+v", productId, serviceName, resourceGroup, err) + } + + d.Set("product_id", productId) + d.Set("api_management_name", serviceName) + d.Set("resource_group_name", resourceGroup) + + if props := resp.ProductContractProperties; props != nil { + d.Set("approval_required", props.ApprovalRequired) + d.Set("description", props.Description) + d.Set("display_name", props.DisplayName) + d.Set("published", props.State == apimanagement.Published) + d.Set("subscriptions_limit", props.SubscriptionsLimit) + d.Set("subscription_required", props.SubscriptionRequired) + d.Set("terms", props.Terms) + } + + return nil +} + +func resourceArmApiManagementProductDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).apiManagementProductsClient + ctx := meta.(*ArmClient).StopContext + + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return err + } + resourceGroup := id.ResourceGroup + serviceName := id.Path["service"] + productId := id.Path["products"] + + log.Printf("[DEBUG] Deleting Product %q (API Management Service %q / Resource Group %q)", productId, serviceName, resourceGroup) + deleteSubscriptions := true + resp, err := client.Delete(ctx, 
resourceGroup, serviceName, productId, "", utils.Bool(deleteSubscriptions)) + if err != nil { + if !utils.ResponseWasNotFound(resp) { + return fmt.Errorf("Error deleting Product %q (API Management Service %q / Resource Group %q): %+v", productId, serviceName, resourceGroup, err) + } + } + + return nil +} diff --git a/azurerm/resource_arm_api_management_product_test.go b/azurerm/resource_arm_api_management_product_test.go new file mode 100644 index 000000000000..d69efc52b3fb --- /dev/null +++ b/azurerm/resource_arm_api_management_product_test.go @@ -0,0 +1,398 @@ +package azurerm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func TestAccAzureRMApiManagementProduct_basic(t *testing.T) { + resourceName := "azurerm_api_management_product.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMApiManagementProductDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMApiManagementProduct_basic(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMApiManagementProductExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "approval_required", "false"), + resource.TestCheckResourceAttr(resourceName, "description", ""), + resource.TestCheckResourceAttr(resourceName, "display_name", "Test Product"), + resource.TestCheckResourceAttr(resourceName, "product_id", "test-product"), + resource.TestCheckResourceAttr(resourceName, "published", "false"), + resource.TestCheckResourceAttr(resourceName, "subscription_required", "false"), + resource.TestCheckResourceAttr(resourceName, "terms", ""), + ), + }, + { + ResourceName: resourceName, + 
ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMApiManagementProduct_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_api_management_product.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMApiManagementProductDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMApiManagementProduct_basic(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMApiManagementProductExists(resourceName), + ), + }, + { + Config: testAccAzureRMApiManagementProduct_requiresImport(ri, location), + ExpectError: testRequiresImportError("azurerm_api_management_product"), + }, + }, + }) +} + +func testCheckAzureRMApiManagementProductDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*ArmClient).apiManagementProductsClient + + for _, rs := range s.RootModule().Resources { + if rs.Type != "azurerm_api_management_product" { + continue + } + + productId := rs.Primary.Attributes["product_id"] + serviceName := rs.Primary.Attributes["api_management_name"] + resourceGroup := rs.Primary.Attributes["resource_group_name"] + ctx := testAccProvider.Meta().(*ArmClient).StopContext + resp, err := conn.Get(ctx, resourceGroup, serviceName, productId) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return nil + } + + return err + } + + return nil + } + + return nil +} + +func TestAccAzureRMApiManagementProduct_update(t *testing.T) { + resourceName := "azurerm_api_management_product.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: 
testCheckAzureRMApiManagementProductDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMApiManagementProduct_basic(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMApiManagementProductExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "approval_required", "false"), + resource.TestCheckResourceAttr(resourceName, "description", ""), + resource.TestCheckResourceAttr(resourceName, "display_name", "Test Product"), + resource.TestCheckResourceAttr(resourceName, "product_id", "test-product"), + resource.TestCheckResourceAttr(resourceName, "published", "false"), + resource.TestCheckResourceAttr(resourceName, "subscription_required", "false"), + resource.TestCheckResourceAttr(resourceName, "terms", ""), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAzureRMApiManagementProduct_updated(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMApiManagementProductExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "approval_required", "true"), + resource.TestCheckResourceAttr(resourceName, "description", ""), + resource.TestCheckResourceAttr(resourceName, "display_name", "Test Updated Product"), + resource.TestCheckResourceAttr(resourceName, "product_id", "test-product"), + resource.TestCheckResourceAttr(resourceName, "published", "true"), + resource.TestCheckResourceAttr(resourceName, "subscription_required", "true"), + resource.TestCheckResourceAttr(resourceName, "terms", ""), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAzureRMApiManagementProduct_basic(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMApiManagementProductExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "approval_required", "false"), + resource.TestCheckResourceAttr(resourceName, "description", ""), + 
resource.TestCheckResourceAttr(resourceName, "display_name", "Test Product"), + resource.TestCheckResourceAttr(resourceName, "product_id", "test-product"), + resource.TestCheckResourceAttr(resourceName, "published", "false"), + resource.TestCheckResourceAttr(resourceName, "subscription_required", "false"), + resource.TestCheckResourceAttr(resourceName, "terms", ""), + ), + }, + }, + }) +} + +func TestAccAzureRMApiManagementProduct_subscriptionsLimit(t *testing.T) { + resourceName := "azurerm_api_management_product.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMApiManagementProductDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMApiManagementProduct_subscriptionLimits(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMApiManagementProductExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "approval_required", "true"), + resource.TestCheckResourceAttr(resourceName, "subscription_required", "true"), + resource.TestCheckResourceAttr(resourceName, "subscriptions_limit", "2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMApiManagementProduct_complete(t *testing.T) { + resourceName := "azurerm_api_management_product.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMApiManagementProductDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMApiManagementProduct_complete(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMApiManagementProductExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "approval_required", "true"), + 
resource.TestCheckResourceAttr(resourceName, "description", "This is an example description"), + resource.TestCheckResourceAttr(resourceName, "display_name", "Test Product"), + resource.TestCheckResourceAttr(resourceName, "product_id", "test-product"), + resource.TestCheckResourceAttr(resourceName, "published", "true"), + resource.TestCheckResourceAttr(resourceName, "subscriptions_limit", "2"), + resource.TestCheckResourceAttr(resourceName, "subscription_required", "true"), + resource.TestCheckResourceAttr(resourceName, "terms", "These are some example terms and conditions"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testCheckAzureRMApiManagementProductExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + // Ensure we have enough information in state to look up in API + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + productId := rs.Primary.Attributes["product_id"] + serviceName := rs.Primary.Attributes["api_management_name"] + resourceGroup := rs.Primary.Attributes["resource_group_name"] + + conn := testAccProvider.Meta().(*ArmClient).apiManagementProductsClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + resp, err := conn.Get(ctx, resourceGroup, serviceName, productId) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return fmt.Errorf("Bad: Product %q (API Management Service %q / Resource Group %q) does not exist", productId, serviceName, resourceGroup) + } + + return fmt.Errorf("Bad: Get on apiManagementProductsClient: %+v", err) + } + + return nil + } +} + +func testAccAzureRMApiManagementProduct_basic(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_api_management" "test" { + name = "acctestAM-%d" + location = 
"${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + publisher_name = "pub1" + publisher_email = "pub1@email.com" + + sku { + name = "Developer" + capacity = 1 + } +} + +resource "azurerm_api_management_product" "test" { + product_id = "test-product" + api_management_name = "${azurerm_api_management.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" + display_name = "Test Product" + subscription_required = false + published = false +} +`, rInt, location, rInt) +} + +func testAccAzureRMApiManagementProduct_requiresImport(rInt int, location string) string { + template := testAccAzureRMApiManagementProduct_basic(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_product" "import" { + product_id = "${azurerm_api_management_product.test.product_id}" + api_management_name = "${azurerm_api_management_product.test.api_management_name}" + resource_group_name = "${azurerm_api_management_product.test.resource_group_name}" + display_name = "${azurerm_api_management_product.test.display_name}" + subscription_required = "${azurerm_api_management_product.test.subscription_required}" + approval_required = "${azurerm_api_management_product.test.approval_required}" + published = "${azurerm_api_management_product.test.published}" +} +`, template) +} + +func testAccAzureRMApiManagementProduct_updated(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_api_management" "test" { + name = "acctestAM-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + publisher_name = "pub1" + publisher_email = "pub1@email.com" + + sku { + name = "Developer" + capacity = 1 + } +} + +resource "azurerm_api_management_product" "test" { + product_id = "test-product" + api_management_name = 
"${azurerm_api_management.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" + display_name = "Test Updated Product" + subscription_required = true + approval_required = true + published = true +} +`, rInt, location, rInt) +} + +func testAccAzureRMApiManagementProduct_subscriptionLimits(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_api_management" "test" { + name = "acctestAM-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + publisher_name = "pub1" + publisher_email = "pub1@email.com" + + sku { + name = "Developer" + capacity = 1 + } +} + +resource "azurerm_api_management_product" "test" { + product_id = "test-product" + api_management_name = "${azurerm_api_management.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" + display_name = "Test Product" + subscription_required = true + approval_required = true + subscriptions_limit = 2 + published = false +} +`, rInt, location, rInt) +} + +func testAccAzureRMApiManagementProduct_complete(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_api_management" "test" { + name = "acctestAM-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + publisher_name = "pub1" + publisher_email = "pub1@email.com" + + sku { + name = "Developer" + capacity = 1 + } +} + +resource "azurerm_api_management_product" "test" { + product_id = "test-product" + api_management_name = "${azurerm_api_management.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" + display_name = "Test Product" + subscription_required = true + approval_required = true + published = true + subscriptions_limit = 2 + description = 
"This is an example description" + terms = "These are some example terms and conditions" +} +`, rInt, location, rInt) +} diff --git a/azurerm/resource_arm_api_management_test.go b/azurerm/resource_arm_api_management_test.go index 7267e12a908c..7dd0a045eb68 100644 --- a/azurerm/resource_arm_api_management_test.go +++ b/azurerm/resource_arm_api_management_test.go @@ -316,7 +316,7 @@ resource "azurerm_api_management" "test" { capacity = 1 } - tags { + tags = { "Acceptance" = "Test" } diff --git a/azurerm/resource_arm_api_management_user.go b/azurerm/resource_arm_api_management_user.go new file mode 100644 index 000000000000..5fef2c29a93e --- /dev/null +++ b/azurerm/resource_arm_api_management_user.go @@ -0,0 +1,218 @@ +package azurerm + +import ( + "fmt" + "log" + + "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceArmApiManagementUser() *schema.Resource { + return &schema.Resource{ + Create: resourceArmApiManagementUserCreateUpdate, + Read: resourceArmApiManagementUserRead, + Update: resourceArmApiManagementUserCreateUpdate, + Delete: resourceArmApiManagementUserDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "user_id": azure.SchemaApiManagementUserName(), + + "api_management_name": azure.SchemaApiManagementName(), + + "resource_group_name": resourceGroupNameSchema(), + + "first_name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validate.NoEmptyStrings, + }, + + "email": { + Type: 
schema.TypeString, + Required: true, + ValidateFunc: validate.NoEmptyStrings, + }, + + "last_name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validate.NoEmptyStrings, + }, + + "confirmation": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + string(apimanagement.Invite), + string(apimanagement.Signup), + }, false), + }, + + "note": { + Type: schema.TypeString, + Optional: true, + }, + + "password": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + }, + + "state": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{ + string(apimanagement.UserStateActive), + string(apimanagement.UserStateBlocked), + string(apimanagement.UserStatePending), + }, false), + }, + }, + } +} + +func resourceArmApiManagementUserCreateUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).apiManagementUsersClient + ctx := meta.(*ArmClient).StopContext + + log.Printf("[INFO] preparing arguments for API Management User creation.") + + resourceGroup := d.Get("resource_group_name").(string) + serviceName := d.Get("api_management_name").(string) + userId := d.Get("user_id").(string) + + firstName := d.Get("first_name").(string) + lastName := d.Get("last_name").(string) + email := d.Get("email").(string) + state := d.Get("state").(string) + note := d.Get("note").(string) + password := d.Get("password").(string) + + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, serviceName, userId) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing User %q (API Management Service %q / Resource Group %q): %s", userId, serviceName, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_api_management_user", 
*existing.ID) + } + } + + properties := apimanagement.UserCreateParameters{ + UserCreateParameterProperties: &apimanagement.UserCreateParameterProperties{ + FirstName: utils.String(firstName), + LastName: utils.String(lastName), + Email: utils.String(email), + }, + } + + confirmation := d.Get("confirmation").(string) + if confirmation != "" { + properties.UserCreateParameterProperties.Confirmation = apimanagement.Confirmation(confirmation) + } + if note != "" { + properties.UserCreateParameterProperties.Note = utils.String(note) + } + if password != "" { + properties.UserCreateParameterProperties.Password = utils.String(password) + } + if state != "" { + properties.UserCreateParameterProperties.State = apimanagement.UserState(state) + } + + if _, err := client.CreateOrUpdate(ctx, resourceGroup, serviceName, userId, properties, ""); err != nil { + return fmt.Errorf("Error creating/updating User %q (API Management Service %q / Resource Group %q): %+v", userId, serviceName, resourceGroup, err) + } + + read, err := client.Get(ctx, resourceGroup, serviceName, userId) + if err != nil { + return fmt.Errorf("Error retrieving User %q (API Management Service %q / Resource Group %q): %+v", userId, serviceName, resourceGroup, err) + } + + if read.ID == nil { + return fmt.Errorf("Cannot read ID for User %q (API Management Service %q / Resource Group %q)", userId, serviceName, resourceGroup) + } + + d.SetId(*read.ID) + + return resourceArmApiManagementUserRead(d, meta) +} + +func resourceArmApiManagementUserRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).apiManagementUsersClient + ctx := meta.(*ArmClient).StopContext + + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return err + } + + resourceGroup := id.ResourceGroup + serviceName := id.Path["service"] + userId := id.Path["users"] + + resp, err := client.Get(ctx, resourceGroup, serviceName, userId) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + 
log.Printf("User %q was not found in API Management Service %q / Resource Group %q - removing from state!", userId, serviceName, resourceGroup) + d.SetId("") + return nil + } + + return fmt.Errorf("Error making Read request on User %q (API Management Service %q / Resource Group %q): %+v", userId, serviceName, resourceGroup, err) + } + + d.Set("user_id", userId) + d.Set("api_management_name", serviceName) + d.Set("resource_group_name", resourceGroup) + + if props := resp.UserContractProperties; props != nil { + d.Set("first_name", props.FirstName) + d.Set("last_name", props.LastName) + d.Set("email", props.Email) + d.Set("note", props.Note) + d.Set("state", string(props.State)) + } + + return nil +} + +func resourceArmApiManagementUserDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).apiManagementUsersClient + ctx := meta.(*ArmClient).StopContext + + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return err + } + resourceGroup := id.ResourceGroup + serviceName := id.Path["service"] + userId := id.Path["users"] + + log.Printf("[DEBUG] Deleting User %q (API Management Service %q / Resource Group %q)", userId, serviceName, resourceGroup) + deleteSubscriptions := utils.Bool(true) + notify := utils.Bool(false) + resp, err := client.Delete(ctx, resourceGroup, serviceName, userId, "", deleteSubscriptions, notify) + if err != nil { + if !utils.ResponseWasNotFound(resp) { + return fmt.Errorf("Error deleting User %q (API Management Service %q / Resource Group %q): %+v", userId, serviceName, resourceGroup, err) + } + } + + return nil +} diff --git a/azurerm/resource_arm_api_management_user_test.go b/azurerm/resource_arm_api_management_user_test.go new file mode 100644 index 000000000000..6fa655c423f5 --- /dev/null +++ b/azurerm/resource_arm_api_management_user_test.go @@ -0,0 +1,442 @@ +package azurerm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + 
"github.com/hashicorp/terraform/terraform" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func TestAccAzureRMApiManagementUser_basic(t *testing.T) { + resourceName := "azurerm_api_management_user.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMApiManagementUserDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMApiManagementUser_basic(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMApiManagementUserExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "first_name", "Acceptance"), + resource.TestCheckResourceAttr(resourceName, "last_name", "Test"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMApiManagementUser_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_api_management_user.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMApiManagementUserDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMApiManagementUser_basic(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMApiManagementUserExists(resourceName), + ), + }, + { + Config: testAccAzureRMApiManagementUser_requiresImport(ri, location), + ExpectError: testRequiresImportError("azurerm_api_management_user"), + }, + }, + }) +} + +func TestAccAzureRMApiManagementUser_update(t *testing.T) { + resourceName := "azurerm_api_management_user.test" + ri := tf.AccRandTimeInt() + 
location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMApiManagementUserDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMApiManagementUser_basic(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMApiManagementUserExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "first_name", "Acceptance"), + resource.TestCheckResourceAttr(resourceName, "last_name", "Test"), + resource.TestCheckResourceAttr(resourceName, "state", "active"), + ), + }, + { + Config: testAccAzureRMApiManagementUser_updatedBlocked(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMApiManagementUserExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "first_name", "Acceptance Updated"), + resource.TestCheckResourceAttr(resourceName, "last_name", "Test Updated"), + resource.TestCheckResourceAttr(resourceName, "state", "blocked"), + ), + }, + { + Config: testAccAzureRMApiManagementUser_updatedActive(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMApiManagementUserExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "first_name", "Acceptance"), + resource.TestCheckResourceAttr(resourceName, "last_name", "Test"), + resource.TestCheckResourceAttr(resourceName, "state", "active"), + ), + }, + }, + }) +} + +func TestAccAzureRMApiManagementUser_password(t *testing.T) { + resourceName := "azurerm_api_management_user.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMApiManagementUserDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMApiManagementUser_password(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMApiManagementUserExists(resourceName), + 
resource.TestCheckResourceAttr(resourceName, "first_name", "Acceptance"), + resource.TestCheckResourceAttr(resourceName, "last_name", "Test"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"password"}, + }, + }, + }) +} + +func TestAccAzureRMApiManagementUser_invite(t *testing.T) { + resourceName := "azurerm_api_management_user.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMApiManagementUserDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMApiManagementUser_invited(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMApiManagementUserExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + // not returned + "confirmation", + }, + }, + }, + }) +} + +func TestAccAzureRMApiManagementUser_signup(t *testing.T) { + resourceName := "azurerm_api_management_user.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMApiManagementUserDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMApiManagementUser_signUp(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMApiManagementUserExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + // not returned + "confirmation", + }, + }, + }, + }) +} + +func TestAccAzureRMApiManagementUser_complete(t *testing.T) { + resourceName := "azurerm_api_management_user.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + 
PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMApiManagementUserDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMApiManagementUser_complete(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMApiManagementUserExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "first_name", "Acceptance"), + resource.TestCheckResourceAttr(resourceName, "last_name", "Test"), + resource.TestCheckResourceAttr(resourceName, "note", "Used for testing in dimension C-137."), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + // not returned + "confirmation", + }, + }, + }, + }) +} + +func testCheckAzureRMApiManagementUserDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*ArmClient).apiManagementUsersClient + + for _, rs := range s.RootModule().Resources { + if rs.Type != "azurerm_api_management_user" { + continue + } + + userId := rs.Primary.Attributes["user_id"] + serviceName := rs.Primary.Attributes["api_management_name"] + resourceGroup := rs.Primary.Attributes["resource_group_name"] + ctx := testAccProvider.Meta().(*ArmClient).StopContext + resp, err := conn.Get(ctx, resourceGroup, serviceName, userId) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return nil + } + + return err + } + + return nil + } + + return nil +} + +func testCheckAzureRMApiManagementUserExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + // Ensure we have enough information in state to look up in API + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + userId := rs.Primary.Attributes["user_id"] + serviceName := rs.Primary.Attributes["api_management_name"] + resourceGroup := rs.Primary.Attributes["resource_group_name"] + + conn := 
testAccProvider.Meta().(*ArmClient).apiManagementUsersClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + resp, err := conn.Get(ctx, resourceGroup, serviceName, userId) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return fmt.Errorf("Bad: User %q (API Management Service %q / Resource Group %q) does not exist", userId, serviceName, resourceGroup) + } + + return fmt.Errorf("Bad: Get on apiManagementUsersClient: %+v", err) + } + + return nil + } +} + +func testAccAzureRMApiManagementUser_basic(rInt int, location string) string { + template := testAccAzureRMApiManagementUser_template(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_user" "test" { + user_id = "acctestuser%d" + api_management_name = "${azurerm_api_management.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" + first_name = "Acceptance" + last_name = "Test" + email = "azure-acctest%d@example.com" +} +`, template, rInt, rInt) +} + +func testAccAzureRMApiManagementUser_requiresImport(rInt int, location string) string { + template := testAccAzureRMApiManagementUser_basic(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_user" "import" { + user_id = "${azurerm_api_management_user.test.user_id}" + api_management_name = "${azurerm_api_management_user.test.api_management_name}" + resource_group_name = "${azurerm_api_management_user.test.resource_group_name}" + first_name = "${azurerm_api_management_user.test.first_name}" + last_name = "${azurerm_api_management_user.test.last_name}" + email = "${azurerm_api_management_user.test.email}" + state = "${azurerm_api_management_user.test.state}" +} +`, template) +} + +func testAccAzureRMApiManagementUser_password(rInt int, location string) string { + template := testAccAzureRMApiManagementUser_template(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_user" "test" { + user_id = "acctestuser%d" + api_management_name = 
"${azurerm_api_management.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" + first_name = "Acceptance" + last_name = "Test" + email = "azure-acctest%d@example.com" + state = "active" + password = "3991bb15-282d-4b9b-9de3-3d5fc89eb530" +} +`, template, rInt, rInt) +} + +func testAccAzureRMApiManagementUser_updatedActive(rInt int, location string) string { + template := testAccAzureRMApiManagementUser_template(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_user" "test" { + user_id = "acctestuser%d" + api_management_name = "${azurerm_api_management.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" + first_name = "Acceptance" + last_name = "Test" + email = "azure-acctest%d@example.com" + state = "active" +} +`, template, rInt, rInt) +} + +func testAccAzureRMApiManagementUser_updatedBlocked(rInt int, location string) string { + template := testAccAzureRMApiManagementUser_template(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_user" "test" { + user_id = "acctestuser%d" + api_management_name = "${azurerm_api_management.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" + first_name = "Acceptance Updated" + last_name = "Test Updated" + email = "azure-acctest%d@example.com" + state = "blocked" +} +`, template, rInt, rInt) +} + +func testAccAzureRMApiManagementUser_invited(rInt int, location string) string { + template := testAccAzureRMApiManagementUser_template(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_user" "test" { + user_id = "acctestuser%d" + api_management_name = "${azurerm_api_management.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" + first_name = "Acceptance" + last_name = "Test User" + email = "azure-acctest%d@example.com" + state = "blocked" + confirmation = "invite" +} +`, template, rInt, rInt) +} + +func testAccAzureRMApiManagementUser_signUp(rInt int, location 
string) string { + template := testAccAzureRMApiManagementUser_template(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_user" "test" { + user_id = "acctestuser%d" + api_management_name = "${azurerm_api_management.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" + first_name = "Acceptance" + last_name = "Test User" + email = "azure-acctest%d@example.com" + state = "blocked" + confirmation = "signup" +} +`, template, rInt, rInt) +} + +func testAccAzureRMApiManagementUser_complete(rInt int, location string) string { + template := testAccAzureRMApiManagementUser_template(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_user" "test" { + user_id = "acctestuser%d" + api_management_name = "${azurerm_api_management.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" + first_name = "Acceptance" + last_name = "Test" + email = "azure-acctest%d@example.com" + state = "active" + confirmation = "signup" + note = "Used for testing in dimension C-137." 
+} +`, template, rInt, rInt) +} + +func testAccAzureRMApiManagementUser_template(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_api_management" "test" { + name = "acctestAM-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + publisher_name = "pub1" + publisher_email = "pub1@email.com" + + sku { + name = "Developer" + capacity = 1 + } +} +`, rInt, location, rInt) +} diff --git a/azurerm/resource_arm_app_service.go b/azurerm/resource_arm_app_service.go index 2faae84d47e8..306242454324 100644 --- a/azurerm/resource_arm_app_service.go +++ b/azurerm/resource_arm_app_service.go @@ -82,6 +82,11 @@ func resourceArmAppService() *schema.Resource { Default: false, }, + "client_cert_enabled": { + Type: schema.TypeBool, + Optional: true, + }, + "enabled": { Type: schema.TypeBool, Optional: true, @@ -250,6 +255,11 @@ func resourceArmAppServiceCreate(d *schema.ResourceData, meta interface{}) error siteEnvelope.SiteProperties.ClientAffinityEnabled = utils.Bool(enabled) } + if v, ok := d.GetOkExists("client_cert_enabled"); ok { + certEnabled := v.(bool) + siteEnvelope.SiteProperties.ClientCertEnabled = utils.Bool(certEnabled) + } + createFuture, err := client.CreateOrUpdate(ctx, resGroup, name, siteEnvelope) if err != nil { return err @@ -303,6 +313,11 @@ func resourceArmAppServiceUpdate(d *schema.ResourceData, meta interface{}) error }, } + if v, ok := d.GetOkExists("client_cert_enabled"); ok { + certEnabled := v.(bool) + siteEnvelope.SiteProperties.ClientCertEnabled = utils.Bool(certEnabled) + } + future, err := client.CreateOrUpdate(ctx, resGroup, name, siteEnvelope) if err != nil { return err @@ -453,6 +468,7 @@ func resourceArmAppServiceRead(d *schema.ResourceData, meta interface{}) error { d.Set("client_affinity_enabled", props.ClientAffinityEnabled) d.Set("enabled", props.Enabled) 
d.Set("https_only", props.HTTPSOnly) + d.Set("client_cert_enabled", props.ClientCertEnabled) d.Set("default_site_hostname", props.DefaultHostName) d.Set("outbound_ip_addresses", props.OutboundIPAddresses) d.Set("possible_outbound_ip_addresses", props.PossibleOutboundIPAddresses) diff --git a/azurerm/resource_arm_app_service_custom_hostname_binding_test.go b/azurerm/resource_arm_app_service_custom_hostname_binding_test.go index ec88a60253e8..aa6444e5a2ef 100644 --- a/azurerm/resource_arm_app_service_custom_hostname_binding_test.go +++ b/azurerm/resource_arm_app_service_custom_hostname_binding_test.go @@ -54,7 +54,7 @@ func testAccAzureRMAppServiceCustomHostnameBinding_basic(t *testing.T, appServic location := testLocation() config := testAccAzureRMAppServiceCustomHostnameBinding_basicConfig(ri, location, appServiceEnv, domainEnv) - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMAppServiceCustomHostnameBindingDestroy, @@ -84,7 +84,7 @@ func testAccAzureRMAppServiceCustomHostnameBinding_requiresImport(t *testing.T, ri := tf.AccRandTimeInt() location := testLocation() - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMAppServiceCustomHostnameBindingDestroy, @@ -115,7 +115,7 @@ func testAccAzureRMAppServiceCustomHostnameBinding_multiple(t *testing.T, appSer location := testLocation() config := testAccAzureRMAppServiceCustomHostnameBinding_multipleConfig(ri, location, appServiceEnv, domainEnv, altDomainEnv) - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMAppServiceCustomHostnameBindingDestroy, diff --git a/azurerm/resource_arm_app_service_plan_test.go 
b/azurerm/resource_arm_app_service_plan_test.go index 376a218e3b42..24e32a261aad 100644 --- a/azurerm/resource_arm_app_service_plan_test.go +++ b/azurerm/resource_arm_app_service_plan_test.go @@ -504,7 +504,7 @@ resource "azurerm_app_service_plan" "test" { reserved = false } - tags { + tags = { environment = "Test" } } @@ -532,7 +532,7 @@ resource "azurerm_app_service_plan" "test" { per_site_scaling = true reserved = false - tags { + tags = { environment = "Test" } } diff --git a/azurerm/resource_arm_app_service_slot_test.go b/azurerm/resource_arm_app_service_slot_test.go index f155754d2ae8..8bed1c3f924e 100644 --- a/azurerm/resource_arm_app_service_slot_test.go +++ b/azurerm/resource_arm_app_service_slot_test.go @@ -1114,7 +1114,7 @@ resource "azurerm_app_service_slot" "test" { app_service_plan_id = "${azurerm_app_service_plan.test.id}" app_service_name = "${azurerm_app_service.test.name}" - app_settings { + app_settings = { "foo" = "bar" } } @@ -1612,7 +1612,7 @@ resource "azurerm_app_service_slot" "test" { app_service_plan_id = "${azurerm_app_service_plan.test.id}" app_service_name = "${azurerm_app_service.test.name}" - tags { + tags = { "Hello" = "World" } } @@ -1651,7 +1651,7 @@ resource "azurerm_app_service_slot" "test" { app_service_plan_id = "${azurerm_app_service_plan.test.id}" app_service_name = "${azurerm_app_service.test.name}" - tags { + tags = { "Hello" = "World" "Terraform" = "AcceptanceTests" } @@ -1696,7 +1696,7 @@ resource "azurerm_app_service_slot" "test" { remote_debugging_version = "VS2015" } - tags { + tags = { "Hello" = "World" } } diff --git a/azurerm/resource_arm_app_service_test.go b/azurerm/resource_arm_app_service_test.go index 5a1393eaa967..22873d0145a1 100644 --- a/azurerm/resource_arm_app_service_test.go +++ b/azurerm/resource_arm_app_service_test.go @@ -288,6 +288,40 @@ func TestAccAzureRMAppService_httpsOnly(t *testing.T) { }) } +func TestAccAzureRMAppService_clientCertEnabled(t *testing.T) { + resourceName := 
"azurerm_app_service.test" + ri := tf.AccRandTimeInt() + configClientCertEnabled := testAccAzureRMAppService_clientCertEnabled(ri, testLocation()) + configClientCertEnabledNotSet := testAccAzureRMAppService_clientCertEnabledNotSet(ri, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMAppServiceDestroy, + Steps: []resource.TestStep{ + { + Config: configClientCertEnabled, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMAppServiceExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "client_cert_enabled", "true"), + ), + }, + { + Config: configClientCertEnabledNotSet, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMAppServiceExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "client_cert_enabled", "false"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccAzureRMAppService_appSettings(t *testing.T) { resourceName := "azurerm_app_service.test" ri := tf.AccRandTimeInt() @@ -1397,6 +1431,61 @@ resource "azurerm_app_service" "test" { `, rInt, location, rInt, rInt) } +func testAccAzureRMAppService_clientCertEnabled(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_app_service_plan" "test" { + name = "acctestASP-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + sku { + tier = "Standard" + size = "S1" + } +} + +resource "azurerm_app_service" "test" { + name = "acctestAS-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + app_service_plan_id = "${azurerm_app_service_plan.test.id}" + client_cert_enabled = true +} +`, rInt, location, rInt, rInt) +} + +func 
testAccAzureRMAppService_clientCertEnabledNotSet(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_app_service_plan" "test" { + name = "acctestASP-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + sku { + tier = "Standard" + size = "S1" + } +} + +resource "azurerm_app_service" "test" { + name = "acctestAS-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + app_service_plan_id = "${azurerm_app_service_plan.test.id}" +} +`, rInt, location, rInt, rInt) +} + func testAccAzureRMAppService_32Bit(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { @@ -1483,7 +1572,7 @@ resource "azurerm_app_service" "test" { resource_group_name = "${azurerm_resource_group.test.name}" app_service_plan_id = "${azurerm_app_service_plan.test.id}" - app_settings { + app_settings = { "foo" = "bar" } } @@ -1968,7 +2057,7 @@ resource "azurerm_app_service" "test" { remote_debugging_version = "VS2015" } - tags { + tags = { "Hello" = "World" } } @@ -1999,7 +2088,7 @@ resource "azurerm_app_service" "test" { resource_group_name = "${azurerm_resource_group.test.name}" app_service_plan_id = "${azurerm_app_service_plan.test.id}" - tags { + tags = { "Hello" = "World" } } @@ -2030,7 +2119,7 @@ resource "azurerm_app_service" "test" { resource_group_name = "${azurerm_resource_group.test.name}" app_service_plan_id = "${azurerm_app_service_plan.test.id}" - tags { + tags = { "Hello" = "World" "Terraform" = "AcceptanceTests" } @@ -2286,7 +2375,7 @@ resource "azurerm_app_service" "test" { linux_fx_version = "DOCKER|(golang:latest)" } - app_settings { + app_settings = { "WEBSITES_ENABLE_APP_SERVICE_STORAGE" = "false" } } diff --git a/azurerm/resource_arm_application_gateway.go 
b/azurerm/resource_arm_application_gateway.go index 8a85b3323d07..ef093d4e4489 100644 --- a/azurerm/resource_arm_application_gateway.go +++ b/azurerm/resource_arm_application_gateway.go @@ -50,20 +50,20 @@ func resourceArmApplicationGateway() *schema.Resource { Required: true, }, - // TODO: ditch the suffix `_list` in the future - "fqdn_list": { + "fqdns": { Type: schema.TypeList, Optional: true, + Computed: true, MinItems: 1, Elem: &schema.Schema{ Type: schema.TypeString, }, }, - // TODO: ditch the suffix `_list` in the future - "ip_address_list": { + "ip_addresses": { Type: schema.TypeList, Optional: true, + Computed: true, MinItems: 1, Elem: &schema.Schema{ Type: schema.TypeString, @@ -71,6 +71,31 @@ func resourceArmApplicationGateway() *schema.Resource { }, }, + // TODO: remove in 2.0 + "fqdn_list": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Deprecated: "`fqdn_list` has been deprecated in favour of the `fqdns` field", + MinItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + // TODO: remove in 2.0 + "ip_address_list": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Deprecated: "`ip_address_list` has been deprecated in favour of the `ip_addresses` field", + MinItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validate.IPv4Address, + }, + }, + "id": { Type: schema.TypeString, Computed: true, @@ -90,6 +115,11 @@ func resourceArmApplicationGateway() *schema.Resource { Required: true, }, + "path": { + Type: schema.TypeString, + Optional: true, + }, + "port": { Type: schema.TypeInt, Required: true, @@ -146,6 +176,26 @@ func resourceArmApplicationGateway() *schema.Resource { }, }, + "connection_draining": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + + "drain_timeout_sec": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: 
validation.IntBetween(1, 3600), + }, + }, + }, + }, + "probe_name": { Type: schema.TypeString, Optional: true, @@ -323,6 +373,33 @@ func resourceArmApplicationGateway() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "custom_error_configuration": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status_code": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + string(network.HTTPStatus403), + string(network.HTTPStatus502), + }, false), + }, + + "custom_error_page_url": { + Type: schema.TypeString, + Required: true, + }, + + "id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, }, }, }, @@ -737,6 +814,33 @@ func resourceArmApplicationGateway() *schema.Resource { }, }, + "custom_error_configuration": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status_code": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + string(network.HTTPStatus403), + string(network.HTTPStatus502), + }, false), + }, + + "custom_error_page_url": { + Type: schema.TypeString, + Required: true, + }, + + "id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "tags": tagsSchema(), }, } @@ -785,6 +889,7 @@ func resourceArmApplicationGatewayCreateUpdate(d *schema.ResourceData, meta inte sku := expandApplicationGatewaySku(d) sslCertificates := expandApplicationGatewaySslCertificates(d) sslPolicy := expandApplicationGatewaySslPolicy(d) + customErrorConfigurations := expandApplicationGatewayCustomErrorConfigurations(d.Get("custom_error_configuration").([]interface{})) urlPathMaps := expandApplicationGatewayURLPathMaps(d, gatewayID) gateway := network.ApplicationGateway{ @@ -805,6 +910,7 @@ func resourceArmApplicationGatewayCreateUpdate(d *schema.ResourceData, meta inte Sku: sku, SslCertificates: sslCertificates, SslPolicy: 
sslPolicy, + CustomErrorConfigurations: customErrorConfigurations, URLPathMaps: urlPathMaps, }, } @@ -941,6 +1047,10 @@ func resourceArmApplicationGatewayRead(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error setting `ssl_certificate`: %+v", setErr) } + if setErr := d.Set("custom_error_configuration", flattenApplicationGatewayCustomErrorConfigurations(props.CustomErrorConfigurations)); setErr != nil { + return fmt.Errorf("Error setting `custom_error_configuration`: %+v", setErr) + } + urlPathMaps, err := flattenApplicationGatewayURLPathMaps(props.URLPathMaps) if err != nil { return fmt.Errorf("Error flattening `url_path_map`: %+v", err) @@ -1050,12 +1160,24 @@ func expandApplicationGatewayBackendAddressPools(d *schema.ResourceData) *[]netw v := raw.(map[string]interface{}) backendAddresses := make([]network.ApplicationGatewayBackendAddress, 0) - for _, ip := range v["ip_address_list"].([]interface{}) { + for _, ip := range v["fqdns"].([]interface{}) { + backendAddresses = append(backendAddresses, network.ApplicationGatewayBackendAddress{ + Fqdn: utils.String(ip.(string)), + }) + } + for _, ip := range v["ip_addresses"].([]interface{}) { backendAddresses = append(backendAddresses, network.ApplicationGatewayBackendAddress{ IPAddress: utils.String(ip.(string)), }) } + // TODO: remove in 2.0 + for _, ip := range v["ip_address_list"].([]interface{}) { + backendAddresses = append(backendAddresses, network.ApplicationGatewayBackendAddress{ + IPAddress: utils.String(ip.(string)), + }) + } + // TODO: remove in 2.0 for _, ip := range v["fqdn_list"].([]interface{}) { backendAddresses = append(backendAddresses, network.ApplicationGatewayBackendAddress{ Fqdn: utils.String(ip.(string)), @@ -1099,6 +1221,10 @@ func flattenApplicationGatewayBackendAddressPools(input *[]network.ApplicationGa } output := map[string]interface{}{ + "fqdns": fqdnList, + "ip_addresses": ipAddressList, + + // TODO: deprecated - remove in 2.0 "ip_address_list": ipAddressList, "fqdn_list": 
fqdnList, } @@ -1125,6 +1251,7 @@ func expandApplicationGatewayBackendHTTPSettings(d *schema.ResourceData, gateway v := raw.(map[string]interface{}) name := v["name"].(string) + path := v["path"].(string) port := int32(v["port"].(int)) protocol := v["protocol"].(string) cookieBasedAffinity := v["cookie_based_affinity"].(string) @@ -1135,10 +1262,12 @@ func expandApplicationGatewayBackendHTTPSettings(d *schema.ResourceData, gateway Name: &name, ApplicationGatewayBackendHTTPSettingsPropertiesFormat: &network.ApplicationGatewayBackendHTTPSettingsPropertiesFormat{ CookieBasedAffinity: network.ApplicationGatewayCookieBasedAffinity(cookieBasedAffinity), + Path: utils.String(path), PickHostNameFromBackendAddress: utils.Bool(pickHostNameFromBackendAddress), Port: utils.Int32(port), Protocol: network.ApplicationGatewayProtocol(protocol), RequestTimeout: utils.Int32(requestTimeout), + ConnectionDraining: expandApplicationGatewayConnectionDraining(v), }, } @@ -1193,13 +1322,22 @@ func flattenApplicationGatewayBackendHTTPSettings(input *[]network.ApplicationGa if props := v.ApplicationGatewayBackendHTTPSettingsPropertiesFormat; props != nil { output["cookie_based_affinity"] = string(props.CookieBasedAffinity) + + if path := props.Path; path != nil { + output["path"] = *path + } + output["connection_draining"] = flattenApplicationGatewayConnectionDraining(props.ConnectionDraining) + if port := props.Port; port != nil { output["port"] = int(*port) } + if pickHostNameFromBackendAddress := props.PickHostNameFromBackendAddress; pickHostNameFromBackendAddress != nil { output["pick_host_name_from_backend_address"] = *pickHostNameFromBackendAddress } + output["protocol"] = string(props.Protocol) + if timeout := props.RequestTimeout; timeout != nil { output["request_timeout"] = int(*timeout) } @@ -1245,6 +1383,37 @@ func flattenApplicationGatewayBackendHTTPSettings(input *[]network.ApplicationGa return results, nil } +func expandApplicationGatewayConnectionDraining(d 
map[string]interface{}) *network.ApplicationGatewayConnectionDraining { + connectionsRaw := d["connection_draining"].([]interface{}) + + if len(connectionsRaw) <= 0 { + return nil + } + + connectionRaw := connectionsRaw[0].(map[string]interface{}) + + return &network.ApplicationGatewayConnectionDraining{ + Enabled: utils.Bool(connectionRaw["enabled"].(bool)), + DrainTimeoutInSec: utils.Int32(int32(connectionRaw["drain_timeout_sec"].(int))), + } +} + +func flattenApplicationGatewayConnectionDraining(input *network.ApplicationGatewayConnectionDraining) []interface{} { + result := map[string]interface{}{} + if input == nil { + return []interface{}{} + } + + if v := input.Enabled; v != nil { + result["enabled"] = *v + } + if v := input.DrainTimeoutInSec; v != nil { + result["drain_timeout_sec"] = *v + } + + return []interface{}{result} +} + func expandApplicationGatewaySslPolicy(d *schema.ResourceData) *network.ApplicationGatewaySslPolicy { vs := d.Get("disabled_ssl_protocols").([]interface{}) results := make([]network.ApplicationGatewaySslProtocol, 0) @@ -1287,6 +1456,8 @@ func expandApplicationGatewayHTTPListeners(d *schema.ResourceData, gatewayID str frontendIPConfigID := fmt.Sprintf("%s/frontendIPConfigurations/%s", gatewayID, frontendIPConfigName) frontendPortID := fmt.Sprintf("%s/frontendPorts/%s", gatewayID, frontendPortName) + customErrorConfigurations := expandApplicationGatewayCustomErrorConfigurations(v["custom_error_configuration"].([]interface{})) + listener := network.ApplicationGatewayHTTPListener{ Name: utils.String(name), ApplicationGatewayHTTPListenerPropertiesFormat: &network.ApplicationGatewayHTTPListenerPropertiesFormat{ @@ -1298,6 +1469,7 @@ func expandApplicationGatewayHTTPListeners(d *schema.ResourceData, gatewayID str }, Protocol: network.ApplicationGatewayProtocol(protocol), RequireServerNameIndication: utils.Bool(requireSNI), + CustomErrorConfigurations: customErrorConfigurations, }, } @@ -1382,6 +1554,8 @@ func 
flattenApplicationGatewayHTTPListeners(input *[]network.ApplicationGatewayH if sni := props.RequireServerNameIndication; sni != nil { output["require_sni"] = *sni } + + output["custom_error_configuration"] = flattenApplicationGatewayCustomErrorConfigurations(props.CustomErrorConfigurations) } results = append(results, output) @@ -2143,3 +2317,42 @@ func flattenApplicationGatewayWafConfig(input *network.ApplicationGatewayWebAppl return results } + +func expandApplicationGatewayCustomErrorConfigurations(vs []interface{}) *[]network.ApplicationGatewayCustomError { + results := make([]network.ApplicationGatewayCustomError, 0) + + for _, raw := range vs { + v := raw.(map[string]interface{}) + statusCode := v["status_code"].(string) + customErrorPageUrl := v["custom_error_page_url"].(string) + + output := network.ApplicationGatewayCustomError{ + StatusCode: network.ApplicationGatewayCustomErrorStatusCode(statusCode), + CustomErrorPageURL: utils.String(customErrorPageUrl), + } + results = append(results, output) + } + + return &results +} + +func flattenApplicationGatewayCustomErrorConfigurations(input *[]network.ApplicationGatewayCustomError) []interface{} { + results := make([]interface{}, 0) + if input == nil { + return results + } + + for _, v := range *input { + output := map[string]interface{}{} + + output["status_code"] = string(v.StatusCode) + + if v.CustomErrorPageURL != nil { + output["custom_error_page_url"] = *v.CustomErrorPageURL + } + + results = append(results, output) + } + + return results +} diff --git a/azurerm/resource_arm_application_gateway_test.go b/azurerm/resource_arm_application_gateway_test.go index 0d39043b490c..9a1b2e0bdabb 100644 --- a/azurerm/resource_arm_application_gateway_test.go +++ b/azurerm/resource_arm_application_gateway_test.go @@ -38,6 +38,31 @@ func TestAccAzureRMApplicationGateway_basic(t *testing.T) { }) } +func TestAccAzureRMApplicationGateway_overridePath(t *testing.T) { + resourceName := "azurerm_application_gateway.test" + 
ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMApplicationGatewayDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMApplicationGateway_overridePath(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMApplicationGatewayExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "backend_http_settings.0.path", "/path1/"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccAzureRMApplicationGateway_http2(t *testing.T) { resourceName := "azurerm_application_gateway.test" ri := tf.AccRandTimeInt() @@ -162,6 +187,30 @@ func TestAccAzureRMApplicationGateway_pathBasedRouting(t *testing.T) { }) } +func TestAccAzureRMApplicationGateway_customErrorConfigurations(t *testing.T) { + resourceName := "azurerm_application_gateway.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMApplicationGatewayDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMApplicationGateway_customErrorConfigurations(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMApplicationGatewayExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccAzureRMApplicationGateway_probes(t *testing.T) { resourceName := "azurerm_application_gateway.test" ri := tf.AccRandTimeInt() @@ -308,6 +357,48 @@ func TestAccAzureRMApplicationGateway_webApplicationFirewall(t *testing.T) { }) } +func TestAccAzureRMApplicationGateway_connectionDraining(t *testing.T) { + resourceName := "azurerm_application_gateway.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { 
testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMApplicationGatewayDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMApplicationGateway_connectionDraining(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMApplicationGatewayExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "backend_http_settings.0.connection_draining.0.enabled", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAzureRMApplicationGateway_basic(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMApplicationGatewayExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "sku.0.name", "Standard_Small"), + resource.TestCheckResourceAttr(resourceName, "sku.0.tier", "Standard"), + resource.TestCheckResourceAttr(resourceName, "sku.0.capacity", "2"), + resource.TestCheckResourceAttr(resourceName, "waf_configuration.#", "0"), + resource.TestCheckNoResourceAttr(resourceName, "backend_http_settings.0.connection_draining.0.enabled"), + resource.TestCheckNoResourceAttr(resourceName, "backend_http_settings.0.connection_draining.0.drain_timeout_sec"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testCheckAzureRMApplicationGatewayExists(resourceName string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] @@ -436,6 +527,78 @@ resource "azurerm_application_gateway" "test" { `, template, rInt) } +func testAccAzureRMApplicationGateway_overridePath(rInt int, location string) string { + template := testAccAzureRMApplicationGateway_template(rInt, location) + return fmt.Sprintf(` +%s + +# since these variables are re-used - a locals block makes this more maintainable +locals { + backend_address_pool_name = "${azurerm_virtual_network.test.name}-beap" + frontend_port_name = 
"${azurerm_virtual_network.test.name}-feport" + frontend_ip_configuration_name = "${azurerm_virtual_network.test.name}-feip" + http_setting_name = "${azurerm_virtual_network.test.name}-be-htst" + listener_name = "${azurerm_virtual_network.test.name}-httplstn" + request_routing_rule_name = "${azurerm_virtual_network.test.name}-rqrt" +} + +resource "azurerm_application_gateway" "test" { + name = "acctestag-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + + sku { + name = "Standard_Small" + tier = "Standard" + capacity = 2 + } + + gateway_ip_configuration { + name = "my-gateway-ip-configuration" + subnet_id = "${azurerm_subnet.test.id}" + } + + frontend_port { + name = "${local.frontend_port_name}" + port = 80 + } + + frontend_ip_configuration { + name = "${local.frontend_ip_configuration_name}" + public_ip_address_id = "${azurerm_public_ip.test.id}" + } + + backend_address_pool { + name = "${local.backend_address_pool_name}" + } + + backend_http_settings { + name = "${local.http_setting_name}" + cookie_based_affinity = "Disabled" + path = "/path1/" + port = 80 + protocol = "Http" + request_timeout = 1 + } + + http_listener { + name = "${local.listener_name}" + frontend_ip_configuration_name = "${local.frontend_ip_configuration_name}" + frontend_port_name = "${local.frontend_port_name}" + protocol = "Http" + } + + request_routing_rule { + name = "${local.request_routing_rule_name}" + rule_type = "Basic" + http_listener_name = "${local.listener_name}" + backend_address_pool_name = "${local.backend_address_pool_name}" + backend_http_settings_name = "${local.http_setting_name}" + } +} +`, template, rInt) +} + func testAccAzureRMApplicationGateway_http2(rInt int, location string) string { template := testAccAzureRMApplicationGateway_template(rInt, location) return fmt.Sprintf(` @@ -1320,6 +1483,83 @@ resource "azurerm_application_gateway" "test" { `, template, rInt) } +func 
testAccAzureRMApplicationGateway_connectionDraining(rInt int, location string) string { + template := testAccAzureRMApplicationGateway_template(rInt, location) + return fmt.Sprintf(` +%s + +# since these variables are re-used - a locals block makes this more maintainable +locals { + backend_address_pool_name = "${azurerm_virtual_network.test.name}-beap" + frontend_port_name = "${azurerm_virtual_network.test.name}-feport" + frontend_ip_configuration_name = "${azurerm_virtual_network.test.name}-feip" + http_setting_name = "${azurerm_virtual_network.test.name}-be-htst" + listener_name = "${azurerm_virtual_network.test.name}-httplstn" + request_routing_rule_name = "${azurerm_virtual_network.test.name}-rqrt" +} + +resource "azurerm_application_gateway" "test" { + name = "acctestag-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + enable_http2 = true + + sku { + name = "Standard_Small" + tier = "Standard" + capacity = 2 + } + + gateway_ip_configuration { + name = "my-gateway-ip-configuration" + subnet_id = "${azurerm_subnet.test.id}" + } + + frontend_port { + name = "${local.frontend_port_name}" + port = 80 + } + + frontend_ip_configuration { + name = "${local.frontend_ip_configuration_name}" + public_ip_address_id = "${azurerm_public_ip.test.id}" + } + + backend_address_pool { + name = "${local.backend_address_pool_name}" + } + + backend_http_settings { + name = "${local.http_setting_name}" + cookie_based_affinity = "Disabled" + port = 80 + protocol = "Http" + request_timeout = 1 + + connection_draining { + enabled = true + drain_timeout_sec = 1984 + } + } + + http_listener { + name = "${local.listener_name}" + frontend_ip_configuration_name = "${local.frontend_ip_configuration_name}" + frontend_port_name = "${local.frontend_port_name}" + protocol = "Http" + } + + request_routing_rule { + name = "${local.request_routing_rule_name}" + rule_type = "Basic" + http_listener_name = 
"${local.listener_name}" + backend_address_pool_name = "${local.backend_address_pool_name}" + backend_http_settings_name = "${local.http_setting_name}" + } +} +`, template, rInt) +} + func testAccAzureRMApplicationGateway_template(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { @@ -1349,3 +1589,94 @@ resource "azurerm_public_ip" "test" { } `, rInt, location, rInt, rInt, rInt) } + +func testAccAzureRMApplicationGateway_customErrorConfigurations(rInt int, location string) string { + template := testAccAzureRMApplicationGateway_template(rInt, location) + return fmt.Sprintf(` +%s + +# since these variables are re-used - a locals block makes this more maintainable +locals { + backend_address_pool_name = "${azurerm_virtual_network.test.name}-beap" + frontend_port_name = "${azurerm_virtual_network.test.name}-feport" + frontend_ip_configuration_name = "${azurerm_virtual_network.test.name}-feip" + http_setting_name = "${azurerm_virtual_network.test.name}-be-htst" + listener_name = "${azurerm_virtual_network.test.name}-httplstn" + request_routing_rule_name = "${azurerm_virtual_network.test.name}-rqrt" + path_rule_name = "${azurerm_virtual_network.test.name}-pathrule1" + url_path_map_name = "${azurerm_virtual_network.test.name}-urlpath1" +} + +resource "azurerm_application_gateway" "test" { + name = "acctestag-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + + sku { + name = "Standard_Small" + tier = "Standard" + capacity = 2 + } + + gateway_ip_configuration { + name = "my-gateway-ip-configuration" + subnet_id = "${azurerm_subnet.test.id}" + } + + frontend_port { + name = "${local.frontend_port_name}" + port = 80 + } + + frontend_ip_configuration { + name = "${local.frontend_ip_configuration_name}" + public_ip_address_id = "${azurerm_public_ip.test.id}" + } + + backend_address_pool { + name = "${local.backend_address_pool_name}" + } + + 
backend_http_settings { + name = "${local.http_setting_name}" + cookie_based_affinity = "Disabled" + port = 80 + protocol = "Http" + request_timeout = 1 + } + + http_listener { + name = "${local.listener_name}" + frontend_ip_configuration_name = "${local.frontend_ip_configuration_name}" + frontend_port_name = "${local.frontend_port_name}" + protocol = "Http" + custom_error_configuration { + status_code = "HttpStatus403" + custom_error_page_url = "http://azure.com/error403_listener.html" + } + custom_error_configuration { + status_code = "HttpStatus502" + custom_error_page_url = "http://azure.com/error502_listener.html" + } + } + + custom_error_configuration { + status_code = "HttpStatus403" + custom_error_page_url = "http://azure.com/error.html" + } + + custom_error_configuration { + status_code = "HttpStatus502" + custom_error_page_url = "http://azure.com/error.html" + } + + request_routing_rule { + name = "${local.request_routing_rule_name}" + rule_type = "Basic" + http_listener_name = "${local.listener_name}" + backend_address_pool_name = "${local.backend_address_pool_name}" + backend_http_settings_name = "${local.http_setting_name}" + } +} +`, template, rInt) +} diff --git a/azurerm/resource_arm_application_security_group_test.go b/azurerm/resource_arm_application_security_group_test.go index 48cd4cf2c84c..e8a31054c64e 100644 --- a/azurerm/resource_arm_application_security_group_test.go +++ b/azurerm/resource_arm_application_security_group_test.go @@ -209,7 +209,7 @@ resource "azurerm_application_security_group" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { "Hello" = "World" } } diff --git a/azurerm/resource_arm_automation_account_test.go b/azurerm/resource_arm_automation_account_test.go index e0a8fa689b2d..25e3c5a279b0 100644 --- a/azurerm/resource_arm_automation_account_test.go +++ b/azurerm/resource_arm_automation_account_test.go @@ -205,7 +205,7 @@ resource 
"azurerm_automation_account" "test" { name = "Basic" } - tags { + tags = { "hello" = "world" } } diff --git a/azurerm/resource_arm_automation_schedule.go b/azurerm/resource_arm_automation_schedule.go index 1e42ae35958b..14041a10d472 100644 --- a/azurerm/resource_arm_automation_schedule.go +++ b/azurerm/resource_arm_automation_schedule.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/terraform/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/set" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -222,11 +223,6 @@ func resourceArmAutomationScheduleCreateUpdate(d *schema.ResourceData, meta inte name := d.Get("name").(string) resGroup := d.Get("resource_group_name").(string) - frequency := d.Get("frequency").(string) - - timeZone := d.Get("timezone").(string) - description := d.Get("description").(string) - //CustomizeDiff should ensure one of these two is set //todo remove this once `account_name` is removed accountName := "" @@ -236,6 +232,23 @@ func resourceArmAutomationScheduleCreateUpdate(d *schema.ResourceData, meta inte accountName = v.(string) } + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resGroup, accountName, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Automation Schedule %q (Account %q / Resource Group %q): %s", name, accountName, resGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_automation_schedule", *existing.ID) + } + } + + frequency := d.Get("frequency").(string) + timeZone := d.Get("timezone").(string) + description := 
d.Get("description").(string) + parameters := automation.ScheduleCreateOrUpdateParameters{ Name: &name, ScheduleCreateOrUpdateProperties: &automation.ScheduleCreateOrUpdateProperties{ diff --git a/azurerm/resource_arm_automation_schedule_test.go b/azurerm/resource_arm_automation_schedule_test.go index 95f8cc7686db..300fa33fbefd 100644 --- a/azurerm/resource_arm_automation_schedule_test.go +++ b/azurerm/resource_arm_automation_schedule_test.go @@ -33,6 +33,32 @@ func TestAccAzureRMAutomationSchedule_oneTime_basic(t *testing.T) { }, }) } +func TestAccAzureRMAutomationSchedule_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_automation_schedule.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMAutomationScheduleDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMAutomationSchedule_oneTime_basic(ri, location), + Check: checkAccAzureRMAutomationSchedule_oneTime_basic(resourceName), + }, + { + Config: testAccAzureRMAutomationSchedule_requiresImport(ri, location), + ExpectError: testRequiresImportError("azurerm_automation_schedule"), + }, + }, + }) +} func TestAccAzureRMAutomationSchedule_oneTime_complete(t *testing.T) { resourceName := "azurerm_automation_schedule.test" @@ -338,6 +364,20 @@ resource "azurerm_automation_schedule" "test" { `, testAccAzureRMAutomationSchedule_prerequisites(rInt, location), rInt) } +func testAccAzureRMAutomationSchedule_requiresImport(rInt int, location string) string { + template := testAccAzureRMAutomationSchedule_oneTime_basic(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_automation_schedule" "import" { + name = "${azurerm_automation_schedule.test.name}" + resource_group_name = 
"${azurerm_automation_schedule.test.resource_group_name}" + automation_account_name = "${azurerm_automation_schedule.test.automation_account_name}" + frequency = "${azurerm_automation_schedule.test.frequency}" +} +`, template) +} + func checkAccAzureRMAutomationSchedule_oneTime_basic(resourceName string) resource.TestCheckFunc { return resource.ComposeAggregateTestCheckFunc( testCheckAzureRMAutomationScheduleExists(resourceName), diff --git a/azurerm/resource_arm_autoscale_setting.go b/azurerm/resource_arm_autoscale_setting.go index 80876ba0f5a0..8de1667ab976 100644 --- a/azurerm/resource_arm_autoscale_setting.go +++ b/azurerm/resource_arm_autoscale_setting.go @@ -12,12 +12,20 @@ import ( "github.com/hashicorp/terraform/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/response" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) func resourceArmAutoScaleSetting() *schema.Resource { return &schema.Resource{ + DeprecationMessage: `The 'azurerm_autoscale_setting' resource is deprecated in favour of the renamed version 'azurerm_monitor_autoscale_setting'. + +Information on migrating to the renamed resource can be found here: https://terraform.io/docs/providers/azurerm/guides/migrating-between-renamed-resources.html + +As such the existing 'azurerm_autoscale_setting' resource is deprecated and will be removed in the next major version of the AzureRM Provider (2.0). 
+`, + Create: resourceArmAutoScaleSettingCreateUpdate, Read: resourceArmAutoScaleSettingRead, Update: resourceArmAutoScaleSettingCreateUpdate, @@ -71,17 +79,17 @@ func resourceArmAutoScaleSetting() *schema.Resource { "minimum": { Type: schema.TypeInt, Required: true, - ValidateFunc: validation.IntBetween(1, 40), + ValidateFunc: validation.IntBetween(0, 1000), }, "maximum": { Type: schema.TypeInt, Required: true, - ValidateFunc: validation.IntBetween(1, 40), + ValidateFunc: validation.IntBetween(0, 1000), }, "default": { Type: schema.TypeInt, Required: true, - ValidateFunc: validation.IntBetween(1, 40), + ValidateFunc: validation.IntBetween(0, 1000), }, }, }, @@ -345,6 +353,20 @@ func resourceArmAutoScaleSettingCreateUpdate(d *schema.ResourceData, meta interf name := d.Get("name").(string) resourceGroup := d.Get("resource_group_name").(string) + + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing AutoScale Setting %q (Resource Group %q): %s", name, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_autoscale_setting", *existing.ID) + } + } + location := azureRMNormalizeLocation(d.Get("location").(string)) enabled := d.Get("enabled").(bool) targetResourceId := d.Get("target_resource_id").(string) diff --git a/azurerm/resource_arm_autoscale_setting_test.go b/azurerm/resource_arm_autoscale_setting_test.go index 785b8b250c57..30bef543a9d7 100644 --- a/azurerm/resource_arm_autoscale_setting_test.go +++ b/azurerm/resource_arm_autoscale_setting_test.go @@ -5,7 +5,6 @@ import ( "net/http" "testing" - "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" @@ 
-14,9 +13,8 @@ import ( func TestAccAzureRMAutoScaleSetting_basic(t *testing.T) { resourceName := "azurerm_autoscale_setting.test" ri := tf.AccRandTimeInt() - rs := acctest.RandString(6) location := testLocation() - config := testAccAzureRMAutoScaleSetting_basic(ri, rs, location) + config := testAccAzureRMAutoScaleSetting_basic(ri, location) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -44,12 +42,40 @@ func TestAccAzureRMAutoScaleSetting_basic(t *testing.T) { }) } +func TestAccAzureRMAutoScaleSetting_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_autoscale_setting.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMAutoScaleSettingDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMAutoScaleSetting_basic(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMAutoScaleSettingExists(resourceName), + ), + }, + { + Config: testAccAzureRMAutoScaleSetting_requiresImport(ri, location), + ExpectError: testRequiresImportError("azurerm_autoscale_setting"), + }, + }, + }) +} + func TestAccAzureRMAutoScaleSetting_multipleProfiles(t *testing.T) { resourceName := "azurerm_autoscale_setting.test" ri := tf.AccRandTimeInt() - rs := acctest.RandString(6) location := testLocation() - config := testAccAzureRMAutoScaleSetting_multipleProfiles(ri, rs, location) + config := testAccAzureRMAutoScaleSetting_multipleProfiles(ri, location) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -70,10 +96,56 @@ func TestAccAzureRMAutoScaleSetting_multipleProfiles(t *testing.T) { }) } +func TestAccAzureRMAutoScaleSetting_update(t *testing.T) { + resourceName := "azurerm_autoscale_setting.test" + 
ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMAutoScaleSettingDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMAutoScaleSetting_capacity(ri, location, 1, 3, 2), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMAutoScaleSettingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "profile.#", "1"), + resource.TestCheckResourceAttr(resourceName, "profile.0.capacity.0.minimum", "1"), + resource.TestCheckResourceAttr(resourceName, "profile.0.capacity.0.maximum", "3"), + resource.TestCheckResourceAttr(resourceName, "profile.0.capacity.0.default", "2"), + ), + }, + { + Config: testAccAzureRMAutoScaleSetting_capacity(ri, location, 0, 400, 0), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMAutoScaleSettingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "profile.#", "1"), + resource.TestCheckResourceAttr(resourceName, "profile.0.capacity.0.minimum", "0"), + resource.TestCheckResourceAttr(resourceName, "profile.0.capacity.0.maximum", "400"), + resource.TestCheckResourceAttr(resourceName, "profile.0.capacity.0.default", "0"), + ), + }, + { + Config: testAccAzureRMAutoScaleSetting_capacity(ri, location, 2, 45, 3), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMAutoScaleSettingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "profile.#", "1"), + resource.TestCheckResourceAttr(resourceName, "profile.0.capacity.0.minimum", "2"), + resource.TestCheckResourceAttr(resourceName, "profile.0.capacity.0.maximum", "45"), + resource.TestCheckResourceAttr(resourceName, "profile.0.capacity.0.default", "3"), + ), + }, + }, + }) 
+} + func TestAccAzureRMAutoScaleSetting_multipleRules(t *testing.T) { resourceName := "azurerm_autoscale_setting.test" ri := tf.AccRandTimeInt() - rs := acctest.RandString(6) location := testLocation() resource.ParallelTest(t, resource.TestCase{ @@ -82,7 +154,7 @@ func TestAccAzureRMAutoScaleSetting_multipleRules(t *testing.T) { CheckDestroy: testCheckAzureRMAutoScaleSettingDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMAutoScaleSetting_basic(ri, rs, location), + Config: testAccAzureRMAutoScaleSetting_basic(ri, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMAutoScaleSettingExists(resourceName), resource.TestCheckResourceAttr(resourceName, "enabled", "true"), @@ -94,7 +166,7 @@ func TestAccAzureRMAutoScaleSetting_multipleRules(t *testing.T) { ), }, { - Config: testAccAzureRMAutoScaleSetting_multipleRules(ri, rs, location), + Config: testAccAzureRMAutoScaleSetting_multipleRules(ri, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMAutoScaleSettingExists(resourceName), resource.TestCheckResourceAttr(resourceName, "enabled", "true"), @@ -113,7 +185,6 @@ func TestAccAzureRMAutoScaleSetting_multipleRules(t *testing.T) { func TestAccAzureRMAutoScaleSetting_customEmails(t *testing.T) { resourceName := "azurerm_autoscale_setting.test" ri := tf.AccRandTimeInt() - rs := acctest.RandString(6) location := testLocation() resource.ParallelTest(t, resource.TestCase{ @@ -122,7 +193,7 @@ func TestAccAzureRMAutoScaleSetting_customEmails(t *testing.T) { CheckDestroy: testCheckAzureRMAutoScaleSettingDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMAutoScaleSetting_email(ri, rs, location), + Config: testAccAzureRMAutoScaleSetting_email(ri, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMAutoScaleSettingExists(resourceName), resource.TestCheckResourceAttr(resourceName, "notification.#", "1"), @@ -132,7 +203,7 @@ func TestAccAzureRMAutoScaleSetting_customEmails(t *testing.T) { ), }, { - Config: 
testAccAzureRMAutoScaleSetting_emailUpdated(ri, rs, location), + Config: testAccAzureRMAutoScaleSetting_emailUpdated(ri, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMAutoScaleSettingExists(resourceName), resource.TestCheckResourceAttr(resourceName, "notification.#", "1"), @@ -149,9 +220,8 @@ func TestAccAzureRMAutoScaleSetting_customEmails(t *testing.T) { func TestAccAzureRMAutoScaleSetting_recurrence(t *testing.T) { resourceName := "azurerm_autoscale_setting.test" ri := tf.AccRandTimeInt() - rs := acctest.RandString(6) location := testLocation() - config := testAccAzureRMAutoScaleSetting_recurrence(ri, rs, location) + config := testAccAzureRMAutoScaleSetting_recurrence(ri, location) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -181,7 +251,6 @@ func TestAccAzureRMAutoScaleSetting_recurrence(t *testing.T) { func TestAccAzureRMAutoScaleSetting_recurrenceUpdate(t *testing.T) { resourceName := "azurerm_autoscale_setting.test" ri := tf.AccRandTimeInt() - rs := acctest.RandString(6) location := testLocation() resource.ParallelTest(t, resource.TestCase{ @@ -190,7 +259,7 @@ func TestAccAzureRMAutoScaleSetting_recurrenceUpdate(t *testing.T) { CheckDestroy: testCheckAzureRMAutoScaleSettingDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMAutoScaleSetting_recurrence(ri, rs, location), + Config: testAccAzureRMAutoScaleSetting_recurrence(ri, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMAutoScaleSettingExists(resourceName), resource.TestCheckResourceAttr(resourceName, "notification.#", "1"), @@ -203,7 +272,7 @@ func TestAccAzureRMAutoScaleSetting_recurrenceUpdate(t *testing.T) { ), }, { - Config: testAccAzureRMAutoScaleSetting_recurrenceUpdated(ri, rs, location), + Config: testAccAzureRMAutoScaleSetting_recurrenceUpdated(ri, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMAutoScaleSettingExists(resourceName), resource.TestCheckResourceAttr(resourceName, 
"profile.0.recurrence.#", "1"), @@ -222,9 +291,8 @@ func TestAccAzureRMAutoScaleSetting_recurrenceUpdate(t *testing.T) { func TestAccAzureRMAutoScaleSetting_fixedDate(t *testing.T) { resourceName := "azurerm_autoscale_setting.test" ri := tf.AccRandTimeInt() - rs := acctest.RandString(6) location := testLocation() - config := testAccAzureRMAutoScaleSetting_fixedDate(ri, rs, location) + config := testAccAzureRMAutoScaleSetting_fixedDate(ri, location) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -306,8 +374,8 @@ func testCheckAzureRMAutoScaleSettingDestroy(s *terraform.State) error { return nil } -func testAccAzureRMAutoScaleSetting_basic(rInt int, rString string, location string) string { - template := testAccAzureRMAutoScaleSetting_template(rInt, rString, location) +func testAccAzureRMAutoScaleSetting_basic(rInt int, location string) string { + template := testAccAzureRMAutoScaleSetting_template(rInt, location) return fmt.Sprintf(` %s @@ -323,7 +391,7 @@ resource "azurerm_autoscale_setting" "test" { capacity { default = 1 minimum = 1 - maximum = 10 + maximum = 30 } rule { @@ -350,8 +418,52 @@ resource "azurerm_autoscale_setting" "test" { `, template, rInt) } -func testAccAzureRMAutoScaleSetting_multipleProfiles(rInt int, rString string, location string) string { - template := testAccAzureRMAutoScaleSetting_template(rInt, rString, location) +func testAccAzureRMAutoScaleSetting_requiresImport(rInt int, location string) string { + template := testAccAzureRMAutoScaleSetting_basic(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_autoscale_setting" "import" { + name = "${azurerm_autoscale_setting.test.name}" + resource_group_name = "${azurerm_autoscale_setting.test.resource_group_name}" + location = "${azurerm_autoscale_setting.test.location}" + target_resource_id = "${azurerm_autoscale_setting.test.target_resource_id}" + + profile { + name = "metricRules" + + capacity { + default = 1 + minimum = 1 + maximum = 30 + 
} + + rule { + metric_trigger { + metric_name = "Percentage CPU" + metric_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + time_grain = "PT1M" + statistic = "Average" + time_window = "PT5M" + time_aggregation = "Average" + operator = "GreaterThan" + threshold = 75 + } + + scale_action { + direction = "Increase" + type = "ChangeCount" + value = 1 + cooldown = "PT1M" + } + } + } +} +`, template) +} + +func testAccAzureRMAutoScaleSetting_multipleProfiles(rInt int, location string) string { + template := testAccAzureRMAutoScaleSetting_template(rInt, location) return fmt.Sprintf(` %s @@ -367,7 +479,7 @@ resource "azurerm_autoscale_setting" "test" { capacity { default = 1 minimum = 1 - maximum = 10 + maximum = 30 } rule { @@ -417,7 +529,7 @@ resource "azurerm_autoscale_setting" "test" { capacity { default = 1 minimum = 1 - maximum = 10 + maximum = 30 } recurrence { @@ -437,8 +549,8 @@ resource "azurerm_autoscale_setting" "test" { `, template, rInt) } -func testAccAzureRMAutoScaleSetting_multipleRules(rInt int, rString string, location string) string { - template := testAccAzureRMAutoScaleSetting_template(rInt, rString, location) +func testAccAzureRMAutoScaleSetting_multipleRules(rInt int, location string) string { + template := testAccAzureRMAutoScaleSetting_template(rInt, location) return fmt.Sprintf(` %s @@ -455,7 +567,7 @@ resource "azurerm_autoscale_setting" "test" { capacity { default = 1 minimum = 1 - maximum = 10 + maximum = 30 } rule { @@ -502,8 +614,53 @@ resource "azurerm_autoscale_setting" "test" { `, template, rInt) } -func testAccAzureRMAutoScaleSetting_email(rInt int, rString string, location string) string { - template := testAccAzureRMAutoScaleSetting_template(rInt, rString, location) +func testAccAzureRMAutoScaleSetting_capacity(rInt int, location string, min int, max int, defaultVal int) string { + template := testAccAzureRMAutoScaleSetting_template(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_autoscale_setting" "test" { 
+ name = "acctestautoscale-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + target_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + enabled = false + + profile { + name = "metricRules" + + capacity { + default = %d + minimum = %d + maximum = %d + } + + rule { + metric_trigger { + metric_name = "Percentage CPU" + metric_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + time_grain = "PT1M" + statistic = "Average" + time_window = "PT5M" + time_aggregation = "Average" + operator = "GreaterThan" + threshold = 75 + } + + scale_action { + direction = "Increase" + type = "ChangeCount" + value = 1 + cooldown = "PT1M" + } + } + } +} +`, template, rInt, defaultVal, min, max) +} + +func testAccAzureRMAutoScaleSetting_email(rInt int, location string) string { + template := testAccAzureRMAutoScaleSetting_template(rInt, location) return fmt.Sprintf(` %s @@ -519,7 +676,7 @@ resource "azurerm_autoscale_setting" "test" { capacity { default = 1 minimum = 1 - maximum = 10 + maximum = 30 } rule { @@ -554,8 +711,8 @@ resource "azurerm_autoscale_setting" "test" { `, template, rInt, rInt) } -func testAccAzureRMAutoScaleSetting_emailUpdated(rInt int, rString string, location string) string { - template := testAccAzureRMAutoScaleSetting_template(rInt, rString, location) +func testAccAzureRMAutoScaleSetting_emailUpdated(rInt int, location string) string { + template := testAccAzureRMAutoScaleSetting_template(rInt, location) return fmt.Sprintf(` %s @@ -571,7 +728,7 @@ resource "azurerm_autoscale_setting" "test" { capacity { default = 1 minimum = 1 - maximum = 10 + maximum = 30 } rule { @@ -606,8 +763,8 @@ resource "azurerm_autoscale_setting" "test" { `, template, rInt, rInt, rInt) } -func testAccAzureRMAutoScaleSetting_recurrence(rInt int, rString string, location string) string { - template := testAccAzureRMAutoScaleSetting_template(rInt, rString, location) +func 
testAccAzureRMAutoScaleSetting_recurrence(rInt int, location string) string { + template := testAccAzureRMAutoScaleSetting_template(rInt, location) return fmt.Sprintf(` %s @@ -623,7 +780,7 @@ resource "azurerm_autoscale_setting" "test" { capacity { default = 1 minimum = 1 - maximum = 10 + maximum = 30 } recurrence { @@ -650,8 +807,8 @@ resource "azurerm_autoscale_setting" "test" { `, template, rInt) } -func testAccAzureRMAutoScaleSetting_recurrenceUpdated(rInt int, rString string, location string) string { - template := testAccAzureRMAutoScaleSetting_template(rInt, rString, location) +func testAccAzureRMAutoScaleSetting_recurrenceUpdated(rInt int, location string) string { + template := testAccAzureRMAutoScaleSetting_template(rInt, location) return fmt.Sprintf(` %s @@ -667,7 +824,7 @@ resource "azurerm_autoscale_setting" "test" { capacity { default = 1 minimum = 1 - maximum = 10 + maximum = 30 } recurrence { @@ -694,8 +851,8 @@ resource "azurerm_autoscale_setting" "test" { `, template, rInt) } -func testAccAzureRMAutoScaleSetting_fixedDate(rInt int, rString string, location string) string { - template := testAccAzureRMAutoScaleSetting_template(rInt, rString, location) +func testAccAzureRMAutoScaleSetting_fixedDate(rInt int, location string) string { + template := testAccAzureRMAutoScaleSetting_template(rInt, location) return fmt.Sprintf(` %s @@ -711,7 +868,7 @@ resource "azurerm_autoscale_setting" "test" { capacity { default = 1 minimum = 1 - maximum = 10 + maximum = 30 } fixed_date { @@ -724,7 +881,7 @@ resource "azurerm_autoscale_setting" "test" { `, template, rInt) } -func testAccAzureRMAutoScaleSetting_template(rInt int, rString string, location string) string { +func testAccAzureRMAutoScaleSetting_template(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { name = "acctestRG-%d" @@ -745,35 +902,17 @@ resource "azurerm_subnet" "test" { address_prefix = "10.0.2.0/24" } -resource "azurerm_storage_account" "test" { - 
name = "accsa%s" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - account_tier = "Standard" - account_replication_type = "LRS" - - tags { - environment = "staging" - } -} - -resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" -} - resource "azurerm_virtual_machine_scale_set" "test" { - name = "acctvmss-%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - upgrade_policy_mode = "Manual" + name = "acctvmss-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + upgrade_policy_mode = "Automatic" + single_placement_group = "false" sku { - name = "Standard_F2" + name = "Standard_DS1_v2" tier = "Standard" - capacity = 2 + capacity = 30 } os_profile { @@ -794,10 +933,10 @@ resource "azurerm_virtual_machine_scale_set" "test" { } storage_profile_os_disk { - name = "osDiskProfile" - caching = "ReadWrite" - create_option = "FromImage" - vhd_containers = ["${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}"] + name = "" + caching = "ReadWrite" + create_option = "FromImage" + managed_disk_type = "StandardSSD_LRS" } storage_profile_image_reference { @@ -807,5 +946,5 @@ resource "azurerm_virtual_machine_scale_set" "test" { version = "latest" } } -`, rInt, location, rInt, rString, rInt, rInt, rInt) +`, rInt, location, rInt, rInt, rInt, rInt) } diff --git a/azurerm/resource_arm_availability_set_test.go b/azurerm/resource_arm_availability_set_test.go index 000a9cc43b54..470c7899071a 100644 --- a/azurerm/resource_arm_availability_set_test.go +++ b/azurerm/resource_arm_availability_set_test.go @@ -306,7 +306,7 @@ resource "azurerm_availability_set" "test" { location = 
"${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { environment = "Production" cost_center = "MSFT" } @@ -326,7 +326,7 @@ resource "azurerm_availability_set" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { environment = "staging" } } diff --git a/azurerm/resource_arm_batch_account_test.go b/azurerm/resource_arm_batch_account_test.go index 3f5d7db183ef..25e066a8799c 100644 --- a/azurerm/resource_arm_batch_account_test.go +++ b/azurerm/resource_arm_batch_account_test.go @@ -234,7 +234,7 @@ resource "azurerm_batch_account" "test" { pool_allocation_mode = "BatchService" storage_account_id = "${azurerm_storage_account.test.id}" - tags { + tags = { env = "test" } } @@ -263,7 +263,7 @@ resource "azurerm_batch_account" "test" { pool_allocation_mode = "BatchService" storage_account_id = "${azurerm_storage_account.test.id}" - tags { + tags = { env = "test" version = "2" } diff --git a/azurerm/resource_arm_batch_pool.go b/azurerm/resource_arm_batch_pool.go index 45c631281e5a..5834c1f28c87 100644 --- a/azurerm/resource_arm_batch_pool.go +++ b/azurerm/resource_arm_batch_pool.go @@ -9,6 +9,7 @@ import ( "time" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/Azure/azure-sdk-for-go/services/batch/mgmt/2017-09-01/batch" @@ -54,6 +55,13 @@ func resourceArmBatchPool() *schema.Resource { ForceNew: true, DiffSuppressFunc: suppress.CaseDifference, }, + "max_tasks_per_node": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(1), + }, "fixed_scale": { Type: schema.TypeList, Optional: true, @@ -242,11 +250,26 @@ func resourceArmBatchPoolCreate(d 
*schema.ResourceData, meta interface{}) error poolName := d.Get("name").(string) displayName := d.Get("display_name").(string) vmSize := d.Get("vm_size").(string) + maxTasksPerNode := int32(d.Get("max_tasks_per_node").(int)) + + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, accountName, poolName) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Batch Pool %q (Account %q / Resource Group %q): %+v", poolName, accountName, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_batch_pool", *existing.ID) + } + } parameters := batch.Pool{ PoolProperties: &batch.PoolProperties{ - VMSize: &vmSize, - DisplayName: &displayName, + VMSize: &vmSize, + DisplayName: &displayName, + MaxTasksPerNode: &maxTasksPerNode, }, } diff --git a/azurerm/resource_arm_batch_pool_test.go b/azurerm/resource_arm_batch_pool_test.go index 911aa86451d3..cd660d3b2381 100644 --- a/azurerm/resource_arm_batch_pool_test.go +++ b/azurerm/resource_arm_batch_pool_test.go @@ -16,11 +16,42 @@ import ( func TestAccAzureRMBatchPool_basic(t *testing.T) { resourceName := "azurerm_batch_pool.test" ri := tf.AccRandTimeInt() - rs := acctest.RandString(4) - location := testLocation() - config := testaccAzureRMBatchPool_basic(ri, rs, location) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMBatchPoolDestroy, + Steps: []resource.TestStep{ + { + Config: testaccAzureRMBatchPool_basic(ri, rs, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMBatchPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "vm_size", "STANDARD_A1"), + resource.TestCheckResourceAttr(resourceName, "node_agent_sku_id", "batch.node.ubuntu 16.04"), + resource.TestCheckResourceAttr(resourceName, 
"account_name", fmt.Sprintf("testaccbatch%s", rs)), + resource.TestCheckResourceAttr(resourceName, "storage_image_reference.#", "1"), + resource.TestCheckResourceAttr(resourceName, "storage_image_reference.0.publisher", "Canonical"), + resource.TestCheckResourceAttr(resourceName, "storage_image_reference.0.sku", "16.04.0-LTS"), + resource.TestCheckResourceAttr(resourceName, "storage_image_reference.0.offer", "UbuntuServer"), + resource.TestCheckResourceAttr(resourceName, "fixed_scale.#", "1"), + resource.TestCheckResourceAttr(resourceName, "fixed_scale.0.target_dedicated_nodes", "1"), + resource.TestCheckResourceAttr(resourceName, "start_task.#", "0"), + ), + }, + }, + }) +} + +func TestAccAzureRMBatchPool_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_batch_pool.test" + ri := tf.AccRandTimeInt() + rs := acctest.RandString(4) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -28,7 +59,7 @@ func TestAccAzureRMBatchPool_basic(t *testing.T) { CheckDestroy: testCheckAzureRMBatchPoolDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testaccAzureRMBatchPool_basic(ri, rs, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMBatchPoolExists(resourceName), resource.TestCheckResourceAttr(resourceName, "vm_size", "STANDARD_A1"), @@ -43,6 +74,10 @@ func TestAccAzureRMBatchPool_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "start_task.#", "0"), ), }, + { + Config: testaccAzureRMBatchPool_requiresImport(ri, rs, testLocation()), + ExpectError: testRequiresImportError("azurerm_batch_account"), + }, }, }) } @@ -50,11 +85,7 @@ func TestAccAzureRMBatchPool_basic(t *testing.T) { func TestAccAzureRMBatchPool_fixedScale_complete(t *testing.T) { resourceName := "azurerm_batch_pool.test" ri := tf.AccRandTimeInt() - rs := acctest.RandString(4) - location := 
testLocation() - - config := testaccAzureRMBatchPool_fixedScale_complete(ri, rs, location) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -62,10 +93,11 @@ func TestAccAzureRMBatchPool_fixedScale_complete(t *testing.T) { CheckDestroy: testCheckAzureRMBatchPoolDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testaccAzureRMBatchPool_fixedScale_complete(ri, rs, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMBatchPoolExists(resourceName), resource.TestCheckResourceAttr(resourceName, "vm_size", "STANDARD_A1"), + resource.TestCheckResourceAttr(resourceName, "max_tasks_per_node", "2"), resource.TestCheckResourceAttr(resourceName, "node_agent_sku_id", "batch.node.ubuntu 16.04"), resource.TestCheckResourceAttr(resourceName, "account_name", fmt.Sprintf("testaccbatch%s", rs)), resource.TestCheckResourceAttr(resourceName, "storage_image_reference.#", "1"), @@ -87,11 +119,7 @@ func TestAccAzureRMBatchPool_fixedScale_complete(t *testing.T) { func TestAccAzureRMBatchPool_autoScale_complete(t *testing.T) { resourceName := "azurerm_batch_pool.test" ri := tf.AccRandTimeInt() rs := acctest.RandString(4) - location := testLocation() - - config := testaccAzureRMBatchPool_autoScale_complete(ri, rs, location) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -99,7 +127,7 @@ func TestAccAzureRMBatchPool_autoScale_complete(t *testing.T) { CheckDestroy: testCheckAzureRMBatchPoolDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testaccAzureRMBatchPool_autoScale_complete(ri, rs, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMBatchPoolExists(resourceName), resource.TestCheckResourceAttr(resourceName, "vm_size", "STANDARD_A1"), @@ -122,12 +150,7 @@ func TestAccAzureRMBatchPool_completeUpdated(t *testing.T) { resourceName := "azurerm_batch_pool.test" ri := 
tf.AccRandTimeInt() rs := acctest.RandString(4) - location := testLocation() - - config := testaccAzureRMBatchPool_fixedScale_complete(ri, rs, location) - configUpdate := testaccAzureRMBatchPool_autoScale_complete(ri, rs, location) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -135,7 +158,7 @@ func TestAccAzureRMBatchPool_completeUpdated(t *testing.T) { CheckDestroy: testCheckAzureRMBatchPoolDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testaccAzureRMBatchPool_fixedScale_complete(ri, rs, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMBatchPoolExists(resourceName), resource.TestCheckResourceAttr(resourceName, "vm_size", "STANDARD_A1"), @@ -154,7 +177,7 @@ func TestAccAzureRMBatchPool_completeUpdated(t *testing.T) { ), }, { - Config: configUpdate, + Config: testaccAzureRMBatchPool_autoScale_complete(ri, rs, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMBatchPoolExists(resourceName), resource.TestCheckResourceAttr(resourceName, "vm_size", "STANDARD_A1"), @@ -177,11 +200,7 @@ func TestAccAzureRMBatchPoolStartTask_basic(t *testing.T) { resourceName := "azurerm_batch_pool.test" ri := tf.AccRandTimeInt() rs := acctest.RandString(4) - location := testLocation() - - config := testaccAzureRMBatchPoolStartTask_basic(ri, rs, location) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -189,7 +208,7 @@ func TestAccAzureRMBatchPoolStartTask_basic(t *testing.T) { CheckDestroy: testCheckAzureRMBatchPoolDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testaccAzureRMBatchPoolStartTask_basic(ri, rs, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMBatchPoolExists(resourceName), resource.TestCheckResourceAttr(resourceName, "vm_size", "STANDARD_A1"), @@ -295,7 +314,7 @@ resource "azurerm_batch_account" "test" { pool_allocation_mode = 
"BatchService" storage_account_id = "${azurerm_storage_account.test.id}" - tags { + tags = { env = "test" } } @@ -305,7 +324,8 @@ resource "azurerm_batch_pool" "test" { resource_group_name = "${azurerm_resource_group.test.name}" account_name = "${azurerm_batch_account.test.name}" display_name = "Test Acc Pool" - vm_size = "Standard_A1" + vm_size = "Standard_A1", + max_tasks_per_node = 2 node_agent_sku_id = "batch.node.ubuntu 16.04" fixed_scale { @@ -344,7 +364,7 @@ resource "azurerm_batch_account" "test" { pool_allocation_mode = "BatchService" storage_account_id = "${azurerm_storage_account.test.id}" - tags { + tags = { env = "test" } } @@ -413,6 +433,31 @@ resource "azurerm_batch_pool" "test" { `, rInt, location, rString, rString) } +func testaccAzureRMBatchPool_requiresImport(rInt int, rString string, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_batch_pool" "import" { + name = "${azurerm_batch_pool.test.name}" + resource_group_name = "${azurerm_batch_pool.test.resource_group_name}" + account_name = "${azurerm_batch_pool.test.account_name}" + node_agent_sku_id = "${azurerm_batch_pool.test.node_agent_sku_id}" + vm_size = "${azurerm_batch_pool.test.vm_size}" + + fixed_scale { + target_dedicated_nodes = 1 + } + + storage_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04.0-LTS" + version = "latest" + } +} +`, testaccAzureRMBatchPool_basic(rInt, location, rString)) +} + func testaccAzureRMBatchPoolStartTask_basic(rInt int, rString string, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { @@ -449,7 +494,7 @@ resource "azurerm_batch_pool" "test" { max_task_retry_count = 1 wait_for_success = true - environment { + environment = { env = "TEST", bu = "Research&Dev" } diff --git a/azurerm/resource_arm_cdn_endpoint_test.go b/azurerm/resource_arm_cdn_endpoint_test.go index 4d52be8125b5..a4a3266e17e2 100644 --- a/azurerm/resource_arm_cdn_endpoint_test.go +++ 
b/azurerm/resource_arm_cdn_endpoint_test.go @@ -430,7 +430,7 @@ resource "azurerm_cdn_endpoint" "test" { http_port = 80 } - tags { + tags = { environment = "Production" cost_center = "MSFT" } @@ -465,7 +465,7 @@ resource "azurerm_cdn_endpoint" "test" { http_port = 80 } - tags { + tags = { environment = "Production" cost_center = "MSFT" } @@ -500,7 +500,7 @@ resource "azurerm_cdn_endpoint" "test" { http_port = 80 } - tags { + tags = { environment = "staging" } } @@ -628,7 +628,7 @@ resource "azurerm_cdn_endpoint" "test" { country_codes = ["GB"] } - tags { + tags = { environment = "Production" } } diff --git a/azurerm/resource_arm_cdn_profile.go b/azurerm/resource_arm_cdn_profile.go index 7885fe26ec50..811a4cda9d8b 100644 --- a/azurerm/resource_arm_cdn_profile.go +++ b/azurerm/resource_arm_cdn_profile.go @@ -41,8 +41,7 @@ func resourceArmCdnProfile() *schema.Resource { string(cdn.StandardAkamai), string(cdn.StandardChinaCdn), string(cdn.StandardVerizon), - // TODO: replace this with an SDK constant once available - "Standard_Microsoft", + string(cdn.StandardMicrosoft), string(cdn.PremiumVerizon), }, true), DiffSuppressFunc: ignoreCaseDiffSuppressFunc, diff --git a/azurerm/resource_arm_cdn_profile_test.go b/azurerm/resource_arm_cdn_profile_test.go index 2bb2106749a4..856a81150178 100644 --- a/azurerm/resource_arm_cdn_profile_test.go +++ b/azurerm/resource_arm_cdn_profile_test.go @@ -311,7 +311,7 @@ resource "azurerm_cdn_profile" "test" { resource_group_name = "${azurerm_resource_group.test.name}" sku = "Standard_Verizon" - tags { + tags = { environment = "Production" cost_center = "MSFT" } @@ -332,7 +332,7 @@ resource "azurerm_cdn_profile" "test" { resource_group_name = "${azurerm_resource_group.test.name}" sku = "Standard_Verizon" - tags { + tags = { environment = "staging" } } diff --git a/azurerm/resource_arm_cognitive_account.go b/azurerm/resource_arm_cognitive_account.go index d7573805931a..b4acb2f71ecd 100644 --- a/azurerm/resource_arm_cognitive_account.go +++ 
b/azurerm/resource_arm_cognitive_account.go @@ -100,6 +100,18 @@ func resourceArmCognitiveAccount() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "primary_access_key": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + + "secondary_access_key": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, }, } } @@ -192,6 +204,7 @@ func resourceArmCognitiveAccountRead(d *schema.ResourceData, meta interface{}) e name := id.Path["accounts"] resp, err := client.GetProperties(ctx, resourceGroup, name) + if err != nil { if utils.ResponseWasNotFound(resp.Response) { log.Printf("[DEBUG] Cognitive Services Account %q was not found in Resource Group %q - removing from state!", name, resourceGroup) @@ -209,7 +222,7 @@ func resourceArmCognitiveAccountRead(d *schema.ResourceData, meta interface{}) e d.Set("location", azureRMNormalizeLocation(*location)) } - if err := d.Set("sku", flattenCognitiveAccountSku(resp.Sku)); err != nil { + if err = d.Set("sku", flattenCognitiveAccountSku(resp.Sku)); err != nil { return fmt.Errorf("Error setting `sku`: %+v", err) } @@ -217,6 +230,21 @@ func resourceArmCognitiveAccountRead(d *schema.ResourceData, meta interface{}) e d.Set("endpoint", props.Endpoint) } + keys, err := client.ListKeys(ctx, resourceGroup, name) + + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[DEBUG] Not able to obtain keys for Cognitive Services Account %q in Resource Group %q - removing from state!", name, resourceGroup) + d.SetId("") + return nil + } + return fmt.Errorf("Error obtaining keys for Cognitive Services Account %q in Resource Group %q: %v", name, resourceGroup, err) + } + + d.Set("primary_access_key", keys.Key1) + + d.Set("secondary_access_key", keys.Key2) + flattenAndSetTags(d, resp.Tags) return nil diff --git a/azurerm/resource_arm_cognitive_account_test.go b/azurerm/resource_arm_cognitive_account_test.go index 5ac7a917464a..0e118df4484b 100644 --- 
a/azurerm/resource_arm_cognitive_account_test.go +++ b/azurerm/resource_arm_cognitive_account_test.go @@ -27,6 +27,8 @@ func TestAccAzureRMCognitiveAccount_basic(t *testing.T) { testCheckAzureRMCognitiveAccountExists(resourceName), resource.TestCheckResourceAttr(resourceName, "kind", "Face"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrSet(resourceName, "primary_access_key"), + resource.TestCheckResourceAttrSet(resourceName, "secondary_access_key"), ), }, { @@ -54,6 +56,8 @@ func TestAccAzureRMCognitiveAccount_speechServices(t *testing.T) { testCheckAzureRMCognitiveAccountExists(resourceName), resource.TestCheckResourceAttr(resourceName, "kind", "SpeechServices"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrSet(resourceName, "primary_access_key"), + resource.TestCheckResourceAttrSet(resourceName, "secondary_access_key"), ), }, { @@ -111,6 +115,8 @@ func TestAccAzureRMCognitiveAccount_complete(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "kind", "Face"), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.Acceptance", "Test"), + resource.TestCheckResourceAttrSet(resourceName, "primary_access_key"), + resource.TestCheckResourceAttrSet(resourceName, "secondary_access_key"), ), }, { @@ -138,6 +144,8 @@ func TestAccAzureRMCognitiveAccount_update(t *testing.T) { testCheckAzureRMCognitiveAccountExists(resourceName), resource.TestCheckResourceAttr(resourceName, "kind", "Face"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrSet(resourceName, "primary_access_key"), + resource.TestCheckResourceAttrSet(resourceName, "secondary_access_key"), ), }, { @@ -147,6 +155,8 @@ func TestAccAzureRMCognitiveAccount_update(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "kind", "Face"), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), 
resource.TestCheckResourceAttr(resourceName, "tags.Acceptance", "Test"), + resource.TestCheckResourceAttrSet(resourceName, "primary_access_key"), + resource.TestCheckResourceAttrSet(resourceName, "secondary_access_key"), ), }, }, @@ -285,7 +295,7 @@ resource "azurerm_cognitive_account" "test" { tier = "Standard" } - tags { + tags = { Acceptance = "Test" } } diff --git a/azurerm/resource_arm_connection_monitor.go b/azurerm/resource_arm_connection_monitor.go new file mode 100644 index 000000000000..a3688ea83930 --- /dev/null +++ b/azurerm/resource_arm_connection_monitor.go @@ -0,0 +1,332 @@ +package azurerm + +import ( + "fmt" + + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/response" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" +) + +func resourceArmConnectionMonitor() *schema.Resource { + return &schema.Resource{ + Create: resourceArmConnectionMonitorCreateUpdate, + Read: resourceArmConnectionMonitorRead, + Update: resourceArmConnectionMonitorCreateUpdate, + Delete: resourceArmConnectionMonitorDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.NoEmptyStrings, + }, + + "resource_group_name": resourceGroupNameSchema(), + + "network_watcher_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.NoEmptyStrings, + }, + + "location": locationSchema(), + + 
"auto_start": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: true, + }, + + "interval_in_seconds": { + Type: schema.TypeInt, + Optional: true, + Default: 60, + ValidateFunc: validation.IntAtLeast(30), + }, + + "source": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "virtual_machine_id": { + Type: schema.TypeString, + Required: true, + ValidateFunc: azure.ValidateResourceID, + }, + "port": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + ValidateFunc: validate.PortNumberOrZero, + }, + }, + }, + }, + + "destination": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "virtual_machine_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: azure.ValidateResourceID, + ConflictsWith: []string{"destination.0.address"}, + }, + "address": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"destination.0.virtual_machine_id"}, + }, + "port": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validate.PortNumber, + }, + }, + }, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceArmConnectionMonitorCreateUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).connectionMonitorsClient + ctx := meta.(*ArmClient).StopContext + + name := d.Get("name").(string) + watcherName := d.Get("network_watcher_name").(string) + resourceGroup := d.Get("resource_group_name").(string) + location := azureRMNormalizeLocation(d.Get("location").(string)) + autoStart := d.Get("auto_start").(bool) + intervalInSeconds := int32(d.Get("interval_in_seconds").(int)) + + source, err := expandArmConnectionMonitorSource(d) + if err != nil { + return err + } + + dest, err := expandArmConnectionMonitorDestination(d) + if err != nil { + return err + } + + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, 
resourceGroup, watcherName, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Connection Monitor %q (Watcher %q / Resource Group %q): %s", name, watcherName, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_connection_monitor", *existing.ID) + } + } + + tags := d.Get("tags").(map[string]interface{}) + + properties := network.ConnectionMonitor{ + Location: utils.String(location), + Tags: expandTags(tags), + ConnectionMonitorParameters: &network.ConnectionMonitorParameters{ + Source: source, + Destination: dest, + AutoStart: utils.Bool(autoStart), + MonitoringIntervalInSeconds: utils.Int32(intervalInSeconds), + }, + } + + future, err := client.CreateOrUpdate(ctx, resourceGroup, watcherName, name, properties) + if err != nil { + return fmt.Errorf("Error creating Connection Monitor %q (Watcher %q / Resource Group %q): %+v", name, watcherName, resourceGroup, err) + } + + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("Error waiting for completion of Connection Monitor %q (Watcher %q / Resource Group %q): %+v", name, watcherName, resourceGroup, err) + } + + resp, err := client.Get(ctx, resourceGroup, watcherName, name) + if err != nil { + return fmt.Errorf("Error retrieving Connection Monitor %q (Watcher %q / Resource Group %q): %+v", name, watcherName, resourceGroup, err) + } + if resp.ID == nil { + return fmt.Errorf("Cannot read Connection Monitor %q (Watcher %q / Resource Group %q) ID", name, watcherName, resourceGroup) + } + + d.SetId(*resp.ID) + + return resourceArmConnectionMonitorRead(d, meta) +} + +func resourceArmConnectionMonitorRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).connectionMonitorsClient + ctx := meta.(*ArmClient).StopContext + + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return err + } + 
resourceGroup := id.ResourceGroup + watcherName := id.Path["networkWatchers"] + name := id.Path["connectionMonitors"] + + resp, err := client.Get(ctx, resourceGroup, watcherName, name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + d.SetId("") + return nil + } + return fmt.Errorf("Error reading Connection Monitor %q (Watcher %q / Resource Group %q): %+v", name, watcherName, resourceGroup, err) + } + + d.Set("name", name) + d.Set("network_watcher_name", watcherName) + d.Set("resource_group_name", resourceGroup) + if location := resp.Location; location != nil { + d.Set("location", azureRMNormalizeLocation(*location)) + } + + if props := resp.ConnectionMonitorResultProperties; props != nil { + d.Set("auto_start", props.AutoStart) + d.Set("interval_in_seconds", props.MonitoringIntervalInSeconds) + + source := flattenArmConnectionMonitorSource(props.Source) + if err := d.Set("source", source); err != nil { + return fmt.Errorf("Error setting `source`: %+v", err) + } + + dest := flattenArmConnectionMonitorDestination(props.Destination) + if err := d.Set("destination", dest); err != nil { + return fmt.Errorf("Error setting `destination`: %+v", err) + } + } + + flattenAndSetTags(d, resp.Tags) + + return nil +} + +func resourceArmConnectionMonitorDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).connectionMonitorsClient + ctx := meta.(*ArmClient).StopContext + + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return err + } + resourceGroup := id.ResourceGroup + watcherName := id.Path["networkWatchers"] + name := id.Path["connectionMonitors"] + + future, err := client.Delete(ctx, resourceGroup, watcherName, name) + if err != nil { + if !response.WasNotFound(future.Response()) { + return fmt.Errorf("Error deleting Connection Monitor %q (Watcher %q / Resource Group %q): %+v", name, watcherName, resourceGroup, err) + } + } + + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return 
fmt.Errorf("Error waiting for the deletion of Connection Monitor %q (Watcher %q / Resource Group %q): %+v", name, watcherName, resourceGroup, err) + } + + return nil +} + +func flattenArmConnectionMonitorSource(input *network.ConnectionMonitorSource) []interface{} { + if input == nil { + return []interface{}{} + } + + output := make(map[string]interface{}) + + if resourceID := input.ResourceID; resourceID != nil { + output["virtual_machine_id"] = *resourceID + } + if port := input.Port; port != nil { + output["port"] = *port + } + + return []interface{}{output} +} + +func expandArmConnectionMonitorSource(d *schema.ResourceData) (*network.ConnectionMonitorSource, error) { + sources := d.Get("source").([]interface{}) + source := sources[0].(map[string]interface{}) + + monitorSource := network.ConnectionMonitorSource{} + if v := source["virtual_machine_id"]; v != "" { + monitorSource.ResourceID = utils.String(v.(string)) + } + if v := source["port"]; v != "" { + monitorSource.Port = utils.Int32(int32(v.(int))) + } + + return &monitorSource, nil +} + +func flattenArmConnectionMonitorDestination(input *network.ConnectionMonitorDestination) []interface{} { + if input == nil { + return []interface{}{} + } + + output := make(map[string]interface{}) + + // When monitoring a VM, the address field will contain the current address + // of the VM. We only want to copy over the address field if the virtual + // machine field is not set to avoid unwanted diffs. 
+ if resourceID := input.ResourceID; resourceID != nil { + output["virtual_machine_id"] = *resourceID + } else if address := input.Address; address != nil { + output["address"] = *address + } + + if port := input.Port; port != nil { + output["port"] = *port + } + + return []interface{}{output} +} + +func expandArmConnectionMonitorDestination(d *schema.ResourceData) (*network.ConnectionMonitorDestination, error) { + dests := d.Get("destination").([]interface{}) + dest := dests[0].(map[string]interface{}) + + monitorDest := network.ConnectionMonitorDestination{} + + if v := dest["virtual_machine_id"]; v != "" { + monitorDest.ResourceID = utils.String(v.(string)) + } + if v := dest["address"]; v != "" { + monitorDest.Address = utils.String(v.(string)) + } + if v := dest["port"]; v != "" { + monitorDest.Port = utils.Int32(int32(v.(int))) + } + + if monitorDest.ResourceID == nil && monitorDest.Address == nil { + return nil, fmt.Errorf("Error: either `destination.virtual_machine_id` or `destination.address` must be specified") + } + + return &monitorDest, nil +} diff --git a/azurerm/resource_arm_connection_monitor_test.go b/azurerm/resource_arm_connection_monitor_test.go new file mode 100644 index 000000000000..ab1bc64222b2 --- /dev/null +++ b/azurerm/resource_arm_connection_monitor_test.go @@ -0,0 +1,697 @@ +package azurerm + +import ( + "fmt" + "net/http" + "regexp" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" +) + +func testAccAzureRMConnectionMonitor_addressBasic(t *testing.T) { + resourceName := "azurerm_connection_monitor.test" + + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMConnectionMonitorDestroy, + Steps: []resource.TestStep{ + { + Config: 
testAccAzureRMConnectionMonitor_basicAddressConfig(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMConnectionMonitorExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "resource_group_name"), + resource.TestCheckResourceAttr(resourceName, "location", azureRMNormalizeLocation(location)), + resource.TestCheckResourceAttr(resourceName, "auto_start", "true"), + resource.TestCheckResourceAttr(resourceName, "interval_in_seconds", "60"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccAzureRMConnectionMonitor_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_connection_monitor.test" + + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMConnectionMonitorDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMConnectionMonitor_basicAddressConfig(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMConnectionMonitorExists(resourceName), + ), + }, + { + Config: testAccAzureRMConnectionMonitor_requiresImportConfig(ri, location), + ExpectError: testRequiresImportError("azurerm_connection_monitor"), + }, + }, + }) + +} + +func testAccAzureRMConnectionMonitor_addressComplete(t *testing.T) { + resourceName := "azurerm_connection_monitor.test" + + ri := tf.AccRandTimeInt() + location := testLocation() + autoStart := "false" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMConnectionMonitorDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMConnectionMonitor_completeAddressConfig(ri, location, autoStart), + Check: resource.ComposeTestCheckFunc( + 
testCheckAzureRMConnectionMonitorExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "auto_start", "false"), + resource.TestCheckResourceAttr(resourceName, "interval_in_seconds", "30"), + resource.TestCheckResourceAttr(resourceName, "source.0.port", "20020"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.env", "test"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccAzureRMConnectionMonitor_addressUpdate(t *testing.T) { + resourceName := "azurerm_connection_monitor.test" + + ri := tf.AccRandTimeInt() + location := testLocation() + autoStart := "true" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMConnectionMonitorDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMConnectionMonitor_basicAddressConfig(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMConnectionMonitorExists(resourceName), + ), + }, + { + Config: testAccAzureRMConnectionMonitor_completeAddressConfig(ri, location, autoStart), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMConnectionMonitorExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "auto_start", "true"), + resource.TestCheckResourceAttr(resourceName, "interval_in_seconds", "30"), + resource.TestCheckResourceAttr(resourceName, "source.0.port", "20020"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.env", "test"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccAzureRMConnectionMonitor_vmBasic(t *testing.T) { + resourceName := "azurerm_connection_monitor.test" + + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { 
testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMConnectionMonitorDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMConnectionMonitor_basicVmConfig(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMConnectionMonitorExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "resource_group_name"), + resource.TestCheckResourceAttr(resourceName, "location", azureRMNormalizeLocation(location)), + resource.TestCheckResourceAttr(resourceName, "auto_start", "true"), + resource.TestCheckResourceAttr(resourceName, "interval_in_seconds", "60"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccAzureRMConnectionMonitor_vmComplete(t *testing.T) { + resourceName := "azurerm_connection_monitor.test" + + ri := tf.AccRandTimeInt() + location := testLocation() + autoStart := "false" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMConnectionMonitorDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMConnectionMonitor_completeVmConfig(ri, location, autoStart), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMConnectionMonitorExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "auto_start", "false"), + resource.TestCheckResourceAttr(resourceName, "interval_in_seconds", "30"), + resource.TestCheckResourceAttr(resourceName, "source.0.port", "20020"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.env", "test"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccAzureRMConnectionMonitor_vmUpdate(t *testing.T) { + resourceName := "azurerm_connection_monitor.test" + + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.Test(t, 
resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMConnectionMonitorDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMConnectionMonitor_basicVmConfig(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMConnectionMonitorExists(resourceName), + ), + }, + { + Config: testAccAzureRMConnectionMonitor_completeVmConfig(ri, location, "true"), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMConnectionMonitorExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "auto_start", "true"), + resource.TestCheckResourceAttr(resourceName, "interval_in_seconds", "30"), + resource.TestCheckResourceAttr(resourceName, "source.0.port", "20020"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.env", "test"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccAzureRMConnectionMonitor_destinationUpdate(t *testing.T) { + resourceName := "azurerm_connection_monitor.test" + + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMConnectionMonitorDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMConnectionMonitor_basicAddressConfig(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMConnectionMonitorExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "destination.0.address"), + ), + }, + { + Config: testAccAzureRMConnectionMonitor_basicVmConfig(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMConnectionMonitorExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "destination.0.virtual_machine_id"), + ), + }, + { + Config: testAccAzureRMConnectionMonitor_basicAddressConfig(ri, 
location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMConnectionMonitorExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "destination.0.address"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccAzureRMConnectionMonitor_missingDestination(t *testing.T) { + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMConnectionMonitorDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMConnectionMonitor_missingDestinationConfig(ri, location), + ExpectError: regexp.MustCompile("Error: either `destination.virtual_machine_id` or `destination.address` must be specified"), + }, + }, + }) +} + +func testAccAzureRMConnectionMonitor_conflictingDestinations(t *testing.T) { + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMConnectionMonitorDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMConnectionMonitor_conflictingDestinationsConfig(ri, location), + ExpectError: regexp.MustCompile("conflicts with destination.0.address"), + }, + }, + }) +} + +func testCheckAzureRMConnectionMonitorExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + resourceGroup := rs.Primary.Attributes["resource_group_name"] + watcherName := rs.Primary.Attributes["network_watcher_name"] + connectionMonitorName := rs.Primary.Attributes["name"] + + client := testAccProvider.Meta().(*ArmClient).connectionMonitorsClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + + resp, err := client.Get(ctx, 
resourceGroup, watcherName, connectionMonitorName) + if err != nil { + return fmt.Errorf("Bad: Get on connectionMonitorsClient: %s", err) + } + + if resp.StatusCode == http.StatusNotFound { + return fmt.Errorf("Connection Monitor does not exist: %s", connectionMonitorName) + } + + return nil + } +} + +func testCheckAzureRMConnectionMonitorDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*ArmClient).connectionMonitorsClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + + for _, rs := range s.RootModule().Resources { + if rs.Type != "azurerm_connection_monitor" { + continue + } + + resourceGroup := rs.Primary.Attributes["resource_group_name"] + watcherName := rs.Primary.Attributes["network_watcher_name"] + connectionMonitorName := rs.Primary.Attributes["name"] + + resp, err := client.Get(ctx, resourceGroup, watcherName, connectionMonitorName) + + if err != nil { + return nil + } + + if resp.StatusCode != http.StatusNotFound { + return fmt.Errorf("Connection Monitor still exists: %s", *resp.Name) + } + } + + return nil +} + +func testAccAzureRMConnectionMonitor_baseConfig(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_network_watcher" "test" { + name = "acctnw-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" +} + +resource "azurerm_virtual_network" "test" { + name = "acctvn-%d" + address_space = ["10.0.0.0/16"] + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" +} + +resource "azurerm_subnet" "test" { + name = "internal" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.0.2.0/24" +} + +resource "azurerm_network_interface" "src" { + name = "acctni-src%d" + location = 
"${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + ip_configuration { + name = "testconfiguration1" + subnet_id = "${azurerm_subnet.test.id}" + private_ip_address_allocation = "Dynamic" + } +} + +resource "azurerm_virtual_machine" "src" { + name = "acctvm-src%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + network_interface_ids = ["${azurerm_network_interface.src.id}"] + vm_size = "Standard_D1_v2" + + storage_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } + + storage_os_disk { + name = "osdisk-src%d" + caching = "ReadWrite" + create_option = "FromImage" + managed_disk_type = "Standard_LRS" + } + + os_profile { + computer_name = "hostname%d" + admin_username = "testadmin" + admin_password = "Password1234!" + } + + os_profile_linux_config { + disable_password_authentication = false + } +} + +resource "azurerm_virtual_machine_extension" "src" { + name = "network-watcher" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_machine_name = "${azurerm_virtual_machine.src.name}" + publisher = "Microsoft.Azure.NetworkWatcher" + type = "NetworkWatcherAgentLinux" + type_handler_version = "1.4" + auto_upgrade_minor_version = true +} +`, rInt, location, rInt, rInt, rInt, rInt, rInt, rInt) +} + +func testAccAzureRMConnectionMonitor_baseWithDestConfig(rInt int, location string) string { + config := testAccAzureRMConnectionMonitor_baseConfig(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_network_interface" "dest" { + name = "acctni-dest%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + ip_configuration { + name = "testconfiguration1" + subnet_id = "${azurerm_subnet.test.id}" + private_ip_address_allocation = "Dynamic" + } 
+} + +resource "azurerm_virtual_machine" "dest" { + name = "acctvm-dest%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + network_interface_ids = ["${azurerm_network_interface.dest.id}"] + vm_size = "Standard_D1_v2" + + storage_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } + + storage_os_disk { + name = "osdisk-dest%d" + caching = "ReadWrite" + create_option = "FromImage" + managed_disk_type = "Standard_LRS" + } + + os_profile { + computer_name = "hostname%d" + admin_username = "testadmin" + admin_password = "Password1234!" + } + + os_profile_linux_config { + disable_password_authentication = false + } +} +`, config, rInt, rInt, rInt, rInt) +} + +func testAccAzureRMConnectionMonitor_basicAddressConfig(rInt int, location string) string { + config := testAccAzureRMConnectionMonitor_baseConfig(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_connection_monitor" "test" { + name = "acctestcm-%d" + network_watcher_name = "${azurerm_network_watcher.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_network_watcher.test.location}" + + source { + virtual_machine_id = "${azurerm_virtual_machine.src.id}" + } + + destination { + address = "terraform.io" + port = 80 + } + + depends_on = ["azurerm_virtual_machine_extension.src"] +} +`, config, rInt) +} + +func testAccAzureRMConnectionMonitor_completeAddressConfig(rInt int, location, autoStart string) string { + config := testAccAzureRMConnectionMonitor_baseConfig(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_connection_monitor" "test" { + name = "acctestcm-%d" + network_watcher_name = "${azurerm_network_watcher.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_network_watcher.test.location}" + + auto_start = %s + interval_in_seconds = 30 + + source { + 
virtual_machine_id = "${azurerm_virtual_machine.src.id}" + port = 20020 + } + + destination { + address = "terraform.io" + port = 443 + } + + tags = { + env = "test" + } + + depends_on = ["azurerm_virtual_machine_extension.src"] +} +`, config, rInt, autoStart) +} + +func testAccAzureRMConnectionMonitor_basicVmConfig(rInt int, location string) string { + config := testAccAzureRMConnectionMonitor_baseWithDestConfig(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_connection_monitor" "test" { + name = "acctestcm-%d" + network_watcher_name = "${azurerm_network_watcher.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_network_watcher.test.location}" + + source { + virtual_machine_id = "${azurerm_virtual_machine.src.id}" + } + + destination { + virtual_machine_id = "${azurerm_virtual_machine.dest.id}" + port = 80 + } + + depends_on = ["azurerm_virtual_machine_extension.src"] +} +`, config, rInt) +} + +func testAccAzureRMConnectionMonitor_completeVmConfig(rInt int, location, autoStart string) string { + config := testAccAzureRMConnectionMonitor_baseWithDestConfig(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_connection_monitor" "test" { + name = "acctestcm-%d" + network_watcher_name = "${azurerm_network_watcher.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_network_watcher.test.location}" + + auto_start = %s + interval_in_seconds = 30 + + source { + virtual_machine_id = "${azurerm_virtual_machine.src.id}" + port = 20020 + } + + destination { + virtual_machine_id = "${azurerm_virtual_machine.dest.id}" + port = 443 + } + + tags = { + env = "test" + } + + depends_on = ["azurerm_virtual_machine_extension.src"] +} +`, config, rInt, autoStart) +} + +func testAccAzureRMConnectionMonitor_missingDestinationConfig(rInt int, location string) string { + config := testAccAzureRMConnectionMonitor_baseConfig(rInt, location) + return fmt.Sprintf(` +%s + 
+resource "azurerm_connection_monitor" "test" { + name = "acctestcm-%d" + network_watcher_name = "${azurerm_network_watcher.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_network_watcher.test.location}" + + source { + virtual_machine_id = "${azurerm_virtual_machine.src.id}" + } + + destination { + port = 80 + } + + depends_on = ["azurerm_virtual_machine_extension.src"] +} +`, config, rInt) +} + +func testAccAzureRMConnectionMonitor_conflictingDestinationsConfig(rInt int, location string) string { + config := testAccAzureRMConnectionMonitor_baseConfig(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_connection_monitor" "test" { + name = "acctestcm-%d" + network_watcher_name = "${azurerm_network_watcher.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_network_watcher.test.location}" + + source { + virtual_machine_id = "${azurerm_virtual_machine.src.id}" + } + + destination { + address = "terraform.io" + virtual_machine_id = "${azurerm_virtual_machine.src.id}" + port = 80 + } + + depends_on = ["azurerm_virtual_machine_extension.src"] +} +`, config, rInt) +} + +func testAccAzureRMConnectionMonitor_requiresImportConfig(rInt int, location string) string { + config := testAccAzureRMConnectionMonitor_basicAddressConfig(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_connection_monitor" "import" { + name = "${azurerm_connection_monitor.test.name}" + network_watcher_name = "${azurerm_connection_monitor.test.network_watcher_name}" + resource_group_name = "${azurerm_connection_monitor.test.resource_group_name}" + location = "${azurerm_connection_monitor.test.location}" + + source { + virtual_machine_id = "${azurerm_virtual_machine.src.id}" + } + + destination { + address = "terraform.io" + port = 80 + } + + depends_on = ["azurerm_virtual_machine_extension.src"] +} +`, config) +} diff --git a/azurerm/resource_arm_container_group_test.go 
b/azurerm/resource_arm_container_group_test.go index 714ab1c58597..e35c5ccb1349 100644 --- a/azurerm/resource_arm_container_group_test.go +++ b/azurerm/resource_arm_container_group_test.go @@ -344,7 +344,7 @@ resource "azurerm_container_group" "test" { port = 80 } - tags { + tags = { environment = "Testing" } } @@ -371,7 +371,7 @@ resource "azurerm_container_group" "import" { port = "80" } - tags { + tags = { environment = "Testing" } } @@ -420,7 +420,7 @@ resource "azurerm_container_group" "test" { memory = "0.5" } - tags { + tags = { environment = "Testing" } } @@ -464,7 +464,7 @@ resource "azurerm_container_group" "test" { memory = "0.5" } - tags { + tags = { environment = "Testing" } } @@ -506,7 +506,7 @@ resource "azurerm_container_group" "test" { memory = "0.5" } - tags { + tags = { environment = "Testing" } } @@ -542,7 +542,7 @@ resource "azurerm_container_group" "test" { } } - tags { + tags = { environment = "Testing" } } @@ -595,12 +595,12 @@ resource "azurerm_container_group" "test" { protocol = "TCP" } - environment_variables { + environment_variables = { "foo" = "bar" "foo1" = "bar1" } - secure_environment_variables { + secure_environment_variables = { "secureFoo" = "secureBar" "secureFoo1" = "secureBar1" } @@ -617,7 +617,7 @@ resource "azurerm_container_group" "test" { } } - tags { + tags = { environment = "Testing" } } @@ -698,12 +698,12 @@ resource "azurerm_container_group" "test" { storage_account_key = "${azurerm_storage_account.test.primary_access_key}" } - environment_variables { + environment_variables = { "foo" = "bar" "foo1" = "bar1" } - secure_environment_variables { + secure_environment_variables = { "secureFoo" = "secureBar" "secureFoo1" = "secureBar1" } @@ -720,7 +720,7 @@ resource "azurerm_container_group" "test" { } } - tags { + tags = { environment = "Testing" } } diff --git a/azurerm/resource_arm_container_registry_migrate_test.go b/azurerm/resource_arm_container_registry_migrate_test.go index 4d2b32d20d86..65a7ae96f173 100644 --- 
a/azurerm/resource_arm_container_registry_migrate_test.go +++ b/azurerm/resource_arm_container_registry_migrate_test.go @@ -3,6 +3,7 @@ package azurerm import ( "context" "fmt" + "log" "reflect" "testing" @@ -139,6 +140,10 @@ func createStorageAccount(client *ArmClient, resourceGroupName, storageAccountNa func destroyStorageAccountAndResourceGroup(client *ArmClient, resourceGroupName, storageAccountName string) { ctx := client.StopContext - client.storageServiceClient.Delete(ctx, resourceGroupName, storageAccountName) - client.resourceGroupsClient.Delete(ctx, resourceGroupName) + if _, err := client.storageServiceClient.Delete(ctx, resourceGroupName, storageAccountName); err != nil { + log.Printf("[DEBUG] Error deleting Storage Account %q (Resource Group %q): %v", storageAccountName, resourceGroupName, err) + } + if _, err := client.resourceGroupsClient.Delete(ctx, resourceGroupName); err != nil { + log.Printf("[DEBUG] Error deleting Resource Group %q: %v", resourceGroupName, err) + } } diff --git a/azurerm/resource_arm_container_registry_test.go b/azurerm/resource_arm_container_registry_test.go index efbc0668db28..2960e42847ed 100644 --- a/azurerm/resource_arm_container_registry_test.go +++ b/azurerm/resource_arm_container_registry_test.go @@ -536,7 +536,7 @@ resource "azurerm_container_registry" "test" { sku = "Classic" storage_account_id = "${azurerm_storage_account.test.id}" - tags { + tags = { environment = "production" } } @@ -566,7 +566,7 @@ resource "azurerm_container_registry" "test" { sku = "Classic" storage_account_id = "${azurerm_storage_account.test.id}" - tags { + tags = { environment = "production" } } diff --git a/azurerm/resource_arm_container_service_test.go b/azurerm/resource_arm_container_service_test.go index b95ee01f85f9..82b5f92c1f77 100644 --- a/azurerm/resource_arm_container_service_test.go +++ b/azurerm/resource_arm_container_service_test.go @@ -349,7 -349,7 +349,7 @@ resource "azurerm_container_service" "test" { enabled = false } - tags { + 
tags = { you = "me" } } diff --git a/azurerm/resource_arm_cosmos_db_account_test.go b/azurerm/resource_arm_cosmos_db_account_test.go index a9bea0f45d95..b8851f158398 100644 --- a/azurerm/resource_arm_cosmos_db_account_test.go +++ b/azurerm/resource_arm_cosmos_db_account_test.go @@ -230,7 +230,7 @@ func TestAccAzureRMCosmosDBAccount_mongoDB(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( checkAccAzureRMCosmosDBAccount_basic(resourceName, testLocation(), string(documentdb.BoundedStaleness), 1), resource.TestCheckResourceAttr(resourceName, "kind", "MongoDB"), - resource.TestCheckResourceAttr(resourceName, "connection_strings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "connection_strings.#", "4"), ), }, { diff --git a/azurerm/resource_arm_data_lake_analytics_account_test.go b/azurerm/resource_arm_data_lake_analytics_account_test.go index 4f60f8cf7208..b9a3bf3ce3da 100644 --- a/azurerm/resource_arm_data_lake_analytics_account_test.go +++ b/azurerm/resource_arm_data_lake_analytics_account_test.go @@ -237,7 +237,7 @@ resource "azurerm_data_lake_analytics_account" "test" { default_store_account_name = "${azurerm_data_lake_store.test.name}" - tags { + tags = { environment = "Production" cost_center = "MSFT" } @@ -257,7 +257,7 @@ resource "azurerm_data_lake_analytics_account" "test" { default_store_account_name = "${azurerm_data_lake_store.test.name}" - tags { + tags = { environment = "staging" } } diff --git a/azurerm/resource_arm_data_lake_store_test.go b/azurerm/resource_arm_data_lake_store_test.go index 0ab29a64685e..b8c1a4da4bec 100644 --- a/azurerm/resource_arm_data_lake_store_test.go +++ b/azurerm/resource_arm_data_lake_store_test.go @@ -341,7 +341,7 @@ resource "azurerm_data_lake_store" "test" { resource_group_name = "${azurerm_resource_group.test.name}" location = "${azurerm_resource_group.test.location}" - tags { + tags = { environment = "Production" cost_center = "MSFT" } @@ -361,7 +361,7 @@ resource "azurerm_data_lake_store" "test" { 
resource_group_name = "${azurerm_resource_group.test.name}" location = "${azurerm_resource_group.test.location}" - tags { + tags = { environment = "staging" } } diff --git a/azurerm/resource_arm_databricks_workspace_test.go b/azurerm/resource_arm_databricks_workspace_test.go index a64c21028ead..0cb96624f957 100644 --- a/azurerm/resource_arm_databricks_workspace_test.go +++ b/azurerm/resource_arm_databricks_workspace_test.go @@ -258,7 +258,7 @@ resource "azurerm_databricks_workspace" "test" { sku = "standard" managed_resource_group_name = "acctestRG-%d-managed" - tags { + tags = { Environment = "Production" Pricing = "Standard" } @@ -280,7 +280,7 @@ resource "azurerm_databricks_workspace" "test" { sku = "standard" managed_resource_group_name = "acctestRG-%d-managed" - tags { + tags = { Pricing = "Standard" } } diff --git a/azurerm/resource_arm_ddos_protection_plan_test.go b/azurerm/resource_arm_ddos_protection_plan_test.go index 6f525d9eb64c..178215139eed 100644 --- a/azurerm/resource_arm_ddos_protection_plan_test.go +++ b/azurerm/resource_arm_ddos_protection_plan_test.go @@ -38,7 +38,7 @@ func testAccAzureRMDDoSProtectionPlan_basic(t *testing.T) { ri := tf.AccRandTimeInt() location := testLocation() - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMDDoSProtectionPlanDestroy, @@ -69,7 +69,7 @@ func testAccAzureRMDDoSProtectionPlan_requiresImport(t *testing.T) { ri := tf.AccRandTimeInt() location := testLocation() - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMDDoSProtectionPlanDestroy, @@ -93,7 +93,7 @@ func testAccAzureRMDDoSProtectionPlan_withTags(t *testing.T) { ri := tf.AccRandTimeInt() location := testLocation() - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ 
PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMDDoSProtectionPlanDestroy, @@ -129,7 +129,7 @@ func testAccAzureRMDDoSProtectionPlan_disappears(t *testing.T) { ri := tf.AccRandTimeInt() location := testLocation() - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMDDoSProtectionPlanDestroy, @@ -271,7 +271,7 @@ resource "azurerm_ddos_protection_plan" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { environment = "Production" cost_center = "MSFT" } @@ -291,7 +291,7 @@ resource "azurerm_ddos_protection_plan" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { environment = "Staging" } } diff --git a/azurerm/resource_arm_dev_test_lab_test.go b/azurerm/resource_arm_dev_test_lab_test.go index a6f16cec5ca3..0e2b9d4002ff 100644 --- a/azurerm/resource_arm_dev_test_lab_test.go +++ b/azurerm/resource_arm_dev_test_lab_test.go @@ -192,7 +192,7 @@ resource "azurerm_dev_test_lab" "test" { resource_group_name = "${azurerm_resource_group.test.name}" storage_type = "Standard" - tags { + tags = { "Hello" = "World" } } diff --git a/azurerm/resource_arm_dev_test_linux_virtual_machine_test.go b/azurerm/resource_arm_dev_test_linux_virtual_machine_test.go index 21ba7decc3f0..0059198f8c76 100644 --- a/azurerm/resource_arm_dev_test_linux_virtual_machine_test.go +++ b/azurerm/resource_arm_dev_test_linux_virtual_machine_test.go @@ -346,7 +346,7 @@ resource "azurerm_dev_test_linux_virtual_machine" "test" { backend_port = 3389 } - tags { + tags = { "Acceptance" = "Test" } } diff --git a/azurerm/resource_arm_dev_test_policy_test.go b/azurerm/resource_arm_dev_test_policy_test.go index 054c5a627c7c..8b1c899ecd93 100644 --- 
a/azurerm/resource_arm_dev_test_policy_test.go +++ b/azurerm/resource_arm_dev_test_policy_test.go @@ -214,7 +214,7 @@ resource "azurerm_dev_test_policy" "test" { evaluator_type = "MaxValuePolicy" description = "Aloha this is the max number of VM's'" - tags { + tags = { "Acceptance" = "Test" } } diff --git a/azurerm/resource_arm_dev_test_windows_virtual_machine_test.go b/azurerm/resource_arm_dev_test_windows_virtual_machine_test.go index 563e2adad29b..b05f5fe1f35f 100644 --- a/azurerm/resource_arm_dev_test_windows_virtual_machine_test.go +++ b/azurerm/resource_arm_dev_test_windows_virtual_machine_test.go @@ -286,7 +286,7 @@ resource "azurerm_dev_test_windows_virtual_machine" "test" { backend_port = 3389 } - tags { + tags = { "Acceptance" = "Test" } } diff --git a/azurerm/resource_arm_dns_a_record.go b/azurerm/resource_arm_dns_a_record.go index 8885ba03f586..9de48978f8d5 100644 --- a/azurerm/resource_arm_dns_a_record.go +++ b/azurerm/resource_arm_dns_a_record.go @@ -75,24 +75,18 @@ func resourceArmDnsARecordCreateUpdate(d *schema.ResourceData, meta interface{}) ttl := int64(d.Get("ttl").(int)) tags := d.Get("tags").(map[string]interface{}) - records, err := expandAzureRmDnsARecords(d) - if err != nil { - return err - } - parameters := dns.RecordSet{ Name: &name, RecordSetProperties: &dns.RecordSetProperties{ Metadata: expandTags(tags), TTL: &ttl, - ARecords: &records, + ARecords: expandAzureRmDnsARecords(d), }, } eTag := "" ifNoneMatch := "" // set to empty to allow updates to records after creation - _, err = client.CreateOrUpdate(ctx, resGroup, zoneName, name, dns.A, parameters, eTag, ifNoneMatch) - if err != nil { + if _, err := client.CreateOrUpdate(ctx, resGroup, zoneName, name, dns.A, parameters, eTag, ifNoneMatch); err != nil { return fmt.Errorf("Error creating/updating DNS A Record %q (Zone %q / Resource Group %q): %s", name, zoneName, resGroup, err) } @@ -178,7 +172,7 @@ func flattenAzureRmDnsARecords(records *[]dns.ARecord) []string { return results } -func 
expandAzureRmDnsARecords(d *schema.ResourceData) ([]dns.ARecord, error) { +func expandAzureRmDnsARecords(d *schema.ResourceData) *[]dns.ARecord { recordStrings := d.Get("records").(*schema.Set).List() records := make([]dns.ARecord, len(recordStrings)) @@ -189,5 +183,5 @@ func expandAzureRmDnsARecords(d *schema.ResourceData) ([]dns.ARecord, error) { } } - return records, nil + return &records } diff --git a/azurerm/resource_arm_dns_a_record_test.go b/azurerm/resource_arm_dns_a_record_test.go index a0b54a96ab6a..4db1381db701 100644 --- a/azurerm/resource_arm_dns_a_record_test.go +++ b/azurerm/resource_arm_dns_a_record_test.go @@ -267,7 +267,7 @@ resource "azurerm_dns_a_record" "test" { ttl = 300 records = ["1.2.3.4", "1.2.4.5"] - tags { + tags = { environment = "Production" cost_center = "MSFT" } @@ -294,7 +294,7 @@ resource "azurerm_dns_a_record" "test" { ttl = 300 records = ["1.2.3.4", "1.2.4.5"] - tags { + tags = { environment = "staging" } } diff --git a/azurerm/resource_arm_dns_aaaa_record.go b/azurerm/resource_arm_dns_aaaa_record.go index 02bebae7955f..819b8b5b5503 100644 --- a/azurerm/resource_arm_dns_aaaa_record.go +++ b/azurerm/resource_arm_dns_aaaa_record.go @@ -75,24 +75,18 @@ func resourceArmDnsAaaaRecordCreateUpdate(d *schema.ResourceData, meta interface ttl := int64(d.Get("ttl").(int)) tags := d.Get("tags").(map[string]interface{}) - records, err := expandAzureRmDnsAaaaRecords(d) - if err != nil { - return err - } - parameters := dns.RecordSet{ Name: &name, RecordSetProperties: &dns.RecordSetProperties{ Metadata: expandTags(tags), TTL: &ttl, - AaaaRecords: &records, + AaaaRecords: expandAzureRmDnsAaaaRecords(d), }, } eTag := "" ifNoneMatch := "" // set to empty to allow updates to records after creation - _, err = client.CreateOrUpdate(ctx, resGroup, zoneName, name, dns.AAAA, parameters, eTag, ifNoneMatch) - if err != nil { + if _, err := client.CreateOrUpdate(ctx, resGroup, zoneName, name, dns.AAAA, parameters, eTag, ifNoneMatch); err != nil { return 
fmt.Errorf("Error creating/updating DNS AAAA Record %q (Zone %q / Resource Group %q): %s", name, zoneName, resGroup, err) } @@ -178,7 +172,7 @@ func flattenAzureRmDnsAaaaRecords(records *[]dns.AaaaRecord) []string { return results } -func expandAzureRmDnsAaaaRecords(d *schema.ResourceData) ([]dns.AaaaRecord, error) { +func expandAzureRmDnsAaaaRecords(d *schema.ResourceData) *[]dns.AaaaRecord { recordStrings := d.Get("records").(*schema.Set).List() records := make([]dns.AaaaRecord, len(recordStrings)) @@ -189,5 +183,5 @@ func expandAzureRmDnsAaaaRecords(d *schema.ResourceData) ([]dns.AaaaRecord, erro } } - return records, nil + return &records } diff --git a/azurerm/resource_arm_dns_aaaa_record_test.go b/azurerm/resource_arm_dns_aaaa_record_test.go index 1d2b7e1eea90..a89d18af7497 100644 --- a/azurerm/resource_arm_dns_aaaa_record_test.go +++ b/azurerm/resource_arm_dns_aaaa_record_test.go @@ -267,7 +267,7 @@ resource "azurerm_dns_aaaa_record" "test" { ttl = 300 records = ["2607:f8b0:4009:1803::1005", "2607:f8b0:4009:1803::1006"] - tags { + tags = { environment = "Production" cost_center = "MSFT" } @@ -294,7 +294,7 @@ resource "azurerm_dns_aaaa_record" "test" { ttl = 300 records = ["2607:f8b0:4009:1803::1005", "2607:f8b0:4009:1803::1006"] - tags { + tags = { environment = "staging" } } diff --git a/azurerm/resource_arm_dns_caa_record.go b/azurerm/resource_arm_dns_caa_record.go index f98a5fa078e7..9068ac6320cd 100644 --- a/azurerm/resource_arm_dns_caa_record.go +++ b/azurerm/resource_arm_dns_caa_record.go @@ -101,24 +101,18 @@ func resourceArmDnsCaaRecordCreateUpdate(d *schema.ResourceData, meta interface{ ttl := int64(d.Get("ttl").(int)) tags := d.Get("tags").(map[string]interface{}) - records, err := expandAzureRmDnsCaaRecords(d) - if err != nil { - return err - } - parameters := dns.RecordSet{ Name: &name, RecordSetProperties: &dns.RecordSetProperties{ Metadata: expandTags(tags), TTL: &ttl, - CaaRecords: &records, + CaaRecords: expandAzureRmDnsCaaRecords(d), }, } 
eTag := "" ifNoneMatch := "" // set to empty to allow updates to records after creation - _, err = client.CreateOrUpdate(ctx, resGroup, zoneName, name, dns.CAA, parameters, eTag, ifNoneMatch) - if err != nil { + if _, err := client.CreateOrUpdate(ctx, resGroup, zoneName, name, dns.CAA, parameters, eTag, ifNoneMatch); err != nil { return fmt.Errorf("Error creating/updating DNS CAA Record %q (Zone %q / Resource Group %q): %s", name, zoneName, resGroup, err) } @@ -208,7 +202,7 @@ func flattenAzureRmDnsCaaRecords(records *[]dns.CaaRecord) []map[string]interfac return results } -func expandAzureRmDnsCaaRecords(d *schema.ResourceData) ([]dns.CaaRecord, error) { +func expandAzureRmDnsCaaRecords(d *schema.ResourceData) *[]dns.CaaRecord { recordStrings := d.Get("record").(*schema.Set).List() records := make([]dns.CaaRecord, len(recordStrings)) @@ -227,7 +221,7 @@ func expandAzureRmDnsCaaRecords(d *schema.ResourceData) ([]dns.CaaRecord, error) records[i] = caaRecord } - return records, nil + return &records } func resourceArmDnsCaaRecordHash(v interface{}) int { diff --git a/azurerm/resource_arm_dns_caa_record_test.go b/azurerm/resource_arm_dns_caa_record_test.go index 32d09ee5d31c..d4aece83a91c 100644 --- a/azurerm/resource_arm_dns_caa_record_test.go +++ b/azurerm/resource_arm_dns_caa_record_test.go @@ -353,7 +353,7 @@ resource "azurerm_dns_caa_record" "test" { value = ";" } - tags { + tags = { environment = "Production" cost_center = "MSFT" } @@ -391,7 +391,7 @@ resource "azurerm_dns_caa_record" "test" { value = ";" } - tags { + tags = { environment = "staging" } } diff --git a/azurerm/resource_arm_dns_cname_record.go b/azurerm/resource_arm_dns_cname_record.go index e8ec99f7a1bc..e80e3832de18 100644 --- a/azurerm/resource_arm_dns_cname_record.go +++ b/azurerm/resource_arm_dns_cname_record.go @@ -95,8 +95,7 @@ func resourceArmDnsCNameRecordCreateUpdate(d *schema.ResourceData, meta interfac eTag := "" ifNoneMatch := "" // set to empty to allow updates to records after 
creation - _, err := client.CreateOrUpdate(ctx, resGroup, zoneName, name, dns.CNAME, parameters, eTag, ifNoneMatch) - if err != nil { + if _, err := client.CreateOrUpdate(ctx, resGroup, zoneName, name, dns.CNAME, parameters, eTag, ifNoneMatch); err != nil { return fmt.Errorf("Error creating/updating DNS CNAME Record %q (Zone %q / Resource Group %q): %s", name, zoneName, resGroup, err) } diff --git a/azurerm/resource_arm_dns_cname_record_test.go b/azurerm/resource_arm_dns_cname_record_test.go index a23a77b0f223..a681c8617cd7 100644 --- a/azurerm/resource_arm_dns_cname_record_test.go +++ b/azurerm/resource_arm_dns_cname_record_test.go @@ -313,7 +313,7 @@ resource "azurerm_dns_cname_record" "test" { ttl = 300 record = "contoso.com" - tags { + tags = { environment = "Production" cost_center = "MSFT" } @@ -340,7 +340,7 @@ resource "azurerm_dns_cname_record" "test" { ttl = 300 record = "contoso.com" - tags { + tags = { environment = "staging" } } diff --git a/azurerm/resource_arm_dns_mx_record.go b/azurerm/resource_arm_dns_mx_record.go index f182e3b93b67..549c045b90e9 100644 --- a/azurerm/resource_arm_dns_mx_record.go +++ b/azurerm/resource_arm_dns_mx_record.go @@ -90,24 +90,19 @@ func resourceArmDnsMxRecordCreateUpdate(d *schema.ResourceData, meta interface{} ttl := int64(d.Get("ttl").(int)) tags := d.Get("tags").(map[string]interface{}) - records, err := expandAzureRmDnsMxRecords(d) - if err != nil { - return err - } parameters := dns.RecordSet{ Name: &name, RecordSetProperties: &dns.RecordSetProperties{ Metadata: expandTags(tags), TTL: &ttl, - MxRecords: &records, + MxRecords: expandAzureRmDnsMxRecords(d), }, } eTag := "" ifNoneMatch := "" // set to empty to allow updates to records after creation - _, err = client.CreateOrUpdate(ctx, resGroup, zoneName, name, dns.MX, parameters, eTag, ifNoneMatch) - if err != nil { + if _, err := client.CreateOrUpdate(ctx, resGroup, zoneName, name, dns.MX, parameters, eTag, ifNoneMatch); err != nil { return fmt.Errorf("Error 
creating/updating DNS MX Record %q (Zone %q / Resource Group %q): %s", name, zoneName, resGroup, err) } @@ -204,7 +199,7 @@ func flattenAzureRmDnsMxRecords(records *[]dns.MxRecord) []map[string]interface{ // expand creates an array of dns.MxRecord, that is, the array needed // by azure-sdk-for-go to manipulate azure resources, hence Preference // is an int32 -func expandAzureRmDnsMxRecords(d *schema.ResourceData) ([]dns.MxRecord, error) { +func expandAzureRmDnsMxRecords(d *schema.ResourceData) *[]dns.MxRecord { recordStrings := d.Get("record").(*schema.Set).List() records := make([]dns.MxRecord, len(recordStrings)) @@ -221,7 +216,7 @@ func expandAzureRmDnsMxRecords(d *schema.ResourceData) ([]dns.MxRecord, error) { } } - return records, nil + return &records } func resourceArmDnsMxRecordHash(v interface{}) int { diff --git a/azurerm/resource_arm_dns_mx_record_test.go b/azurerm/resource_arm_dns_mx_record_test.go index 9dc53efd28d1..2ae058a1215e 100644 --- a/azurerm/resource_arm_dns_mx_record_test.go +++ b/azurerm/resource_arm_dns_mx_record_test.go @@ -308,7 +308,7 @@ resource "azurerm_dns_mx_record" "test" { exchange = "mail2.contoso.com" } - tags { + tags = { environment = "Production" cost_center = "MSFT" } @@ -344,7 +344,7 @@ resource "azurerm_dns_mx_record" "test" { exchange = "mail2.contoso.com" } - tags { + tags = { environment = "staging" } } diff --git a/azurerm/resource_arm_dns_ns_record.go b/azurerm/resource_arm_dns_ns_record.go index aaaeca4ed1e9..c401407966b2 100644 --- a/azurerm/resource_arm_dns_ns_record.go +++ b/azurerm/resource_arm_dns_ns_record.go @@ -92,24 +92,19 @@ func resourceArmDnsNsRecordCreateUpdate(d *schema.ResourceData, meta interface{} ttl := int64(d.Get("ttl").(int)) tags := d.Get("tags").(map[string]interface{}) - records, err := expandAzureRmDnsNsRecords(d) - if err != nil { - return err - } parameters := dns.RecordSet{ Name: &name, RecordSetProperties: &dns.RecordSetProperties{ Metadata: expandTags(tags), TTL: &ttl, - NsRecords: 
&records, + NsRecords: expandAzureRmDnsNsRecords(d), }, } eTag := "" ifNoneMatch := "" // set to empty to allow updates to records after creation - _, err = client.CreateOrUpdate(ctx, resGroup, zoneName, name, dns.NS, parameters, eTag, ifNoneMatch) - if err != nil { + if _, err := client.CreateOrUpdate(ctx, resGroup, zoneName, name, dns.NS, parameters, eTag, ifNoneMatch); err != nil { return fmt.Errorf("Error creating/updating DNS NS Record %q (Zone %q / Resource Group %q): %s", name, zoneName, resGroup, err) } @@ -216,7 +211,7 @@ func flattenAzureRmDnsNsRecords(records *[]dns.NsRecord) []string { return results } -func expandAzureRmDnsNsRecords(d *schema.ResourceData) ([]dns.NsRecord, error) { +func expandAzureRmDnsNsRecords(d *schema.ResourceData) *[]dns.NsRecord { var records []dns.NsRecord //TODO: remove this once we remove the `record` attribute @@ -247,5 +242,5 @@ func expandAzureRmDnsNsRecords(d *schema.ResourceData) ([]dns.NsRecord, error) { } } } - return records, nil + return &records } diff --git a/azurerm/resource_arm_dns_ns_record_test.go b/azurerm/resource_arm_dns_ns_record_test.go index 7688eb668c2d..8ef81256abbc 100644 --- a/azurerm/resource_arm_dns_ns_record_test.go +++ b/azurerm/resource_arm_dns_ns_record_test.go @@ -472,7 +472,7 @@ resource "azurerm_dns_ns_record" "test" { records = ["ns1.contoso.com", "ns2.contoso.com"] - tags { + tags = { environment = "Production" cost_center = "MSFT" } @@ -507,7 +507,7 @@ resource "azurerm_dns_ns_record" "test" { nsdname = "ns2.contoso.com" } - tags { + tags = { environment = "Production" cost_center = "MSFT" } @@ -535,7 +535,7 @@ resource "azurerm_dns_ns_record" "test" { records = ["ns1.contoso.com", "ns2.contoso.com"] - tags { + tags = { environment = "staging" } } @@ -569,7 +569,7 @@ resource "azurerm_dns_ns_record" "test" { nsdname = "ns2.contoso.com" } - tags { + tags = { environment = "staging" } } diff --git a/azurerm/resource_arm_dns_ptr_record.go b/azurerm/resource_arm_dns_ptr_record.go index 
261497b38d5c..d3caa2abfbab 100644 --- a/azurerm/resource_arm_dns_ptr_record.go +++ b/azurerm/resource_arm_dns_ptr_record.go @@ -75,23 +75,17 @@ func resourceArmDnsPtrRecordCreateUpdate(d *schema.ResourceData, meta interface{ ttl := int64(d.Get("ttl").(int)) tags := d.Get("tags").(map[string]interface{}) - records, err := expandAzureRmDnsPtrRecords(d) - if err != nil { - return err - } - parameters := dns.RecordSet{ RecordSetProperties: &dns.RecordSetProperties{ Metadata: expandTags(tags), TTL: &ttl, - PtrRecords: &records, + PtrRecords: expandAzureRmDnsPtrRecords(d), }, } eTag := "" ifNoneMatch := "" // set to empty to allow updates to records after creation - _, err = client.CreateOrUpdate(ctx, resGroup, zoneName, name, dns.PTR, parameters, eTag, ifNoneMatch) - if err != nil { + if _, err := client.CreateOrUpdate(ctx, resGroup, zoneName, name, dns.PTR, parameters, eTag, ifNoneMatch); err != nil { return fmt.Errorf("Error creating/updating DNS PTR Record %q (Zone %q / Resource Group %q): %s", name, zoneName, resGroup, err) } @@ -184,7 +178,7 @@ func flattenAzureRmDnsPtrRecords(records *[]dns.PtrRecord) []string { return results } -func expandAzureRmDnsPtrRecords(d *schema.ResourceData) ([]dns.PtrRecord, error) { +func expandAzureRmDnsPtrRecords(d *schema.ResourceData) *[]dns.PtrRecord { recordStrings := d.Get("records").(*schema.Set).List() records := make([]dns.PtrRecord, len(recordStrings)) @@ -195,5 +189,5 @@ func expandAzureRmDnsPtrRecords(d *schema.ResourceData) ([]dns.PtrRecord, error) } } - return records, nil + return &records } diff --git a/azurerm/resource_arm_dns_ptr_record_test.go b/azurerm/resource_arm_dns_ptr_record_test.go index 10a69b77eb9c..919bf0f15e16 100644 --- a/azurerm/resource_arm_dns_ptr_record_test.go +++ b/azurerm/resource_arm_dns_ptr_record_test.go @@ -268,7 +268,7 @@ resource "azurerm_dns_ptr_record" "test" { ttl = 300 records = ["hashicorp.com", "microsoft.com"] - tags { + tags = { environment = "Dev" cost_center = "Ops" } @@ -295,7 
+295,7 @@ resource "azurerm_dns_ptr_record" "test" { ttl = 300 records = ["hashicorp.com", "microsoft.com"] - tags { + tags = { environment = "Stage" } } diff --git a/azurerm/resource_arm_dns_srv_record.go b/azurerm/resource_arm_dns_srv_record.go index bc6dfe566a40..c652c100b591 100644 --- a/azurerm/resource_arm_dns_srv_record.go +++ b/azurerm/resource_arm_dns_srv_record.go @@ -99,24 +99,18 @@ func resourceArmDnsSrvRecordCreateUpdate(d *schema.ResourceData, meta interface{ ttl := int64(d.Get("ttl").(int)) tags := d.Get("tags").(map[string]interface{}) - records, err := expandAzureRmDnsSrvRecords(d) - if err != nil { - return err - } - parameters := dns.RecordSet{ Name: &name, RecordSetProperties: &dns.RecordSetProperties{ Metadata: expandTags(tags), TTL: &ttl, - SrvRecords: &records, + SrvRecords: expandAzureRmDnsSrvRecords(d), }, } eTag := "" ifNoneMatch := "" // set to empty to allow updates to records after creation - _, err = client.CreateOrUpdate(ctx, resGroup, zoneName, name, dns.SRV, parameters, eTag, ifNoneMatch) - if err != nil { + if _, err := client.CreateOrUpdate(ctx, resGroup, zoneName, name, dns.SRV, parameters, eTag, ifNoneMatch); err != nil { return fmt.Errorf("Error creating/updating DNS SRV Record %q (Zone %q / Resource Group %q): %s", name, zoneName, resGroup, err) } @@ -207,7 +201,7 @@ func flattenAzureRmDnsSrvRecords(records *[]dns.SrvRecord) []map[string]interfac return results } -func expandAzureRmDnsSrvRecords(d *schema.ResourceData) ([]dns.SrvRecord, error) { +func expandAzureRmDnsSrvRecords(d *schema.ResourceData) *[]dns.SrvRecord { recordStrings := d.Get("record").(*schema.Set).List() records := make([]dns.SrvRecord, len(recordStrings)) @@ -228,7 +222,7 @@ func expandAzureRmDnsSrvRecords(d *schema.ResourceData) ([]dns.SrvRecord, error) records[i] = srvRecord } - return records, nil + return &records } func resourceArmDnsSrvRecordHash(v interface{}) int { diff --git a/azurerm/resource_arm_dns_srv_record_test.go 
b/azurerm/resource_arm_dns_srv_record_test.go index 8c3a0fadd5e8..9b7665585b00 100644 --- a/azurerm/resource_arm_dns_srv_record_test.go +++ b/azurerm/resource_arm_dns_srv_record_test.go @@ -59,7 +59,7 @@ func TestAccAzureRMDnsSrvRecord_requiresImport(t *testing.T) { }, { Config: testAccAzureRMDnsSrvRecord_requiresImport(ri, location), - ExpectError: testRequiresImportError("v"), + ExpectError: testRequiresImportError("azurerm_dns_srv_record"), }, }, }) @@ -326,7 +326,7 @@ resource "azurerm_dns_srv_record" "test" { target = "target2.contoso.com" } - tags { + tags = { environment = "Production" cost_center = "MSFT" } @@ -366,7 +366,7 @@ resource "azurerm_dns_srv_record" "test" { target = "target2.contoso.com" } - tags { + tags = { environment = "staging" } } diff --git a/azurerm/resource_arm_dns_txt_record.go b/azurerm/resource_arm_dns_txt_record.go index 118086fcb4f2..753b780d2ecf 100644 --- a/azurerm/resource_arm_dns_txt_record.go +++ b/azurerm/resource_arm_dns_txt_record.go @@ -81,24 +81,18 @@ func resourceArmDnsTxtRecordCreateUpdate(d *schema.ResourceData, meta interface{ ttl := int64(d.Get("ttl").(int)) tags := d.Get("tags").(map[string]interface{}) - records, err := expandAzureRmDnsTxtRecords(d) - if err != nil { - return err - } - parameters := dns.RecordSet{ Name: &name, RecordSetProperties: &dns.RecordSetProperties{ Metadata: expandTags(tags), TTL: &ttl, - TxtRecords: &records, + TxtRecords: expandAzureRmDnsTxtRecords(d), }, } eTag := "" ifNoneMatch := "" // set to empty to allow updates to records after creation - _, err = client.CreateOrUpdate(ctx, resGroup, zoneName, name, dns.TXT, parameters, eTag, ifNoneMatch) - if err != nil { + if _, err := client.CreateOrUpdate(ctx, resGroup, zoneName, name, dns.TXT, parameters, eTag, ifNoneMatch); err != nil { return fmt.Errorf("Error creating/updating DNS TXT Record %q (Zone %q / Resource Group %q): %s", name, zoneName, resGroup, err) } @@ -191,7 +185,7 @@ func flattenAzureRmDnsTxtRecords(records *[]dns.TxtRecord) 
[]map[string]interfac return results } -func expandAzureRmDnsTxtRecords(d *schema.ResourceData) ([]dns.TxtRecord, error) { +func expandAzureRmDnsTxtRecords(d *schema.ResourceData) *[]dns.TxtRecord { recordStrings := d.Get("record").(*schema.Set).List() records := make([]dns.TxtRecord, len(recordStrings)) @@ -206,5 +200,5 @@ func expandAzureRmDnsTxtRecords(d *schema.ResourceData) ([]dns.TxtRecord, error) records[i] = txtRecord } - return records, nil + return &records } diff --git a/azurerm/resource_arm_dns_txt_record_test.go b/azurerm/resource_arm_dns_txt_record_test.go index 26c6bb398df3..366f2d5a4373 100644 --- a/azurerm/resource_arm_dns_txt_record_test.go +++ b/azurerm/resource_arm_dns_txt_record_test.go @@ -297,7 +297,7 @@ resource "azurerm_dns_txt_record" "test" { value = "Another test txt string" } - tags { + tags = { environment = "Production" cost_center = "MSFT" } @@ -331,7 +331,7 @@ resource "azurerm_dns_txt_record" "test" { value = "Another test txt string" } - tags { + tags = { environment = "staging" } } diff --git a/azurerm/resource_arm_dns_zone_test.go b/azurerm/resource_arm_dns_zone_test.go index e707c6d1c904..a1e46541c44e 100644 --- a/azurerm/resource_arm_dns_zone_test.go +++ b/azurerm/resource_arm_dns_zone_test.go @@ -241,7 +241,7 @@ resource "azurerm_dns_zone" "test" { name = "acctestzone%d.com" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { environment = "Production" cost_center = "MSFT" } @@ -260,7 +260,7 @@ resource "azurerm_dns_zone" "test" { name = "acctestzone%d.com" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { environment = "staging" } } diff --git a/azurerm/resource_arm_eventgrid_domain.go b/azurerm/resource_arm_eventgrid_domain.go new file mode 100644 index 000000000000..cd76037a8d82 --- /dev/null +++ b/azurerm/resource_arm_eventgrid_domain.go @@ -0,0 +1,396 @@ +package azurerm + +import ( + "fmt" + "log" + + 
"github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/response" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceArmEventGridDomain() *schema.Resource { + return &schema.Resource{ + Create: resourceArmEventGridDomainCreateUpdate, + Read: resourceArmEventGridDomainRead, + Update: resourceArmEventGridDomainCreateUpdate, + Delete: resourceArmEventGridDomainDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "location": locationSchema(), + + "resource_group_name": resourceGroupNameSchema(), + + "tags": tagsSchema(), + + "input_schema": { + Type: schema.TypeString, + Optional: true, + Default: string(eventgrid.InputSchemaEventGridSchema), + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + string(eventgrid.InputSchemaCloudEventV01Schema), + string(eventgrid.InputSchemaCustomEventSchema), + string(eventgrid.InputSchemaEventGridSchema), + }, false), + }, + + "input_mapping_fields": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + "topic": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + "event_time": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + "event_type": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + "subject": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + "data_version": { + 
Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + }, + }, + }, + + "input_mapping_default_values": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "event_type": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + "subject": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + "data_version": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + }, + }, + }, + + "endpoint": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceArmEventGridDomainCreateUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).eventGridDomainsClient + ctx := meta.(*ArmClient).StopContext + + name := d.Get("name").(string) + resourceGroup := d.Get("resource_group_name").(string) + + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing EventGrid Domain %q (Resource Group %q): %s", name, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_eventgrid_domain", *existing.ID) + } + } + + location := azureRMNormalizeLocation(d.Get("location").(string)) + tags := d.Get("tags").(map[string]interface{}) + + domainProperties := &eventgrid.DomainProperties{ + InputSchemaMapping: expandAzureRmEventgridDomainInputMapping(d), + InputSchema: eventgrid.InputSchema(d.Get("input_schema").(string)), + } + + domain := eventgrid.Domain{ + Location: &location, + DomainProperties: domainProperties, + Tags: expandTags(tags), + } + + log.Printf("[INFO] preparing arguments for AzureRM EventGrid Domain creation with Properties: %+v", domain) + + future, err := client.CreateOrUpdate(ctx, resourceGroup, name, domain) + if err != nil { + return 
fmt.Errorf("Error creating/updating EventGrid Domain %q (Resource Group %q): %s", name, resourceGroup, err) + } + + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("Error waiting for EventGrid Domain %q (Resource Group %q) to become available: %s", name, resourceGroup, err) + } + + read, err := client.Get(ctx, resourceGroup, name) + if err != nil { + return fmt.Errorf("Error retrieving EventGrid Domain %q (Resource Group %q): %s", name, resourceGroup, err) + } + if read.ID == nil { + return fmt.Errorf("Cannot read EventGrid Domain %q (resource group %s) ID", name, resourceGroup) + } + + d.SetId(*read.ID) + + return resourceArmEventGridDomainRead(d, meta) +} + +func resourceArmEventGridDomainRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).eventGridDomainsClient + ctx := meta.(*ArmClient).StopContext + + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return err + } + resourceGroup := id.ResourceGroup + name := id.Path["domains"] + + resp, err := client.Get(ctx, resourceGroup, name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[WARN] EventGrid Domain %q was not found (Resource Group %q)", name, resourceGroup) + d.SetId("") + return nil + } + + return fmt.Errorf("Error making Read request on EventGrid Domain %q: %+v", name, err) + } + + d.Set("name", resp.Name) + d.Set("resource_group_name", resourceGroup) + if location := resp.Location; location != nil { + d.Set("location", azureRMNormalizeLocation(*location)) + } + + if props := resp.DomainProperties; props != nil { + d.Set("endpoint", props.Endpoint) + + d.Set("input_schema", string(props.InputSchema)) + + inputMappingFields, err := flattenAzureRmEventgridDomainInputMapping(props.InputSchemaMapping) + if err != nil { + return fmt.Errorf("Unable to flatten `input_schema_mapping_fields` for EventGrid Domain %q (Resource Group %q): %s", name, resourceGroup, err) + } + if err := 
d.Set("input_mapping_fields", inputMappingFields); err != nil { + return fmt.Errorf("Error setting `input_mapping_fields` for EventGrid Domain %q (Resource Group %q): %s", name, resourceGroup, err) + } + + inputMappingDefaultValues, err := flattenAzureRmEventgridDomainInputMappingDefaultValues(props.InputSchemaMapping) + if err != nil { + return fmt.Errorf("Unable to flatten `input_schema_mapping_default_values` for EventGrid Domain %q (Resource Group %q): %s", name, resourceGroup, err) + } + if err := d.Set("input_mapping_default_values", inputMappingDefaultValues); err != nil { + return fmt.Errorf("Error setting `input_mapping_default_values` for EventGrid Domain %q (Resource Group %q): %s", name, resourceGroup, err) + } + } + + flattenAndSetTags(d, resp.Tags) + + return nil +} + +func resourceArmEventGridDomainDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).eventGridDomainsClient + ctx := meta.(*ArmClient).StopContext + + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return err + } + resGroup := id.ResourceGroup + name := id.Path["domains"] + + future, err := client.Delete(ctx, resGroup, name) + if err != nil { + if response.WasNotFound(future.Response()) { + return nil + } + return fmt.Errorf("Error deleting Event Grid Domain %q: %+v", name, err) + } + + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + if response.WasNotFound(future.Response()) { + return nil + } + return fmt.Errorf("Error deleting Event Grid Domain %q: %+v", name, err) + } + + return nil +} + +func expandAzureRmEventgridDomainInputMapping(d *schema.ResourceData) *eventgrid.JSONInputSchemaMapping { + imf, imfok := d.GetOk("input_mapping_fields") + + imdv, imdvok := d.GetOk("input_mapping_default_values") + + if !imfok && !imdvok { + return nil + } + + jismp := eventgrid.JSONInputSchemaMappingProperties{} + + if imfok { + mappings := imf.([]interface{}) + mapping := mappings[0].(map[string]interface{}) + + if id 
:= mapping["id"].(string); id != "" { + jismp.ID = &eventgrid.JSONField{SourceField: &id} + } + + if eventTime := mapping["event_time"].(string); eventTime != "" { + jismp.EventTime = &eventgrid.JSONField{SourceField: &eventTime} + } + + if topic := mapping["topic"].(string); topic != "" { + jismp.Topic = &eventgrid.JSONField{SourceField: &topic} + } + + if dataVersion := mapping["data_version"].(string); dataVersion != "" { + jismp.DataVersion = &eventgrid.JSONFieldWithDefault{SourceField: &dataVersion} + } + + if subject := mapping["subject"].(string); subject != "" { + jismp.Subject = &eventgrid.JSONFieldWithDefault{SourceField: &subject} + } + + if eventType := mapping["event_type"].(string); eventType != "" { + jismp.EventType = &eventgrid.JSONFieldWithDefault{SourceField: &eventType} + } + } + + if imdvok { + mappings := imdv.([]interface{}) + mapping := mappings[0].(map[string]interface{}) + + if dataVersion := mapping["data_version"].(string); dataVersion != "" { + jismp.DataVersion = &eventgrid.JSONFieldWithDefault{DefaultValue: &dataVersion} + } + + if subject := mapping["subject"].(string); subject != "" { + jismp.Subject = &eventgrid.JSONFieldWithDefault{DefaultValue: &subject} + } + + if eventType := mapping["event_type"].(string); eventType != "" { + jismp.EventType = &eventgrid.JSONFieldWithDefault{DefaultValue: &eventType} + } + } + + jsonMapping := eventgrid.JSONInputSchemaMapping{ + JSONInputSchemaMappingProperties: &jismp, + InputSchemaMappingType: eventgrid.InputSchemaMappingTypeJSON, + } + + return &jsonMapping +} + +func flattenAzureRmEventgridDomainInputMapping(input eventgrid.BasicInputSchemaMapping) ([]interface{}, error) { + if input == nil { + return nil, nil + } + result := make(map[string]interface{}) + + jsonValues, ok := input.(eventgrid.JSONInputSchemaMapping) + if !ok { + return nil, fmt.Errorf("Unable to read JSONInputSchemaMapping") + } + props := jsonValues.JSONInputSchemaMappingProperties + + if props.EventTime != nil && 
props.EventTime.SourceField != nil { + result["event_time"] = *props.EventTime.SourceField + } + + if props.ID != nil && props.ID.SourceField != nil { + result["id"] = *props.ID.SourceField + } + + if props.Topic != nil && props.Topic.SourceField != nil { + result["topic"] = *props.Topic.SourceField + } + + if props.DataVersion != nil && props.DataVersion.SourceField != nil { + result["data_version"] = *props.DataVersion.SourceField + } + + if props.EventType != nil && props.EventType.SourceField != nil { + result["event_type"] = *props.EventType.SourceField + } + + if props.Subject != nil && props.Subject.SourceField != nil { + result["subject"] = *props.Subject.SourceField + } + + return []interface{}{result}, nil +} + +func flattenAzureRmEventgridDomainInputMappingDefaultValues(input eventgrid.BasicInputSchemaMapping) ([]interface{}, error) { + if input == nil { + return nil, nil + } + result := make(map[string]interface{}) + + jsonValues, ok := input.(eventgrid.JSONInputSchemaMapping) + if !ok { + return nil, fmt.Errorf("Unable to read JSONInputSchemaMapping") + } + props := jsonValues.JSONInputSchemaMappingProperties + + if props.DataVersion != nil && props.DataVersion.DefaultValue != nil { + result["data_version"] = *props.DataVersion.DefaultValue + } + + if props.EventType != nil && props.EventType.DefaultValue != nil { + result["event_type"] = *props.EventType.DefaultValue + } + + if props.Subject != nil && props.Subject.DefaultValue != nil { + result["subject"] = *props.Subject.DefaultValue + } + + return []interface{}{result}, nil +} diff --git a/azurerm/resource_arm_eventgrid_domain_test.go b/azurerm/resource_arm_eventgrid_domain_test.go new file mode 100644 index 000000000000..d0de859bb68e --- /dev/null +++ b/azurerm/resource_arm_eventgrid_domain_test.go @@ -0,0 +1,212 @@ +package azurerm + +import ( + "fmt" + "net/http" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func TestAccAzureRMEventGridDomain_basic(t *testing.T) { + resourceName := "azurerm_eventgrid_domain.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMEventGridDomainDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMEventGridDomain_basic(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMEventGridDomainExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "endpoint"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMEventGridDomain_mapping(t *testing.T) { + resourceName := "azurerm_eventgrid_domain.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMEventGridDomainDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMEventGridDomain_mapping(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMEventGridDomainExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "input_mapping_fields.0.topic", "test"), + resource.TestCheckResourceAttr(resourceName, "input_mapping_fields.0.event_type", "test"), + resource.TestCheckResourceAttr(resourceName, "input_mapping_default_values.0.data_version", "1.0"), + resource.TestCheckResourceAttr(resourceName, "input_mapping_default_values.0.subject", "DefaultSubject"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMEventGridDomain_basicWithTags(t *testing.T) { + resourceName := 
"azurerm_eventgrid_domain.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMEventGridDomainDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMEventGridDomain_basicWithTags(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMEventGridDomainExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.foo", "bar"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testCheckAzureRMEventGridDomainDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*ArmClient).eventGridDomainsClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + + for _, rs := range s.RootModule().Resources { + if rs.Type != "azurerm_eventgrid_domain" { + continue + } + + name := rs.Primary.Attributes["name"] + resourceGroup := rs.Primary.Attributes["resource_group_name"] + + resp, err := client.Get(ctx, resourceGroup, name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return nil + } + + return err + } + + if resp.StatusCode != http.StatusNotFound { + return fmt.Errorf("EventGrid Domain still exists:\n%#v", resp) + } + } + + return nil +} + +func testCheckAzureRMEventGridDomainExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + // Ensure we have enough information in state to look up in API + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + name := rs.Primary.Attributes["name"] + resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] + if !hasResourceGroup { + return fmt.Errorf("Bad: no resource group found in state for EventGrid Domain: %s", name) + } + + client := 
testAccProvider.Meta().(*ArmClient).eventGridDomainsClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + resp, err := client.Get(ctx, resourceGroup, name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return fmt.Errorf("Bad: EventGrid Domain %q (resource group: %s) does not exist", name, resourceGroup) + } + + return fmt.Errorf("Bad: Get on eventGridDomainsClient: %s", err) + } + + return nil + } +} + +func testAccAzureRMEventGridDomain_basic(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_eventgrid_domain" "test" { + name = "acctesteg-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" +} +`, rInt, location, rInt) +} + +func testAccAzureRMEventGridDomain_mapping(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_eventgrid_domain" "test" { + name = "acctesteg-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + input_schema = "CustomEventSchema" + input_mapping_fields { + topic = "test" + event_type = "test" + } + + input_mapping_default_values { + data_version = "1.0" + subject = "DefaultSubject" + } +} +`, rInt, location, rInt) +} + +func testAccAzureRMEventGridDomain_basicWithTags(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_eventgrid_domain" "test" { + name = "acctesteg-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + tags = { + "foo" = "bar" + } +} +`, rInt, location, rInt) +} diff --git a/azurerm/resource_arm_eventgrid_topic.go 
b/azurerm/resource_arm_eventgrid_topic.go index e57e1c6821a6..dbbb69483a3e 100644 --- a/azurerm/resource_arm_eventgrid_topic.go +++ b/azurerm/resource_arm_eventgrid_topic.go @@ -4,7 +4,7 @@ import ( "fmt" "log" - "github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid" + "github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid" "github.com/hashicorp/terraform/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" diff --git a/azurerm/resource_arm_eventgrid_topic_test.go b/azurerm/resource_arm_eventgrid_topic_test.go index e19d958cfaf2..c8e5b639bfdf 100644 --- a/azurerm/resource_arm_eventgrid_topic_test.go +++ b/azurerm/resource_arm_eventgrid_topic_test.go @@ -198,7 +198,7 @@ resource "azurerm_eventgrid_topic" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { "foo" = "bar" } } diff --git a/azurerm/resource_arm_eventhub_namespace_test.go b/azurerm/resource_arm_eventhub_namespace_test.go index 0068e64d03ac..b2db59180575 100644 --- a/azurerm/resource_arm_eventhub_namespace_test.go +++ b/azurerm/resource_arm_eventhub_namespace_test.go @@ -513,7 +513,7 @@ resource "azurerm_eventhub_namespace" "test" { resource_group_name = "${azurerm_resource_group.test.name}" sku = "Basic" - tags { + tags = { environment = "Production" } } diff --git a/azurerm/resource_arm_express_route_circuit_authorization_test.go b/azurerm/resource_arm_express_route_circuit_authorization_test.go index 83c0e4a3a770..2c5f24dac49f 100644 --- a/azurerm/resource_arm_express_route_circuit_authorization_test.go +++ b/azurerm/resource_arm_express_route_circuit_authorization_test.go @@ -169,7 +169,7 @@ resource "azurerm_express_route_circuit" "test" { allow_classic_operations = false - tags { + tags = { Environment = "production" 
Purpose = "AcceptanceTests" } @@ -218,7 +218,7 @@ resource "azurerm_express_route_circuit" "test" { allow_classic_operations = false - tags { + tags = { Environment = "production" Purpose = "AcceptanceTests" } diff --git a/azurerm/resource_arm_express_route_circuit_peering_test.go b/azurerm/resource_arm_express_route_circuit_peering_test.go index 84843febf6b5..e7b89e911c71 100644 --- a/azurerm/resource_arm_express_route_circuit_peering_test.go +++ b/azurerm/resource_arm_express_route_circuit_peering_test.go @@ -16,7 +16,7 @@ func testAccAzureRMExpressRouteCircuitPeering_azurePrivatePeering(t *testing.T) ri := tf.AccRandTimeInt() location := testLocation() - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMExpressRouteCircuitPeeringDestroy, @@ -50,7 +50,7 @@ func testAccAzureRMExpressRouteCircuitPeering_requiresImport(t *testing.T) { location := testLocation() - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMExpressRouteCircuitPeeringDestroy, @@ -74,7 +74,7 @@ func testAccAzureRMExpressRouteCircuitPeering_microsoftPeering(t *testing.T) { ri := tf.AccRandTimeInt() location := testLocation() - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMExpressRouteCircuitPeeringDestroy, @@ -173,7 +173,7 @@ resource "azurerm_express_route_circuit" "test" { family = "MeteredData" } - tags { + tags = { Environment = "production" Purpose = "AcceptanceTests" } @@ -230,7 +230,7 @@ resource "azurerm_express_route_circuit" "test" { family = "MeteredData" } - tags { + tags = { Environment = "production" Purpose = "AcceptanceTests" } diff --git 
a/azurerm/resource_arm_express_route_circuit_test.go b/azurerm/resource_arm_express_route_circuit_test.go index 3b2daf94ec48..e6a43e03653c 100644 --- a/azurerm/resource_arm_express_route_circuit_test.go +++ b/azurerm/resource_arm_express_route_circuit_test.go @@ -58,7 +58,7 @@ func testAccAzureRMExpressRouteCircuit_basicMetered(t *testing.T) { var erc network.ExpressRouteCircuit ri := tf.AccRandTimeInt() - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMExpressRouteCircuitDestroy, @@ -90,7 +90,7 @@ func testAccAzureRMExpressRouteCircuit_requiresImport(t *testing.T) { location := testLocation() - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMExpressRouteCircuitDestroy, @@ -114,7 +114,7 @@ func testAccAzureRMExpressRouteCircuit_basicUnlimited(t *testing.T) { var erc network.ExpressRouteCircuit ri := tf.AccRandTimeInt() - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMExpressRouteCircuitDestroy, @@ -139,7 +139,7 @@ func testAccAzureRMExpressRouteCircuit_update(t *testing.T) { var erc network.ExpressRouteCircuit ri := tf.AccRandTimeInt() - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMExpressRouteCircuitDestroy, @@ -167,7 +167,7 @@ func testAccAzureRMExpressRouteCircuit_tierUpdate(t *testing.T) { var erc network.ExpressRouteCircuit ri := tf.AccRandTimeInt() - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: 
testCheckAzureRMExpressRouteCircuitDestroy, @@ -195,7 +195,7 @@ func testAccAzureRMExpressRouteCircuit_premiumMetered(t *testing.T) { var erc network.ExpressRouteCircuit ri := tf.AccRandTimeInt() - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMExpressRouteCircuitDestroy, @@ -222,7 +222,7 @@ func testAccAzureRMExpressRouteCircuit_premiumUnlimited(t *testing.T) { var erc network.ExpressRouteCircuit ri := tf.AccRandTimeInt() - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMExpressRouteCircuitDestroy, @@ -249,7 +249,7 @@ func testAccAzureRMExpressRouteCircuit_allowClassicOperationsUpdate(t *testing.T var erc network.ExpressRouteCircuit ri := tf.AccRandTimeInt() - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMExpressRouteCircuitDestroy, @@ -351,7 +351,7 @@ resource "azurerm_express_route_circuit" "test" { allow_classic_operations = false - tags { + tags = { Environment = "production" Purpose = "AcceptanceTests" } @@ -379,7 +379,7 @@ resource "azurerm_express_route_circuit" "import" { allow_classic_operations = false - tags { + tags = { Environment = "production" Purpose = "AcceptanceTests" } @@ -409,7 +409,7 @@ resource "azurerm_express_route_circuit" "test" { allow_classic_operations = false - tags { + tags = { Environment = "production" Purpose = "AcceptanceTests" } @@ -439,7 +439,7 @@ resource "azurerm_express_route_circuit" "test" { allow_classic_operations = false - tags { + tags = { Environment = "production" Purpose = "AcceptanceTests" } @@ -469,7 +469,7 @@ resource "azurerm_express_route_circuit" "test" { allow_classic_operations = %s - tags { + tags = { 
Environment = "production" Purpose = "AcceptanceTests" } diff --git a/azurerm/resource_arm_firewall_test.go b/azurerm/resource_arm_firewall_test.go index 474e490f46d9..134cb607a1ec 100644 --- a/azurerm/resource_arm_firewall_test.go +++ b/azurerm/resource_arm_firewall_test.go @@ -426,7 +426,7 @@ resource "azurerm_firewall" "test" { public_ip_address_id = "${azurerm_public_ip.test.id}" } - tags { + tags = { environment = "Production" cost_center = "MSFT" } @@ -474,7 +474,7 @@ resource "azurerm_firewall" "test" { public_ip_address_id = "${azurerm_public_ip.test.id}" } - tags { + tags = { environment = "staging" } } diff --git a/azurerm/resource_arm_function_app.go b/azurerm/resource_arm_function_app.go index 422954fa07ee..ed2a182cb234 100644 --- a/azurerm/resource_arm_function_app.go +++ b/azurerm/resource_arm_function_app.go @@ -37,6 +37,11 @@ func resourceArmFunctionApp() *schema.Resource { "location": locationSchema(), + "kind": { + Type: schema.TypeString, + Computed: true, + }, + "app_service_plan_id": { Type: schema.TypeString, Required: true, @@ -183,6 +188,11 @@ func resourceArmFunctionApp() *schema.Resource { Optional: true, Default: false, }, + "linux_fx_version": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, }, }, }, @@ -447,6 +457,8 @@ func resourceArmFunctionAppRead(d *schema.ResourceData, meta interface{}) error d.Set("name", name) d.Set("resource_group_name", resGroup) + d.Set("kind", resp.Kind) + if location := resp.Location; location != nil { d.Set("location", azureRMNormalizeLocation(*location)) } @@ -622,6 +634,10 @@ func expandFunctionAppSiteConfig(d *schema.ResourceData) web.SiteConfig { siteConfig.WebSocketsEnabled = utils.Bool(v.(bool)) } + if v, ok := config["linux_fx_version"]; ok { + siteConfig.LinuxFxVersion = utils.String(v.(string)) + } + return siteConfig } @@ -646,6 +662,10 @@ func flattenFunctionAppSiteConfig(input *web.SiteConfig) []interface{} { result["websockets_enabled"] = *input.WebSocketsEnabled } + if 
input.LinuxFxVersion != nil { + result["linux_fx_version"] = *input.LinuxFxVersion + } + results = append(results, result) return results } diff --git a/azurerm/resource_arm_function_app_test.go b/azurerm/resource_arm_function_app_test.go index 5b36a62d2b5a..2500bac3c8df 100644 --- a/azurerm/resource_arm_function_app_test.go +++ b/azurerm/resource_arm_function_app_test.go @@ -206,6 +206,34 @@ func TestAccAzureRMFunctionApp_siteConfig(t *testing.T) { }) } +func TestAccAzureRMFunctionApp_linuxFxVersion(t *testing.T) { + resourceName := "azurerm_function_app.test" + ri := tf.AccRandTimeInt() + rs := strings.ToLower(acctest.RandString(11)) + config := testAccAzureRMFunctionApp_linuxFxVersion(ri, rs, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMFunctionAppDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMFunctionAppExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "kind", "functionapp,linux,container"), + resource.TestCheckResourceAttr(resourceName, "site_config.0.linux_fx_version", "DOCKER|(golang:latest)"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccAzureRMFunctionApp_connectionStrings(t *testing.T) { resourceName := "azurerm_function_app.test" ri := tf.AccRandTimeInt() @@ -242,7 +270,8 @@ func TestAccAzureRMFunctionApp_siteConfigMulti(t *testing.T) { configBase := testAccAzureRMFunctionApp_basic(ri, rs, testLocation()) configUpdate1 := testAccAzureRMFunctionApp_appSettings(ri, rs, testLocation()) configUpdate2 := testAccAzureRMFunctionApp_appSettingsAlwaysOn(ri, rs, testLocation()) - configUpdate3 := testAccAzureRMFunctionApp_appSettingsAlwaysOnConnectionStrings(ri, rs, testLocation()) + configUpdate3 := testAccAzureRMFunctionApp_appSettingsAlwaysOnLinuxFxVersion(ri, rs, 
testLocation()) + configUpdate4 := testAccAzureRMFunctionApp_appSettingsAlwaysOnLinuxFxVersionConnectionStrings(ri, rs, testLocation()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -277,9 +306,22 @@ func TestAccAzureRMFunctionApp_siteConfigMulti(t *testing.T) { Config: configUpdate3, Check: resource.ComposeTestCheckFunc( testCheckAzureRMFunctionAppExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "kind", "functionapp,linux,container"), resource.TestCheckResourceAttr(resourceName, "app_settings.%", "1"), resource.TestCheckResourceAttr(resourceName, "app_settings.hello", "world"), resource.TestCheckResourceAttr(resourceName, "site_config.0.always_on", "true"), + resource.TestCheckResourceAttr(resourceName, "site_config.0.linux_fx_version", "DOCKER|(golang:latest)"), + ), + }, + { + Config: configUpdate4, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMFunctionAppExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "kind", "functionapp,linux,container"), + resource.TestCheckResourceAttr(resourceName, "app_settings.%", "1"), + resource.TestCheckResourceAttr(resourceName, "app_settings.hello", "world"), + resource.TestCheckResourceAttr(resourceName, "site_config.0.always_on", "true"), + resource.TestCheckResourceAttr(resourceName, "site_config.0.linux_fx_version", "DOCKER|(golang:latest)"), resource.TestCheckResourceAttr(resourceName, "connection_string.0.name", "Example"), resource.TestCheckResourceAttr(resourceName, "connection_string.0.value", "some-postgresql-connection-string"), resource.TestCheckResourceAttr(resourceName, "connection_string.0.type", "PostgreSQL"), @@ -755,7 +797,7 @@ resource "azurerm_function_app" "test" { app_service_plan_id = "${azurerm_app_service_plan.test.id}" storage_connection_string = "${azurerm_storage_account.test.primary_connection_string}" - tags { + tags = { environment = "production" } } @@ -795,7 +837,7 @@ resource "azurerm_function_app" "test" 
{ app_service_plan_id = "${azurerm_app_service_plan.test.id}" storage_connection_string = "${azurerm_storage_account.test.primary_connection_string}" - tags { + tags = { environment = "production" hello = "Berlin" } @@ -873,7 +915,7 @@ resource "azurerm_function_app" "test" { app_service_plan_id = "${azurerm_app_service_plan.test.id}" storage_connection_string = "${azurerm_storage_account.test.primary_connection_string}" - app_settings { + app_settings = { "hello" = "world" } } @@ -920,6 +962,52 @@ resource "azurerm_function_app" "test" { `, rInt, location, rString) } +func testAccAzureRMFunctionApp_linuxFxVersion(rInt int, rString, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%[1]d" + location = "%[2]s" +} + +resource "azurerm_storage_account" "test" { + name = "acctestsa%[3]s" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + account_tier = "Standard" + account_replication_type = "LRS" +} + +resource "azurerm_app_service_plan" "test" { + name = "acctestASP-%[1]d" + location = "${azurerm_resource_group.test.location}" + kind = "Linux" + resource_group_name = "${azurerm_resource_group.test.name}" + + sku { + tier = "Standard" + size = "S1" + } + + properties { + reserved = true + } +} + +resource "azurerm_function_app" "test" { + name = "acctest-%[1]d-func" + location = "${azurerm_resource_group.test.location}" + version = "~2" + resource_group_name = "${azurerm_resource_group.test.name}" + app_service_plan_id = "${azurerm_app_service_plan.test.id}" + storage_connection_string = "${azurerm_storage_account.test.primary_connection_string}" + + site_config { + linux_fx_version = "DOCKER|(golang:latest)" + } +} +`, rInt, location, rString) +} + func testAccAzureRMFunctionApp_connectionStrings(rInt int, rString, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { @@ -995,7 +1083,7 @@ resource 
"azurerm_function_app" "test" { app_service_plan_id = "${azurerm_app_service_plan.test.id}" storage_connection_string = "${azurerm_storage_account.test.primary_connection_string}" - app_settings { + app_settings = { "hello" = "world" } @@ -1006,7 +1094,7 @@ resource "azurerm_function_app" "test" { `, rInt, location, rString) } -func testAccAzureRMFunctionApp_appSettingsAlwaysOnConnectionStrings(rInt int, rString, location string) string { +func testAccAzureRMFunctionApp_appSettingsAlwaysOnLinuxFxVersion(rInt int, rString, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { name = "acctestRG-%[1]d" @@ -1024,27 +1112,85 @@ resource "azurerm_storage_account" "test" { resource "azurerm_app_service_plan" "test" { name = "acctestASP-%[1]d" location = "${azurerm_resource_group.test.location}" + kind = "Linux" resource_group_name = "${azurerm_resource_group.test.name}" sku { tier = "Standard" size = "S1" } + + properties { + reserved = true + } } resource "azurerm_function_app" "test" { name = "acctest-%[1]d-func" location = "${azurerm_resource_group.test.location}" + version = "~2" resource_group_name = "${azurerm_resource_group.test.name}" app_service_plan_id = "${azurerm_app_service_plan.test.id}" storage_connection_string = "${azurerm_storage_account.test.primary_connection_string}" + + app_settings = { + "hello" = "world" + } + + site_config { + always_on = true + linux_fx_version = "DOCKER|(golang:latest)" + } +} +`, rInt, location, rString) +} + +func testAccAzureRMFunctionApp_appSettingsAlwaysOnLinuxFxVersionConnectionStrings(rInt int, rString, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%[1]d" + location = "%[2]s" +} - app_settings { +resource "azurerm_storage_account" "test" { + name = "acctestsa%[3]s" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + account_tier = "Standard" + 
account_replication_type = "LRS" +} + +resource "azurerm_app_service_plan" "test" { + name = "acctestASP-%[1]d" + location = "${azurerm_resource_group.test.location}" + kind = "Linux" + resource_group_name = "${azurerm_resource_group.test.name}" + + sku { + tier = "Standard" + size = "S1" + } + + properties { + reserved = true + } +} + +resource "azurerm_function_app" "test" { + name = "acctest-%[1]d-func" + location = "${azurerm_resource_group.test.location}" + version = "~2" + resource_group_name = "${azurerm_resource_group.test.name}" + app_service_plan_id = "${azurerm_app_service_plan.test.id}" + storage_connection_string = "${azurerm_storage_account.test.primary_connection_string}" + + app_settings = { "hello" = "world" } site_config { - always_on = true + always_on = true + linux_fx_version = "DOCKER|(golang:latest)" } connection_string { diff --git a/azurerm/resource_arm_image_test.go b/azurerm/resource_arm_image_test.go index 2616e8db2ade..f356b3f5c7f2 100644 --- a/azurerm/resource_arm_image_test.go +++ b/azurerm/resource_arm_image_test.go @@ -446,7 +446,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "Dev" } } @@ -490,7 +490,7 @@ resource "azurerm_virtual_machine" "testsource" { disable_password_authentication = false } - tags { + tags = { environment = "Dev" cost-center = "Ops" } @@ -547,7 +547,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "Dev" } } @@ -591,7 +591,7 @@ resource "azurerm_virtual_machine" "testsource" { disable_password_authentication = false } - tags { + tags = { environment = "Dev" cost-center = "Ops" } @@ -610,7 +610,7 @@ resource "azurerm_image" "test" { caching = "None" } - tags { + tags = { environment = "Dev" cost-center = "Ops" } @@ -637,7 +637,7 @@ resource "azurerm_image" "import" { caching = "None" } - tags { + tags = { environment = "Dev" 
cost-center = "Ops" } @@ -694,7 +694,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "Dev" } } @@ -738,7 +738,7 @@ resource "azurerm_virtual_machine" "testsource" { disable_password_authentication = false } - tags { + tags = { environment = "Dev" cost-center = "Ops" } @@ -795,7 +795,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "Dev" } } @@ -839,7 +839,7 @@ resource "azurerm_virtual_machine" "testsource" { disable_password_authentication = false } - tags { + tags = { environment = "Dev" cost-center = "Ops" } @@ -858,7 +858,7 @@ resource "azurerm_image" "testdestination" { caching = "None" } - tags { + tags = { environment = "Dev" cost-center = "Ops" } @@ -903,7 +903,7 @@ resource "azurerm_virtual_machine" "testdestination" { disable_password_authentication = false } - tags { + tags = { environment = "Dev" cost-center = "Ops" } @@ -983,7 +983,7 @@ resource "azurerm_virtual_machine" "testsource" { disable_password_authentication = false } - tags { + tags = { environment = "Dev" cost-center = "Ops" } @@ -1063,7 +1063,7 @@ resource "azurerm_virtual_machine" "testsource" { disable_password_authentication = false } - tags { + tags = { environment = "Dev" cost-center = "Ops" } @@ -1075,7 +1075,7 @@ resource "azurerm_image" "testdestination" { resource_group_name = "${azurerm_resource_group.test.name}" source_virtual_machine_id = "${azurerm_virtual_machine.testsource.id}" - tags { + tags = { environment = "acctest" cost-center = "ops" } @@ -1120,7 +1120,7 @@ resource "azurerm_virtual_machine" "testdestination" { disable_password_authentication = false } - tags { + tags = { environment = "Dev" cost-center = "Ops" } @@ -1177,7 +1177,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "Dev" } } @@ 
-1221,7 +1221,7 @@ resource "azurerm_virtual_machine" "testsource" { disable_password_authentication = false } - tags { + tags = { environment = "Dev" cost-center = "Ops" } @@ -1278,7 +1278,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "Dev" } } @@ -1322,7 +1322,7 @@ resource "azurerm_virtual_machine" "testsource" { disable_password_authentication = false } - tags { + tags = { environment = "Dev" cost-center = "Ops" } @@ -1341,7 +1341,7 @@ resource "azurerm_image" "testdestination" { caching = "None" } - tags { + tags = { environment = "Dev" cost-center = "Ops" } diff --git a/azurerm/resource_arm_iothub.go b/azurerm/resource_arm_iothub.go index c5b506c3e6bd..4a6bbd7d0849 100755 --- a/azurerm/resource_arm_iothub.go +++ b/azurerm/resource_arm_iothub.go @@ -4,13 +4,13 @@ import ( "context" "fmt" "log" + "regexp" "strconv" "time" "strings" - "github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub" - "github.com/Azure/azure-sdk-for-go/services/iothub/mgmt/2018-04-01/devices" + "github.com/Azure/azure-sdk-for-go/services/preview/iothub/mgmt/2018-12-01-preview/devices" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" @@ -192,8 +192,9 @@ func resourceArmIotHub() *schema.Resource { Optional: true, DiffSuppressFunc: suppress.CaseDifference, ValidateFunc: validation.StringInSlice([]string{ - string(eventhub.Avro), - string(eventhub.AvroDeflate), + string(devices.Avro), + string(devices.AvroDeflate), + string(devices.JSON), }, true), }, "file_name_format": { @@ -211,9 +212,12 @@ func resourceArmIotHub() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(0, 64), + Type: schema.TypeString, + Required: true, + ValidateFunc: 
validation.StringMatch( + regexp.MustCompile("^[-_.a-zA-Z0-9]{1,64}$"), + "Route Name name can only include alphanumeric characters, periods, underscores, hyphens, has a maximum length of 64 characters, and must be unique.", + ), }, "source": { Type: schema.TypeString, @@ -248,6 +252,50 @@ func resourceArmIotHub() *schema.Resource { }, }, + "fallback_route": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source": { + Type: schema.TypeString, + Optional: true, + Default: "DeviceMessages", + ValidateFunc: validation.StringInSlice([]string{ + "DeviceJobLifecycleEvents", + "DeviceLifecycleEvents", + "DeviceMessages", + "Invalid", + "TwinChangeEvents", + }, false), + }, + "condition": { + // The condition is a string value representing device-to-cloud message routes query expression + // https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-query-language#device-to-cloud-message-routes-query-expressions + Type: schema.TypeString, + Optional: true, + Default: "true", + }, + "endpoint_names": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringLenBetween(0, 64), + }, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + }, + }, + }, + "tags": tagsSchema(), }, } @@ -292,6 +340,7 @@ func resourceArmIotHubCreateUpdate(d *schema.ResourceData, meta interface{}) err location := azureRMNormalizeLocation(d.Get("location").(string)) skuInfo := expandIoTHubSku(d) tags := d.Get("tags").(map[string]interface{}) + fallbackRoute := expandIoTHubFallbackRoute(d) endpoints, err := expandIoTHubEndpoints(d, subscriptionID) if err != nil { @@ -306,8 +355,9 @@ func resourceArmIotHubCreateUpdate(d *schema.ResourceData, meta interface{}) err Sku: skuInfo, Properties: &devices.IotHubProperties{ Routing: &devices.RoutingProperties{ - Endpoints: endpoints, - Routes: 
routes, + Endpoints: endpoints, + Routes: routes, + FallbackRoute: fallbackRoute, }, }, Tags: expandTags(tags), @@ -328,6 +378,7 @@ func resourceArmIotHubCreateUpdate(d *schema.ResourceData, meta interface{}) err } d.SetId(*resp.ID) + return resourceArmIotHubRead(d, meta) } @@ -393,6 +444,11 @@ func resourceArmIotHubRead(d *schema.ResourceData, meta interface{}) error { if err := d.Set("route", routes); err != nil { return fmt.Errorf("Error setting `route` in IoTHub %q: %+v", name, err) } + + fallbackRoute := flattenIoTHubFallbackRoute(properties.Routing) + if err := d.Set("fallback_route", fallbackRoute); err != nil { + return fmt.Errorf("Error setting `fallbackRoute` in IoTHub %q: %+v", name, err) + } } d.Set("name", name) @@ -479,10 +535,6 @@ func expandIoTHubRoutes(d *schema.ResourceData) *[]devices.RouteProperties { condition := route["condition"].(string) endpointNamesRaw := route["endpoint_names"].([]interface{}) - endpointsNames := make([]string, 0) - for _, n := range endpointNamesRaw { - endpointsNames = append(endpointsNames, n.(string)) - } isEnabled := route["enabled"].(bool) @@ -490,7 +542,7 @@ func expandIoTHubRoutes(d *schema.ResourceData) *[]devices.RouteProperties { Name: &name, Source: source, Condition: &condition, - EndpointNames: &endpointsNames, + EndpointNames: utils.ExpandStringArray(endpointNamesRaw), IsEnabled: &isEnabled, }) } @@ -532,7 +584,7 @@ func expandIoTHubEndpoints(d *schema.ResourceData, subscriptionId string) (*devi FileNameFormat: &fileNameFormat, BatchFrequencyInSeconds: &batchFrequencyInSeconds, MaxChunkSizeInBytes: &maxChunkSizeInBytes, - Encoding: &encoding, + Encoding: devices.Encoding(encoding), } storageContainerProperties = append(storageContainerProperties, storageContainer) @@ -573,6 +625,26 @@ func expandIoTHubEndpoints(d *schema.ResourceData, subscriptionId string) (*devi }, nil } +func expandIoTHubFallbackRoute(d *schema.ResourceData) *devices.FallbackRouteProperties { + fallbackRouteList := 
d.Get("fallback_route").([]interface{}) + if len(fallbackRouteList) == 0 { + return nil + } + + fallbackRouteMap := fallbackRouteList[0].(map[string]interface{}) + + source := fallbackRouteMap["source"].(string) + condition := fallbackRouteMap["condition"].(string) + isEnabled := fallbackRouteMap["enabled"].(bool) + + return &devices.FallbackRouteProperties{ + Source: &source, + Condition: &condition, + EndpointNames: utils.ExpandStringArray(fallbackRouteMap["endpoint_names"].([]interface{})), + IsEnabled: &isEnabled, + } +} + func expandIoTHubSku(d *schema.ResourceData) *devices.IotHubSkuInfo { skuList := d.Get("sku").([]interface{}) skuMap := skuList[0].(map[string]interface{}) @@ -654,9 +726,8 @@ func flattenIoTHubEndpoint(input *devices.RoutingProperties) []interface{} { if chunkSize := container.MaxChunkSizeInBytes; chunkSize != nil { output["max_chunk_size_in_bytes"] = *chunkSize } - if encoding := container.Encoding; encoding != nil { - output["encoding"] = *encoding - } + + output["encoding"] = string(container.Encoding) output["type"] = "AzureIotHub.StorageContainer" results = append(results, output) @@ -740,6 +811,29 @@ func flattenIoTHubRoute(input *devices.RoutingProperties) []interface{} { return results } +func flattenIoTHubFallbackRoute(input *devices.RoutingProperties) []interface{} { + if input.FallbackRoute == nil { + return []interface{}{} + } + + output := make(map[string]interface{}) + route := input.FallbackRoute + + if condition := route.Condition; condition != nil { + output["condition"] = *condition + } + if isEnabled := route.IsEnabled; isEnabled != nil { + output["enabled"] = *isEnabled + } + if source := route.Source; source != nil { + output["source"] = *source + } + + output["endpoint_names"] = utils.FlattenStringArray(route.EndpointNames) + + return []interface{}{output} +} + func validateIoTHubEndpointName(v interface{}, _ string) (warnings []string, errors []error) { value := v.(string) diff --git 
a/azurerm/resource_arm_iothub_consumer_group_test.go b/azurerm/resource_arm_iothub_consumer_group_test.go index 6ebe84046871..c24be32ffde3 100644 --- a/azurerm/resource_arm_iothub_consumer_group_test.go +++ b/azurerm/resource_arm_iothub_consumer_group_test.go @@ -165,7 +165,7 @@ resource "azurerm_iothub" "test" { capacity = "1" } - tags { + tags = { "purpose" = "testing" } } diff --git a/azurerm/resource_arm_iothub_test.go b/azurerm/resource_arm_iothub_test.go index b76529adacb4..c89be0f3fd78 100644 --- a/azurerm/resource_arm_iothub_test.go +++ b/azurerm/resource_arm_iothub_test.go @@ -113,6 +113,33 @@ func TestAccAzureRMIotHub_customRoutes(t *testing.T) { }) } +func TestAccAzureRMIotHub_fallbackRoute(t *testing.T) { + resourceName := "azurerm_iothub.test" + rInt := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMIotHubDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMIotHub_fallbackRoute(rInt, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMIotHubExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "fallback_route.0.source", "DeviceMessages"), + resource.TestCheckResourceAttr(resourceName, "fallback_route.0.endpoint_names.#", "1"), + resource.TestCheckResourceAttr(resourceName, "fallback_route.0.enabled", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testCheckAzureRMIotHubDestroy(s *terraform.State) error { client := testAccProvider.Meta().(*ArmClient).iothubResourceClient ctx := testAccProvider.Meta().(*ArmClient).StopContext @@ -185,7 +212,7 @@ resource "azurerm_iothub" "test" { capacity = "1" } - tags { + tags = { "purpose" = "testing" } } @@ -208,7 +235,7 @@ resource "azurerm_iothub" "import" { capacity = "1" } - tags { + tags = { "purpose" = "testing" } } @@ -233,7 +260,7 @@ resource 
"azurerm_iothub" "test" { capacity = "1" } - tags { + tags = { "purpose" = "testing" } } @@ -292,9 +319,40 @@ resource "azurerm_iothub" "test" { enabled = true } - tags { + tags = { "purpose" = "testing" } } `, rInt, location, rStr, rInt) } + +func testAccAzureRMIotHub_fallbackRoute(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_iothub" "test" { + name = "acctestIoTHub-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + + sku { + name = "S1" + tier = "Standard" + capacity = "1" + } + + fallback_route { + source = "DeviceMessages" + endpoint_names = ["events"] + enabled = true + } + + tags = { + "purpose" = "testing" + } +} +`, rInt, location, rInt) +} diff --git a/azurerm/resource_arm_key_vault.go b/azurerm/resource_arm_key_vault.go index 1eafc69a9700..38fbf51a986b 100644 --- a/azurerm/resource_arm_key_vault.go +++ b/azurerm/resource_arm_key_vault.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" - "github.com/satori/go.uuid" + uuid "github.com/satori/go.uuid" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/set" diff --git a/azurerm/resource_arm_key_vault_access_policy.go b/azurerm/resource_arm_key_vault_access_policy.go index 0bc545255ce6..b4e31181f6f2 100644 --- a/azurerm/resource_arm_key_vault_access_policy.go +++ b/azurerm/resource_arm_key_vault_access_policy.go @@ -3,11 +3,14 @@ package azurerm import ( "fmt" "log" + "regexp" "strings" + "github.com/satori/go.uuid" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" + 
"github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2018-02-14/keyvault" "github.com/hashicorp/terraform/helper/schema" - "github.com/satori/go.uuid" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" @@ -24,13 +27,52 @@ func resourceArmKeyVaultAccessPolicy() *schema.Resource { }, Schema: map[string]*schema.Schema{ + "key_vault_id": { + Type: schema.TypeString, + Optional: true, //todo required in 2.0 + Computed: true, //todo removed in 2.0 + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + ConflictsWith: []string{"vault_name"}, + }, + + //todo remove in 2.0 "vault_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Deprecated: "This property has been deprecated in favour of the key_vault_id property. 
This will prevent a class of bugs as described in https://github.com/terraform-providers/terraform-provider-azurerm/issues/2396 and will be removed in version 2.0 of the provider", + ValidateFunc: validate.NoEmptyStrings, + ConflictsWith: []string{"key_vault_id"}, }, - "resource_group_name": resourceGroupNameSchema(), + //todo remove in 2.0 + "resource_group_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Deprecated: "This property has been deprecated as the resource group is now pulled from the vault ID and will be removed in version 2.0 of the provider", + ValidateFunc: func(v interface{}, k string) (warnings []string, errors []error) { + value := v.(string) + + if len(value) > 80 { + errors = append(errors, fmt.Errorf("%q may not exceed 80 characters in length", k)) + } + + if strings.HasSuffix(value, ".") { + errors = append(errors, fmt.Errorf("%q may not end with a period", k)) + } + + // regex pulled from https://docs.microsoft.com/en-us/rest/api/resources/resourcegroups/createorupdate + if matched := regexp.MustCompile(`^[-\w\._\(\)]+$`).Match([]byte(value)); !matched { + errors = append(errors, fmt.Errorf("%q may only contain alphanumeric characters, dash, underscores, parentheses and periods", k)) + } + + return warnings, errors + }, + }, "tenant_id": { Type: schema.TypeString, @@ -67,8 +109,9 @@ func resourceArmKeyVaultAccessPolicyCreateOrDelete(d *schema.ResourceData, meta ctx := meta.(*ArmClient).StopContext log.Printf("[INFO] Preparing arguments for Key Vault Access Policy: %s.", action) + vaultId := d.Get("key_vault_id").(string) vaultName := d.Get("vault_name").(string) - resGroup := d.Get("resource_group_name").(string) + resourceGroup := d.Get("resource_group_name").(string) tenantIdRaw := d.Get("tenant_id").(string) tenantId, err := uuid.FromString(tenantIdRaw) @@ -79,15 +122,40 @@ func resourceArmKeyVaultAccessPolicyCreateOrDelete(d *schema.ResourceData, meta applicationIdRaw := 
d.Get("application_id").(string) objectId := d.Get("object_id").(string) - keyVault, err := client.Get(ctx, resGroup, vaultName) + if vaultName == "" { + if vaultId == "" { + return fmt.Errorf("one of `key_vault_id` or `vault_name` must be set") + } + id, err2 := azure.ParseAzureResourceID(vaultId) + if err2 != nil { + return err2 + } + + resourceGroup = id.ResourceGroup + + vaultNameTemp, ok := id.Path["vaults"] + if !ok { + return fmt.Errorf("key_value_id does not contain `vaults`: %q", vaultId) + } + vaultName = vaultNameTemp + + } else if resourceGroup == "" { + return fmt.Errorf("one of `resource_group_name` must be set when `vault_name` is used") + } + + keyVault, err := client.Get(ctx, resourceGroup, vaultName) if err != nil { - if utils.ResponseWasNotFound(keyVault.Response) { - log.Printf("[DEBUG] Parent Key Vault %q was not found in Resource Group %q - removing from state!", vaultName, resGroup) + // If the key vault does not exist but this is not a new resource, the policy + // which previously existed was deleted with the key vault, so reflect that in + // state. If this is a new resource and key vault does not exist, it's likely + // a bad ID was given. 
+ if utils.ResponseWasNotFound(keyVault.Response) && !d.IsNewResource() { + log.Printf("[DEBUG] Parent Key Vault %q was not found in Resource Group %q - removing from state!", vaultName, resourceGroup) d.SetId("") return nil } - return fmt.Errorf("Error retrieving Key Vault %q (Resource Group %q): %+v", vaultName, resGroup, err) + return fmt.Errorf("Error retrieving Key Vault %q (Resource Group %q): %+v", vaultName, resourceGroup, err) } // This is because azure doesn't have an 'id' for a keyvault access policy @@ -153,7 +221,7 @@ func resourceArmKeyVaultAccessPolicyCreateOrDelete(d *schema.ResourceData, meta if applicationIdRaw != "" { applicationId, err2 := uuid.FromString(applicationIdRaw) if err2 != nil { - return fmt.Errorf("Error parsing Appliciation ID %q as a UUID: %+v", applicationIdRaw, err2) + return fmt.Errorf("Error parsing Application ID %q as a UUID: %+v", applicationIdRaw, err2) } accessPolicy.ApplicationID = &applicationId @@ -168,18 +236,17 @@ func resourceArmKeyVaultAccessPolicyCreateOrDelete(d *schema.ResourceData, meta }, } - _, err = client.UpdateAccessPolicy(ctx, resGroup, vaultName, action, parameters) - if err != nil { - return fmt.Errorf("Error updating Access Policy (Object ID %q / Application ID %q) for Key Vault %q (Resource Group %q): %+v", objectId, applicationIdRaw, vaultName, resGroup, err) + if _, err = client.UpdateAccessPolicy(ctx, resourceGroup, vaultName, action, parameters); err != nil { + return fmt.Errorf("Error updating Access Policy (Object ID %q / Application ID %q) for Key Vault %q (Resource Group %q): %+v", objectId, applicationIdRaw, vaultName, resourceGroup, err) } - read, err := client.Get(ctx, resGroup, vaultName) + read, err := client.Get(ctx, resourceGroup, vaultName) if err != nil { - return fmt.Errorf("Error retrieving Key Vault %q (Resource Group %q): %+v", vaultName, resGroup, err) + return fmt.Errorf("Error retrieving Key Vault %q (Resource Group %q): %+v", vaultName, resourceGroup, err) } if read.ID == nil { 
- return fmt.Errorf("Cannot read KeyVault %q (Resource Group %q) ID", vaultName, resGroup) + return fmt.Errorf("Cannot read KeyVault %q (Resource Group %q) ID", vaultName, resourceGroup) } if d.IsNewResource() { @@ -237,6 +304,7 @@ func resourceArmKeyVaultAccessPolicyRead(d *schema.ResourceData, meta interface{ return nil } + d.Set("key_vault_id", resp.ID) d.Set("vault_name", resp.Name) d.Set("resource_group_name", resGroup) d.Set("object_id", objectId) diff --git a/azurerm/resource_arm_key_vault_access_policy_test.go b/azurerm/resource_arm_key_vault_access_policy_test.go index c26559c17a2f..137ad3899e6c 100644 --- a/azurerm/resource_arm_key_vault_access_policy_test.go +++ b/azurerm/resource_arm_key_vault_access_policy_test.go @@ -2,6 +2,7 @@ package azurerm import ( "fmt" + "regexp" "testing" "github.com/hashicorp/terraform/helper/acctest" @@ -38,7 +39,40 @@ func TestAccAzureRMKeyVaultAccessPolicy_basic(t *testing.T) { }) } +func TestAccAzureRMKeyVaultAccessPolicy_basicClassic(t *testing.T) { + resourceName := "azurerm_key_vault_access_policy.test" + rs := acctest.RandString(6) + config := testAccAzureRMKeyVaultAccessPolicy_basicClassic(rs, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKeyVaultDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKeyVaultAccessPolicyExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "key_permissions.0", "get"), + resource.TestCheckResourceAttr(resourceName, "secret_permissions.0", "get"), + resource.TestCheckResourceAttr(resourceName, "secret_permissions.1", "set"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccAzureRMKeyVaultAccessPolicy_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't 
required to be imported") + return + } + resourceName := "azurerm_key_vault_access_policy.test" rs := acctest.RandString(6) location := testLocation() @@ -142,22 +176,46 @@ func TestAccAzureRMKeyVaultAccessPolicy_update(t *testing.T) { }) } +func TestAccAzureRMKeyVaultAccessPolicy_nonExistentVault(t *testing.T) { + rs := acctest.RandString(6) + config := testAccAzureRMKeyVaultAccessPolicy_nonExistentVault(rs, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKeyVaultDestroy, + Steps: []resource.TestStep{ + { + Config: config, + ExpectNonEmptyPlan: true, + ExpectError: regexp.MustCompile(`Error retrieving Key Vault`), + }, + }, + }) +} + func testCheckAzureRMKeyVaultAccessPolicyExists(resourceName string) resource.TestCheckFunc { return func(s *terraform.State) error { + client := testAccProvider.Meta().(*ArmClient).keyVaultClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + // Ensure we have enough information in state to look up in API rs, ok := s.RootModule().Resources[resourceName] if !ok { return fmt.Errorf("Not found: %s", resourceName) } - vaultName := rs.Primary.Attributes["vault_name"] - resGroup := rs.Primary.Attributes["resource_group_name"] + id, err := parseAzureResourceID(rs.Primary.ID) + + if err != nil { + return err + } + resGroup := id.ResourceGroup + vaultName := id.Path["vaults"] + objectId := rs.Primary.Attributes["object_id"] applicationId := rs.Primary.Attributes["application_id"] - client := testAccProvider.Meta().(*ArmClient).keyVaultClient - ctx := testAccProvider.Meta().(*ArmClient).StopContext - resp, err := client.Get(ctx, resGroup, vaultName) if err != nil { if utils.ResponseWasNotFound(resp.Response) { @@ -184,9 +242,32 @@ func testAccAzureRMKeyVaultAccessPolicy_basic(rString string, location string) s return fmt.Sprintf(` %s +resource "azurerm_key_vault_access_policy" "test" { + key_vault_id = 
"${azurerm_key_vault.test.id}" + + key_permissions = [ + "get", + ] + + secret_permissions = [ + "get", + "set", + ] + + tenant_id = "${data.azurerm_client_config.current.tenant_id}" + object_id = "${data.azurerm_client_config.current.service_principal_object_id}" +} +`, template) +} + +func testAccAzureRMKeyVaultAccessPolicy_basicClassic(rString string, location string) string { + template := testAccAzureRMKeyVaultAccessPolicy_template(rString, location) + return fmt.Sprintf(` +%s + resource "azurerm_key_vault_access_policy" "test" { vault_name = "${azurerm_key_vault.test.name}" - resource_group_name = "${azurerm_resource_group.test.name}" + resource_group_name = "${azurerm_key_vault.test.resource_group_name}" key_permissions = [ "get", @@ -209,10 +290,9 @@ func testAccAzureRMKeyVaultAccessPolicy_requiresImport(rString string, location %s resource "azurerm_key_vault_access_policy" "import" { - vault_name = "${azurerm_key_vault_access_policy.test.vault_name}" - resource_group_name = "${azurerm_key_vault_access_policy.test.resource_group_name}" - tenant_id = "${azurerm_key_vault_access_policy.test.tenant_id}" - object_id = "${azurerm_key_vault_access_policy.test.object_id}" + key_vault_id = "${azurerm_key_vault.test.id}" + tenant_id = "${azurerm_key_vault_access_policy.test.tenant_id}" + object_id = "${azurerm_key_vault_access_policy.test.object_id}" key_permissions = [ "get", @@ -232,8 +312,7 @@ func testAccAzureRMKeyVaultAccessPolicy_multiple(rString string, location string %s resource "azurerm_key_vault_access_policy" "test_with_application_id" { - vault_name = "${azurerm_key_vault.test.name}" - resource_group_name = "${azurerm_resource_group.test.name}" + key_vault_id = "${azurerm_key_vault.test.id}" key_permissions = [ "create", @@ -256,8 +335,7 @@ resource "azurerm_key_vault_access_policy" "test_with_application_id" { } resource "azurerm_key_vault_access_policy" "test_no_application_id" { - vault_name = "${azurerm_key_vault.test.name}" - resource_group_name = 
"${azurerm_resource_group.test.name}" + key_vault_id = "${azurerm_key_vault.test.id}" key_permissions = [ "list", @@ -321,9 +399,51 @@ resource "azurerm_key_vault" "test" { name = "premium" } - tags { + tags = { environment = "Production" } } `, rString, location, rString) } + +func testAccAzureRMKeyVaultAccessPolicy_nonExistentVault(rString string, location string) string { + return fmt.Sprintf(` +data "azurerm_client_config" "current" {} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%s" + location = "%s" +} + +resource "azurerm_key_vault" "test" { + name = "acctestkv-%s" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + tenant_id = "${data.azurerm_client_config.current.tenant_id}" + + sku { + name = "standard" + } + + tags = { + environment = "Production" + } +} + +resource "azurerm_key_vault_access_policy" "test" { + # Must appear to be URL, but not actually exist - appending a string works + key_vault_id = "${azurerm_key_vault.test.id}NOPE" + + tenant_id = "${data.azurerm_client_config.current.tenant_id}" + object_id = "${data.azurerm_client_config.current.service_principal_object_id}" + + key_permissions = [ + "get", + ] + + secret_permissions = [ + "get", + ] +} +`, rString, location, rString) +} diff --git a/azurerm/resource_arm_key_vault_certificate.go b/azurerm/resource_arm_key_vault_certificate.go index 164e716c7319..10f73b1f9ac2 100644 --- a/azurerm/resource_arm_key_vault_certificate.go +++ b/azurerm/resource_arm_key_vault_certificate.go @@ -19,14 +19,38 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) +//todo refactor and find a home for this wayward func +func resourceArmKeyVaultChildResourceImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + client := meta.(*ArmClient).keyVaultClient + ctx := meta.(*ArmClient).StopContext + + id, err := azure.ParseKeyVaultChildID(d.Id()) + if err != nil { + 
return []*schema.ResourceData{d}, fmt.Errorf("Error Unable to parse ID (%s) for Key Vault Child import: %v", d.Id(), err) + } + + kvid, err := azure.GetKeyVaultIDFromBaseUrl(ctx, client, id.KeyVaultBaseUrl) + if err != nil { + return []*schema.ResourceData{d}, fmt.Errorf("Error retrieving the Resource ID the Key Vault at URL %q: %s", id.KeyVaultBaseUrl, err) + } + if id == nil { + return []*schema.ResourceData{d}, fmt.Errorf("Unable to locate the Resource ID for the Key Vault at URL %q: %s", id.KeyVaultBaseUrl, err) + } + + d.Set("key_vault_id", kvid) + + return []*schema.ResourceData{d}, nil +} + func resourceArmKeyVaultCertificate() *schema.Resource { return &schema.Resource{ // TODO: support Updating once we have more information about what can be updated Create: resourceArmKeyVaultCertificateCreate, Read: resourceArmKeyVaultCertificateRead, Delete: resourceArmKeyVaultCertificateDelete, + Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + State: resourceArmKeyVaultChildResourceImporter, }, Schema: map[string]*schema.Schema{ @@ -37,11 +61,24 @@ func resourceArmKeyVaultCertificate() *schema.Resource { ValidateFunc: azure.ValidateKeyVaultChildName, }, + "key_vault_id": { + Type: schema.TypeString, + Optional: true, //todo required in 2.0 + Computed: true, //todo removed in 2.0 + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + ConflictsWith: []string{"vault_uri"}, + }, + + //todo remove in 2.0 "vault_uri": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validate.URLIsHTTPS, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Deprecated: "This property has been deprecated in favour of the key_vault_id property. 
This will prevent a class of bugs as described in https://github.com/terraform-providers/terraform-provider-azurerm/issues/2396 and will be removed in version 2.0 of the provider", + ValidateFunc: validate.URLIsHTTPS, + ConflictsWith: []string{"key_vault_id"}, }, "certificate": { @@ -298,12 +335,33 @@ func resourceArmKeyVaultCertificate() *schema.Resource { } func resourceArmKeyVaultCertificateCreate(d *schema.ResourceData, meta interface{}) error { + vaultClient := meta.(*ArmClient).keyVaultClient client := meta.(*ArmClient).keyVaultManagementClient ctx := meta.(*ArmClient).StopContext name := d.Get("name").(string) + keyVaultId := d.Get("key_vault_id").(string) keyVaultBaseUrl := d.Get("vault_uri").(string) + if keyVaultBaseUrl == "" { + if keyVaultId == "" { + return fmt.Errorf("one of `key_vault_id` or `vault_uri` must be set") + } + + pKeyVaultBaseUrl, err := azure.GetKeyVaultBaseUrlFromID(ctx, vaultClient, keyVaultId) + if err != nil { + return fmt.Errorf("Error looking up Certificate %q vault url form id %q: %+v", name, keyVaultId, err) + } + + keyVaultBaseUrl = pKeyVaultBaseUrl + } else { + id, err := azure.GetKeyVaultIDFromBaseUrl(ctx, vaultClient, keyVaultBaseUrl) + if err != nil { + return fmt.Errorf("Error unable to find key vault ID from URL %q for certificate %q: %+v", keyVaultBaseUrl, name, err) + } + d.Set("key_vault_id", id) + } + if requireResourcesToBeImported { existing, err := client.GetCertificate(ctx, keyVaultBaseUrl, name, "") if err != nil { @@ -381,6 +439,7 @@ func keyVaultCertificateCreationRefreshFunc(ctx context.Context, client keyvault } func resourceArmKeyVaultCertificateRead(d *schema.ResourceData, meta interface{}) error { + keyVaultClient := meta.(*ArmClient).keyVaultClient client := meta.(*ArmClient).keyVaultManagementClient ctx := meta.(*ArmClient).StopContext @@ -389,8 +448,27 @@ func resourceArmKeyVaultCertificateRead(d *schema.ResourceData, meta interface{} return err } - cert, err := client.GetCertificate(ctx, 
id.KeyVaultBaseUrl, id.Name, "") + keyVaultId, err := azure.GetKeyVaultIDFromBaseUrl(ctx, keyVaultClient, id.KeyVaultBaseUrl) + if err != nil { + return fmt.Errorf("Error retrieving the Resource ID the Key Vault at URL %q: %s", id.KeyVaultBaseUrl, err) + } + if keyVaultId == nil { + log.Printf("[DEBUG] Unable to determine the Resource ID for the Key Vault at URL %q - removing from state!", id.KeyVaultBaseUrl) + d.SetId("") + return nil + } + ok, err := azure.KeyVaultExists(ctx, keyVaultClient, *keyVaultId) + if err != nil { + return fmt.Errorf("Error checking if key vault %q for Certificate %q in Vault at url %q exists: %v", *keyVaultId, id.Name, id.KeyVaultBaseUrl, err) + } + if !ok { + log.Printf("[DEBUG] Certificate %q Key Vault %q was not found in Key Vault at URI %q - removing from state", id.Name, *keyVaultId, id.KeyVaultBaseUrl) + d.SetId("") + return nil + } + + cert, err := client.GetCertificate(ctx, id.KeyVaultBaseUrl, id.Name, "") if err != nil { if utils.ResponseWasNotFound(cert.Response) { log.Printf("[DEBUG] Certificate %q was not found in Key Vault at URI %q - removing from state", id.Name, id.KeyVaultBaseUrl) @@ -398,7 +476,7 @@ func resourceArmKeyVaultCertificateRead(d *schema.ResourceData, meta interface{} return nil } - return err + return fmt.Errorf("Error reading Key Vault Certificate: %+v", err) } d.Set("name", id.Name) @@ -431,6 +509,7 @@ func resourceArmKeyVaultCertificateRead(d *schema.ResourceData, meta interface{} } func resourceArmKeyVaultCertificateDelete(d *schema.ResourceData, meta interface{}) error { + keyVaultClient := meta.(*ArmClient).keyVaultClient client := meta.(*ArmClient).keyVaultManagementClient ctx := meta.(*ArmClient).StopContext @@ -439,6 +518,24 @@ func resourceArmKeyVaultCertificateDelete(d *schema.ResourceData, meta interface return err } + keyVaultId, err := azure.GetKeyVaultIDFromBaseUrl(ctx, keyVaultClient, id.KeyVaultBaseUrl) + if err != nil { + return fmt.Errorf("Error retrieving the Resource ID the Key Vault at 
URL %q: %s", id.KeyVaultBaseUrl, err) + } + if keyVaultId == nil { + return fmt.Errorf("Unable to determine the Resource ID for the Key Vault at URL %q", id.KeyVaultBaseUrl) + } + + ok, err := azure.KeyVaultExists(ctx, keyVaultClient, *keyVaultId) + if err != nil { + return fmt.Errorf("Error checking if key vault %q for Certificate %q in Vault at url %q exists: %v", *keyVaultId, id.Name, id.KeyVaultBaseUrl, err) + } + if !ok { + log.Printf("[DEBUG] Certificate %q Key Vault %q was not found in Key Vault at URI %q - removing from state", id.Name, *keyVaultId, id.KeyVaultBaseUrl) + d.SetId("") + return nil + } + resp, err := client.DeleteCertificate(ctx, id.KeyVaultBaseUrl, id.Name) if err != nil { if utils.ResponseWasNotFound(resp.Response) { diff --git a/azurerm/resource_arm_key_vault_certificate_test.go b/azurerm/resource_arm_key_vault_certificate_test.go index 4b49cde6c00c..f52ef5c45b9a 100644 --- a/azurerm/resource_arm_key_vault_certificate_test.go +++ b/azurerm/resource_arm_key_vault_certificate_test.go @@ -2,8 +2,11 @@ package azurerm import ( "fmt" + "log" "testing" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" @@ -37,6 +40,33 @@ func TestAccAzureRMKeyVaultCertificate_basicImportPFX(t *testing.T) { }) } +func TestAccAzureRMKeyVaultCertificate_basicImportPFXClassic(t *testing.T) { + resourceName := "azurerm_key_vault_certificate.test" + rs := acctest.RandString(6) + config := testAccAzureRMKeyVaultCertificate_basicImportPFXClassic(rs, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKeyVaultCertificateDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKeyVaultCertificateExists(resourceName), + 
resource.TestCheckResourceAttrSet(resourceName, "certificate_data"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"certificate"}, + }, + }, + }) +} + func TestAccAzureRMKeyVaultCertificate_requiresImport(t *testing.T) { if !requireResourcesToBeImported { t.Skip("Skipping since resources aren't required to be imported") @@ -225,6 +255,16 @@ func testCheckAzureRMKeyVaultCertificateDestroy(s *terraform.State) error { name := rs.Primary.Attributes["name"] vaultBaseUrl := rs.Primary.Attributes["vault_uri"] + keyVaultId := rs.Primary.Attributes["key_vault_id"] + + ok, err := azure.KeyVaultExists(ctx, testAccProvider.Meta().(*ArmClient).keyVaultClient, keyVaultId) + if err != nil { + return fmt.Errorf("Error checking if key vault %q for Certificate %q in Vault at url %q exists: %v", keyVaultId, name, vaultBaseUrl, err) + } + if !ok { + log.Printf("[DEBUG] Certificate %q Key Vault %q was not found in Key Vault at URI %q ", name, keyVaultId, vaultBaseUrl) + return nil + } // get the latest version resp, err := client.GetCertificate(ctx, vaultBaseUrl, name, "") @@ -232,7 +272,7 @@ func testCheckAzureRMKeyVaultCertificateDestroy(s *terraform.State) error { if utils.ResponseWasNotFound(resp.Response) { return nil } - return err + return fmt.Errorf("Bad: Get on keyVault certificate: %+v", err) } return fmt.Errorf("Key Vault Certificate still exists:\n%#v", resp) @@ -243,16 +283,27 @@ func testCheckAzureRMKeyVaultCertificateDestroy(s *terraform.State) error { func testCheckAzureRMKeyVaultCertificateExists(resourceName string) resource.TestCheckFunc { return func(s *terraform.State) error { + client := testAccProvider.Meta().(*ArmClient).keyVaultManagementClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + // Ensure we have enough information in state to look up in API rs, ok := s.RootModule().Resources[resourceName] if !ok { return fmt.Errorf("Not found: %s", resourceName) } + name 
:= rs.Primary.Attributes["name"] vaultBaseUrl := rs.Primary.Attributes["vault_uri"] + keyVaultId := rs.Primary.Attributes["key_vault_id"] - client := testAccProvider.Meta().(*ArmClient).keyVaultManagementClient - ctx := testAccProvider.Meta().(*ArmClient).StopContext + ok, err := azure.KeyVaultExists(ctx, testAccProvider.Meta().(*ArmClient).keyVaultClient, keyVaultId) + if err != nil { + return fmt.Errorf("Error checking if key vault %q for Certificate %q in Vault at url %q exists: %v", keyVaultId, name, vaultBaseUrl, err) + } + if !ok { + log.Printf("[DEBUG] Certificate %q Key Vault %q was not found in Key Vault at URI %q ", name, keyVaultId, vaultBaseUrl) + return nil + } resp, err := client.GetCertificate(ctx, vaultBaseUrl, name, "") if err != nil { @@ -260,7 +311,7 @@ func testCheckAzureRMKeyVaultCertificateExists(resourceName string) resource.Tes return fmt.Errorf("Bad: Key Vault Certificate %q (resource group: %q) does not exist", name, vaultBaseUrl) } - return fmt.Errorf("Bad: Get on keyVaultManagementClient: %+v", err) + return fmt.Errorf("Bad: Get on keyVault certificate: %+v", err) } return nil @@ -269,6 +320,9 @@ func testCheckAzureRMKeyVaultCertificateExists(resourceName string) resource.Tes func testCheckAzureRMKeyVaultCertificateDisappears(resourceName string) resource.TestCheckFunc { return func(s *terraform.State) error { + client := testAccProvider.Meta().(*ArmClient).keyVaultManagementClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + // Ensure we have enough information in state to look up in API rs, ok := s.RootModule().Resources[resourceName] if !ok { @@ -276,9 +330,16 @@ func testCheckAzureRMKeyVaultCertificateDisappears(resourceName string) resource } name := rs.Primary.Attributes["name"] vaultBaseUrl := rs.Primary.Attributes["vault_uri"] + keyVaultId := rs.Primary.Attributes["key_vault_id"] - client := testAccProvider.Meta().(*ArmClient).keyVaultManagementClient - ctx := testAccProvider.Meta().(*ArmClient).StopContext + ok, 
err := azure.KeyVaultExists(ctx, testAccProvider.Meta().(*ArmClient).keyVaultClient, keyVaultId) + if err != nil { + return fmt.Errorf("Error checking if key vault %q for Certificate %q in Vault at url %q exists: %v", keyVaultId, name, vaultBaseUrl, err) + } + if !ok { + log.Printf("[DEBUG] Certificate %q Key Vault %q was not found in Key Vault at URI %q ", name, keyVaultId, vaultBaseUrl) + return nil + } resp, err := client.DeleteCertificate(ctx, vaultBaseUrl, name) if err != nil { @@ -332,6 +393,74 @@ resource "azurerm_key_vault" "test" { } } +resource "azurerm_key_vault_certificate" "test" { + name = "acctestcert%s" + key_vault_id = "${azurerm_key_vault.test.id}" + + certificate { + contents = "${base64encode(file("testdata/keyvaultcert.pfx"))}" + password = "" + } + + certificate_policy { + issuer_parameters { + name = "Self" + } + + key_properties { + exportable = true + key_size = 2048 + key_type = "RSA" + reuse_key = false + } + + secret_properties { + content_type = "application/x-pkcs12" + } + } +} +`, rString, location, rString, rString) +} + +func testAccAzureRMKeyVaultCertificate_basicImportPFXClassic(rString string, location string) string { + return fmt.Sprintf(` +data "azurerm_client_config" "current" {} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%s" + location = "%s" +} + +resource "azurerm_key_vault" "test" { + name = "acctestkeyvault%s" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + tenant_id = "${data.azurerm_client_config.current.tenant_id}" + + sku { + name = "standard" + } + + access_policy { + tenant_id = "${data.azurerm_client_config.current.tenant_id}" + object_id = "${data.azurerm_client_config.current.service_principal_object_id}" + + certificate_permissions = [ + "delete", + "import", + "get", + ] + + key_permissions = [ + "create", + ] + + secret_permissions = [ + "set", + ] + } +} + resource "azurerm_key_vault_certificate" "test" { name = 
"acctestcert%s" vault_uri = "${azurerm_key_vault.test.vault_uri}" @@ -367,8 +496,8 @@ func testAccAzureRMKeyVaultCertificate_requiresImport(rString string, location s %s resource "azurerm_key_vault_certificate" "import" { - name = "${azurerm_key_vault_certificate.test.name}" - vault_uri = "${azurerm_key_vault_certificate.test.vault_uri}" + name = "${azurerm_key_vault_certificate.test.name}" + key_vault_id = "${azurerm_key_vault.test.id}" certificate { contents = "${base64encode(file("testdata/keyvaultcert.pfx"))}" @@ -436,7 +565,7 @@ resource "azurerm_key_vault" "test" { resource "azurerm_key_vault_certificate" "test" { name = "acctestcert%s" - vault_uri = "${azurerm_key_vault.test.vault_uri}" + key_vault_id = "${azurerm_key_vault.test.id}" certificate_policy { issuer_parameters { @@ -523,8 +652,8 @@ resource "azurerm_key_vault" "test" { } resource "azurerm_key_vault_certificate" "test" { - name = "acctestcert%s" - vault_uri = "${azurerm_key_vault.test.vault_uri}" + name = "acctestcert%s" + key_vault_id = "${azurerm_key_vault.test.id}" certificate_policy { issuer_parameters { @@ -618,8 +747,8 @@ resource "azurerm_key_vault" "test" { } resource "azurerm_key_vault_certificate" "test" { - name = "acctestcert%s" - vault_uri = "${azurerm_key_vault.test.vault_uri}" + name = "acctestcert%s" + key_vault_id = "${azurerm_key_vault.test.id}" certificate_policy { issuer_parameters { @@ -662,7 +791,7 @@ resource "azurerm_key_vault_certificate" "test" { } } - tags { + tags = { "hello" = "world" } } diff --git a/azurerm/resource_arm_key_vault_key.go b/azurerm/resource_arm_key_vault_key.go index 9b04ca0d8aae..df3701fee596 100644 --- a/azurerm/resource_arm_key_vault_key.go +++ b/azurerm/resource_arm_key_vault_key.go @@ -20,7 +20,7 @@ func resourceArmKeyVaultKey() *schema.Resource { Update: resourceArmKeyVaultKeyUpdate, Delete: resourceArmKeyVaultKeyDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + State: resourceArmKeyVaultChildResourceImporter, 
}, Schema: map[string]*schema.Schema{ @@ -31,11 +31,24 @@ func resourceArmKeyVaultKey() *schema.Resource { ValidateFunc: azure.ValidateKeyVaultChildName, }, + "key_vault_id": { + Type: schema.TypeString, + Optional: true, //todo required in 2.0 + Computed: true, //todo removed in 2.0 + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + ConflictsWith: []string{"vault_uri"}, + }, + + //todo remove in 2.0 "vault_uri": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validate.URLIsHTTPS, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Deprecated: "This property has been deprecated in favour of the key_vault_id property. This will prevent a class of bugs as described in https://github.com/terraform-providers/terraform-provider-azurerm/issues/2396 and will be removed in version 2.0 of the provider", + ValidateFunc: validate.URLIsHTTPS, + ConflictsWith: []string{"key_vault_id"}, }, "key_type": { @@ -99,18 +112,39 @@ func resourceArmKeyVaultKey() *schema.Resource { } func resourceArmKeyVaultKeyCreate(d *schema.ResourceData, meta interface{}) error { + vaultClient := meta.(*ArmClient).keyVaultClient client := meta.(*ArmClient).keyVaultManagementClient ctx := meta.(*ArmClient).StopContext log.Print("[INFO] preparing arguments for AzureRM KeyVault Key creation.") name := d.Get("name").(string) - keyVaultBaseUrl := d.Get("vault_uri").(string) + keyVaultBaseUri := d.Get("vault_uri").(string) + keyVaultId := d.Get("key_vault_id").(string) + + if keyVaultBaseUri == "" { + if keyVaultId == "" { + return fmt.Errorf("one of `key_vault_id` or `vault_uri` must be set") + } + + pKeyVaultBaseUrl, err := azure.GetKeyVaultBaseUrlFromID(ctx, vaultClient, keyVaultId) + if err != nil { + return fmt.Errorf("Error looking up Key %q vault url form id %q: %+v", name, keyVaultId, err) + } + + keyVaultBaseUri = pKeyVaultBaseUrl + } else { + id, err := azure.GetKeyVaultIDFromBaseUrl(ctx, vaultClient, keyVaultBaseUri) + if 
err != nil { + return fmt.Errorf("Error unable to find key vault ID from URL %q for certificate %q: %+v", keyVaultBaseUri, name, err) + } + d.Set("key_vault_id", id) + } if requireResourcesToBeImported { - existing, err := client.GetKey(ctx, keyVaultBaseUrl, name, "") + existing, err := client.GetKey(ctx, keyVaultBaseUri, name, "") if err != nil { if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("Error checking for presence of existing Key %q (Key Vault %q): %s", name, keyVaultBaseUrl, err) + return fmt.Errorf("Error checking for presence of existing Key %q (Key Vault %q): %s", name, keyVaultBaseUri, err) } } @@ -135,12 +169,12 @@ func resourceArmKeyVaultKeyCreate(d *schema.ResourceData, meta interface{}) erro Tags: expandTags(tags), } - if _, err := client.CreateKey(ctx, keyVaultBaseUrl, name, parameters); err != nil { + if _, err := client.CreateKey(ctx, keyVaultBaseUri, name, parameters); err != nil { return fmt.Errorf("Error Creating Key: %+v", err) } // "" indicates the latest version - read, err := client.GetKey(ctx, keyVaultBaseUrl, name, "") + read, err := client.GetKey(ctx, keyVaultBaseUri, name, "") if err != nil { return err } @@ -151,15 +185,33 @@ func resourceArmKeyVaultKeyCreate(d *schema.ResourceData, meta interface{}) erro } func resourceArmKeyVaultKeyUpdate(d *schema.ResourceData, meta interface{}) error { + vaultClient := meta.(*ArmClient).keyVaultClient client := meta.(*ArmClient).keyVaultManagementClient ctx := meta.(*ArmClient).StopContext - log.Print("[INFO] preparing arguments for AzureRM KeyVault Key update.") id, err := azure.ParseKeyVaultChildID(d.Id()) if err != nil { return err } + keyVaultId, err := azure.GetKeyVaultIDFromBaseUrl(ctx, vaultClient, id.KeyVaultBaseUrl) + if err != nil { + return fmt.Errorf("Error retrieving the Resource ID the Key Vault at URL %q: %s", id.KeyVaultBaseUrl, err) + } + if keyVaultId == nil { + return fmt.Errorf("Unable to determine the Resource ID for the Key Vault at URL %q", 
id.KeyVaultBaseUrl) + } + + ok, err := azure.KeyVaultExists(ctx, vaultClient, *keyVaultId) + if err != nil { + return fmt.Errorf("Error checking if key vault %q for Key %q in Vault at url %q exists: %v", *keyVaultId, id.Name, id.KeyVaultBaseUrl, err) + } + if !ok { + log.Printf("[DEBUG] Key %q Key Vault %q was not found in Key Vault at URI %q - removing from state", id.Name, *keyVaultId, id.KeyVaultBaseUrl) + d.SetId("") + return nil + } + keyOptions := expandKeyVaultKeyOptions(d) tags := d.Get("tags").(map[string]interface{}) @@ -171,8 +223,7 @@ func resourceArmKeyVaultKeyUpdate(d *schema.ResourceData, meta interface{}) erro Tags: expandTags(tags), } - _, err = client.UpdateKey(ctx, id.KeyVaultBaseUrl, id.Name, id.Version, parameters) - if err != nil { + if _, err = client.UpdateKey(ctx, id.KeyVaultBaseUrl, id.Name, id.Version, parameters); err != nil { return err } @@ -180,6 +231,7 @@ func resourceArmKeyVaultKeyUpdate(d *schema.ResourceData, meta interface{}) erro } func resourceArmKeyVaultKeyRead(d *schema.ResourceData, meta interface{}) error { + keyVaultClient := meta.(*ArmClient).keyVaultClient client := meta.(*ArmClient).keyVaultManagementClient ctx := meta.(*ArmClient).StopContext @@ -188,6 +240,26 @@ func resourceArmKeyVaultKeyRead(d *schema.ResourceData, meta interface{}) error return err } + keyVaultId, err := azure.GetKeyVaultIDFromBaseUrl(ctx, keyVaultClient, id.KeyVaultBaseUrl) + if err != nil { + return fmt.Errorf("Error retrieving the Resource ID the Key Vault at URL %q: %s", id.KeyVaultBaseUrl, err) + } + if keyVaultId == nil { + log.Printf("[DEBUG] Unable to determine the Resource ID for the Key Vault at URL %q - removing from state!", id.KeyVaultBaseUrl) + d.SetId("") + return nil + } + + ok, err := azure.KeyVaultExists(ctx, keyVaultClient, *keyVaultId) + if err != nil { + return fmt.Errorf("Error checking if key vault %q for Key %q in Vault at url %q exists: %v", *keyVaultId, id.Name, id.KeyVaultBaseUrl, err) + } + if !ok { + log.Printf("[DEBUG] 
Key %q Key Vault %q was not found in Key Vault at URI %q - removing from state", id.Name, *keyVaultId, id.KeyVaultBaseUrl) + d.SetId("") + return nil + } + resp, err := client.GetKey(ctx, id.KeyVaultBaseUrl, id.Name, "") if err != nil { if utils.ResponseWasNotFound(resp.Response) { @@ -222,6 +294,7 @@ func resourceArmKeyVaultKeyRead(d *schema.ResourceData, meta interface{}) error } func resourceArmKeyVaultKeyDelete(d *schema.ResourceData, meta interface{}) error { + keyVaultClient := meta.(*ArmClient).keyVaultClient client := meta.(*ArmClient).keyVaultManagementClient ctx := meta.(*ArmClient).StopContext @@ -230,6 +303,24 @@ func resourceArmKeyVaultKeyDelete(d *schema.ResourceData, meta interface{}) erro return err } + keyVaultId, err := azure.GetKeyVaultIDFromBaseUrl(ctx, keyVaultClient, id.KeyVaultBaseUrl) + if err != nil { + return fmt.Errorf("Error retrieving the Resource ID the Key Vault at URL %q: %s", id.KeyVaultBaseUrl, err) + } + if keyVaultId == nil { + return fmt.Errorf("Unable to determine the Resource ID for the Key Vault at URL %q", id.KeyVaultBaseUrl) + } + + ok, err := azure.KeyVaultExists(ctx, keyVaultClient, *keyVaultId) + if err != nil { + return fmt.Errorf("Error checking if key vault %q for Key %q in Vault at url %q exists: %v", *keyVaultId, id.Name, id.KeyVaultBaseUrl, err) + } + if !ok { + log.Printf("[DEBUG] Key %q Key Vault %q was not found in Key Vault at URI %q - removing from state", id.Name, *keyVaultId, id.KeyVaultBaseUrl) + d.SetId("") + return nil + } + _, err = client.DeleteKey(ctx, id.KeyVaultBaseUrl, id.Name) return err } diff --git a/azurerm/resource_arm_key_vault_key_test.go b/azurerm/resource_arm_key_vault_key_test.go index ad09183b6201..b9c37fd56431 100644 --- a/azurerm/resource_arm_key_vault_key_test.go +++ b/azurerm/resource_arm_key_vault_key_test.go @@ -2,8 +2,11 @@ package azurerm import ( "fmt" + "log" "testing" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + 
"github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" @@ -36,6 +39,32 @@ func TestAccAzureRMKeyVaultKey_basicEC(t *testing.T) { }) } +func TestAccAzureRMKeyVaultKey_basicECClassic(t *testing.T) { + resourceName := "azurerm_key_vault_key.test" + rs := acctest.RandString(6) + config := testAccAzureRMKeyVaultKey_basicECClassic(rs, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKeyVaultKeyDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKeyVaultKeyExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"key_size"}, + }, + }, + }) +} + func TestAccAzureRMKeyVaultKey_requiresImport(t *testing.T) { if !requireResourcesToBeImported { t.Skip("Skipping since resources aren't required to be imported") @@ -230,6 +259,16 @@ func testCheckAzureRMKeyVaultKeyDestroy(s *terraform.State) error { name := rs.Primary.Attributes["name"] vaultBaseUrl := rs.Primary.Attributes["vault_uri"] + keyVaultId := rs.Primary.Attributes["key_vault_id"] + + ok, err := azure.KeyVaultExists(ctx, testAccProvider.Meta().(*ArmClient).keyVaultClient, keyVaultId) + if err != nil { + return fmt.Errorf("Error checking if key vault %q for Secret %q in Vault at url %q exists: %v", keyVaultId, name, vaultBaseUrl, err) + } + if !ok { + log.Printf("[DEBUG] Secret %q Key Vault %q was not found in Key Vault at URI %q ", name, keyVaultId, vaultBaseUrl) + return nil + } // get the latest version resp, err := client.GetKey(ctx, vaultBaseUrl, name, "") @@ -248,6 +287,9 @@ func testCheckAzureRMKeyVaultKeyDestroy(s *terraform.State) error { func testCheckAzureRMKeyVaultKeyExists(resourceName string) resource.TestCheckFunc { return func(s *terraform.State) 
error { + client := testAccProvider.Meta().(*ArmClient).keyVaultManagementClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + // Ensure we have enough information in state to look up in API rs, ok := s.RootModule().Resources[resourceName] if !ok { @@ -255,9 +297,16 @@ func testCheckAzureRMKeyVaultKeyExists(resourceName string) resource.TestCheckFu } name := rs.Primary.Attributes["name"] vaultBaseUrl := rs.Primary.Attributes["vault_uri"] + keyVaultId := rs.Primary.Attributes["key_vault_id"] - client := testAccProvider.Meta().(*ArmClient).keyVaultManagementClient - ctx := testAccProvider.Meta().(*ArmClient).StopContext + ok, err := azure.KeyVaultExists(ctx, testAccProvider.Meta().(*ArmClient).keyVaultClient, keyVaultId) + if err != nil { + return fmt.Errorf("Error checking if key vault %q for Key %q in Vault at url %q exists: %v", keyVaultId, name, vaultBaseUrl, err) + } + if !ok { + log.Printf("[DEBUG] Key %q Key Vault %q was not found in Key Vault at URI %q ", name, keyVaultId, vaultBaseUrl) + return nil + } resp, err := client.GetKey(ctx, vaultBaseUrl, name, "") if err != nil { @@ -274,6 +323,9 @@ func testCheckAzureRMKeyVaultKeyExists(resourceName string) resource.TestCheckFu func testCheckAzureRMKeyVaultKeyDisappears(resourceName string) resource.TestCheckFunc { return func(s *terraform.State) error { + client := testAccProvider.Meta().(*ArmClient).keyVaultManagementClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + // Ensure we have enough information in state to look up in API rs, ok := s.RootModule().Resources[resourceName] if !ok { @@ -282,9 +334,16 @@ func testCheckAzureRMKeyVaultKeyDisappears(resourceName string) resource.TestChe name := rs.Primary.Attributes["name"] vaultBaseUrl := rs.Primary.Attributes["vault_uri"] + keyVaultId := rs.Primary.Attributes["key_vault_id"] - client := testAccProvider.Meta().(*ArmClient).keyVaultManagementClient - ctx := testAccProvider.Meta().(*ArmClient).StopContext + ok, err := 
azure.KeyVaultExists(ctx, testAccProvider.Meta().(*ArmClient).keyVaultClient, keyVaultId) + if err != nil { + return fmt.Errorf("Error checking if key vault %q for Key %q in Vault at url %q exists: %v", keyVaultId, name, vaultBaseUrl, err) + } + if !ok { + log.Printf("[DEBUG] Key %q Key Vault %q was not found in Key Vault at URI %q ", name, keyVaultId, vaultBaseUrl) + return nil + } resp, err := client.DeleteKey(ctx, vaultBaseUrl, name) if err != nil { @@ -335,7 +394,62 @@ resource "azurerm_key_vault" "test" { ] } - tags { + tags = { + environment = "Production" + } +} + +resource "azurerm_key_vault_key" "test" { + name = "key-%s" + key_vault_id = "${azurerm_key_vault.test.id}" + key_type = "EC" + key_size = 2048 + + key_opts = [ + "sign", + "verify", + ] +} +`, rString, location, rString, rString) +} + +func testAccAzureRMKeyVaultKey_basicECClassic(rString string, location string) string { + return fmt.Sprintf(` +data "azurerm_client_config" "current" {} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%s" + location = "%s" +} + +resource "azurerm_key_vault" "test" { + name = "acctestkv-%s" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + tenant_id = "${data.azurerm_client_config.current.tenant_id}" + + sku { + name = "premium" + } + + access_policy { + tenant_id = "${data.azurerm_client_config.current.tenant_id}" + object_id = "${data.azurerm_client_config.current.service_principal_object_id}" + + key_permissions = [ + "create", + "delete", + "get", + ] + + secret_permissions = [ + "get", + "delete", + "set", + ] + } + + tags = { environment = "Production" } } @@ -361,7 +475,7 @@ func testAccAzureRMKeyVaultKey_requiresImport(rString string, location string) s resource "azurerm_key_vault_key" "import" { name = "${azurerm_key_vault_key.test.name}" - vault_uri = "${azurerm_key_vault_key.test.vault_uri}" + key_vault_id = "${azurerm_key_vault.test.id}" key_type = "EC" key_size = 
2048 @@ -410,14 +524,14 @@ resource "azurerm_key_vault" "test" { ] } - tags { + tags = { environment = "Production" } } resource "azurerm_key_vault_key" "test" { name = "key-%s" - vault_uri = "${azurerm_key_vault.test.vault_uri}" + key_vault_id = "${azurerm_key_vault.test.id}" key_type = "RSA" key_size = 2048 @@ -469,14 +583,14 @@ resource "azurerm_key_vault" "test" { ] } - tags { + tags = { environment = "Production" } } resource "azurerm_key_vault_key" "test" { name = "key-%s" - vault_uri = "${azurerm_key_vault.test.vault_uri}" + key_vault_id = "${azurerm_key_vault.test.id}" key_type = "RSA-HSM" key_size = 2048 @@ -528,16 +642,16 @@ resource "azurerm_key_vault" "test" { ] } - tags { + tags = { environment = "Production" } } resource "azurerm_key_vault_key" "test" { - name = "key-%s" - vault_uri = "${azurerm_key_vault.test.vault_uri}" - key_type = "RSA" - key_size = 2048 + name = "key-%s" + key_vault_id = "${azurerm_key_vault.test.id}" + key_type = "RSA" + key_size = 2048 key_opts = [ "decrypt", @@ -548,7 +662,7 @@ resource "azurerm_key_vault_key" "test" { "wrapKey", ] - tags { + tags = { "hello" = "world" } } @@ -592,7 +706,7 @@ resource "azurerm_key_vault" "test" { ] } - tags { + tags = { environment = "Production" } } diff --git a/azurerm/resource_arm_key_vault_secret.go b/azurerm/resource_arm_key_vault_secret.go index 0a82fa88d8a0..cf54c2b1de74 100644 --- a/azurerm/resource_arm_key_vault_secret.go +++ b/azurerm/resource_arm_key_vault_secret.go @@ -19,7 +19,7 @@ func resourceArmKeyVaultSecret() *schema.Resource { Update: resourceArmKeyVaultSecretUpdate, Delete: resourceArmKeyVaultSecretDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + State: resourceArmKeyVaultChildResourceImporter, }, Schema: map[string]*schema.Schema{ @@ -30,11 +30,24 @@ func resourceArmKeyVaultSecret() *schema.Resource { ValidateFunc: azure.ValidateKeyVaultChildName, }, + "key_vault_id": { + Type: schema.TypeString, + Optional: true, //todo required in 2.0 
+ Computed: true, //todo removed in 2.0 + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + ConflictsWith: []string{"vault_uri"}, + }, + + //todo remove in 2.0 "vault_uri": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validate.URLIsHTTPS, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Deprecated: "This property has been deprecated in favour of the key_vault_id property. This will prevent a class of bugs as described in https://github.com/terraform-providers/terraform-provider-azurerm/issues/2396 and will be removed in version 2.0 of the provider", + ValidateFunc: validate.URLIsHTTPS, + ConflictsWith: []string{"key_vault_id"}, }, "value": { @@ -59,6 +72,7 @@ func resourceArmKeyVaultSecret() *schema.Resource { } func resourceArmKeyVaultSecretCreate(d *schema.ResourceData, meta interface{}) error { + vaultClient := meta.(*ArmClient).keyVaultClient client := meta.(*ArmClient).keyVaultManagementClient ctx := meta.(*ArmClient).StopContext @@ -66,6 +80,26 @@ func resourceArmKeyVaultSecretCreate(d *schema.ResourceData, meta interface{}) e name := d.Get("name").(string) keyVaultBaseUrl := d.Get("vault_uri").(string) + keyVaultId := d.Get("key_vault_id").(string) + + if keyVaultBaseUrl == "" { + if keyVaultId == "" { + return fmt.Errorf("one of `key_vault_id` or `vault_uri` must be set") + } + + pKeyVaultBaseUrl, err := azure.GetKeyVaultBaseUrlFromID(ctx, vaultClient, keyVaultId) + if err != nil { + return fmt.Errorf("Error looking up Secret %q vault url form id %q: %+v", name, keyVaultId, err) + } + + keyVaultBaseUrl = pKeyVaultBaseUrl + } else { + id, err := azure.GetKeyVaultIDFromBaseUrl(ctx, vaultClient, keyVaultBaseUrl) + if err != nil { + return fmt.Errorf("Error unable to find key vault ID from URL %q for certificate %q: %+v", keyVaultBaseUrl, name, err) + } + d.Set("key_vault_id", id) + } if requireResourcesToBeImported { existing, err := client.GetSecret(ctx, keyVaultBaseUrl, name, "") 
@@ -109,6 +143,7 @@ func resourceArmKeyVaultSecretCreate(d *schema.ResourceData, meta interface{}) e } func resourceArmKeyVaultSecretUpdate(d *schema.ResourceData, meta interface{}) error { + keyVaultClient := meta.(*ArmClient).keyVaultClient client := meta.(*ArmClient).keyVaultManagementClient ctx := meta.(*ArmClient).StopContext log.Print("[INFO] preparing arguments for AzureRM KeyVault Secret update.") @@ -118,6 +153,24 @@ func resourceArmKeyVaultSecretUpdate(d *schema.ResourceData, meta interface{}) e return err } + keyVaultId, err := azure.GetKeyVaultIDFromBaseUrl(ctx, keyVaultClient, id.KeyVaultBaseUrl) + if err != nil { + return fmt.Errorf("Error retrieving the Resource ID the Key Vault at URL %q: %s", id.KeyVaultBaseUrl, err) + } + if keyVaultId == nil { + return fmt.Errorf("Unable to determine the Resource ID for the Key Vault at URL %q", id.KeyVaultBaseUrl) + } + + ok, err := azure.KeyVaultExists(ctx, keyVaultClient, *keyVaultId) + if err != nil { + return fmt.Errorf("Error checking if key vault %q for Secret %q in Vault at url %q exists: %v", *keyVaultId, id.Name, id.KeyVaultBaseUrl, err) + } + if !ok { + log.Printf("[DEBUG] Secret %q Key Vault %q was not found in Key Vault at URI %q - removing from state", id.Name, *keyVaultId, id.KeyVaultBaseUrl) + d.SetId("") + return nil + } + value := d.Get("value").(string) contentType := d.Get("content_type").(string) tags := d.Get("tags").(map[string]interface{}) @@ -161,6 +214,7 @@ func resourceArmKeyVaultSecretUpdate(d *schema.ResourceData, meta interface{}) e } func resourceArmKeyVaultSecretRead(d *schema.ResourceData, meta interface{}) error { + keyVaultClient := meta.(*ArmClient).keyVaultClient client := meta.(*ArmClient).keyVaultManagementClient ctx := meta.(*ArmClient).StopContext @@ -169,6 +223,26 @@ func resourceArmKeyVaultSecretRead(d *schema.ResourceData, meta interface{}) err return err } + keyVaultId, err := azure.GetKeyVaultIDFromBaseUrl(ctx, keyVaultClient, id.KeyVaultBaseUrl) + if err != nil { + 
return fmt.Errorf("Error retrieving the Resource ID the Key Vault at URL %q: %s", id.KeyVaultBaseUrl, err) + } + if keyVaultId == nil { + log.Printf("[DEBUG] Unable to determine the Resource ID for the Key Vault at URL %q - removing from state!", id.KeyVaultBaseUrl) + d.SetId("") + return nil + } + + ok, err := azure.KeyVaultExists(ctx, keyVaultClient, *keyVaultId) + if err != nil { + return fmt.Errorf("Error checking if key vault %q for Secret %q in Vault at url %q exists: %v", *keyVaultId, id.Name, id.KeyVaultBaseUrl, err) + } + if !ok { + log.Printf("[DEBUG] Secret %q Key Vault %q was not found in Key Vault at URI %q - removing from state", id.Name, *keyVaultId, id.KeyVaultBaseUrl) + d.SetId("") + return nil + } + // we always want to get the latest version resp, err := client.GetSecret(ctx, id.KeyVaultBaseUrl, id.Name, "") if err != nil { @@ -197,6 +271,7 @@ func resourceArmKeyVaultSecretRead(d *schema.ResourceData, meta interface{}) err } func resourceArmKeyVaultSecretDelete(d *schema.ResourceData, meta interface{}) error { + keyVaultClient := meta.(*ArmClient).keyVaultClient client := meta.(*ArmClient).keyVaultManagementClient ctx := meta.(*ArmClient).StopContext @@ -205,6 +280,24 @@ func resourceArmKeyVaultSecretDelete(d *schema.ResourceData, meta interface{}) e return err } + keyVaultId, err := azure.GetKeyVaultIDFromBaseUrl(ctx, keyVaultClient, id.KeyVaultBaseUrl) + if err != nil { + return fmt.Errorf("Error retrieving the Resource ID the Key Vault at URL %q: %s", id.KeyVaultBaseUrl, err) + } + if keyVaultId == nil { + return fmt.Errorf("Unable to determine the Resource ID for the Key Vault at URL %q", id.KeyVaultBaseUrl) + } + + ok, err := azure.KeyVaultExists(ctx, keyVaultClient, *keyVaultId) + if err != nil { + return fmt.Errorf("Error checking if key vault %q for Secret %q in Vault at url %q exists: %v", *keyVaultId, id.Name, id.KeyVaultBaseUrl, err) + } + if !ok { + log.Printf("[DEBUG] Secret %q Key Vault %q was not found in Key Vault at URI %q - 
removing from state", id.Name, *keyVaultId, id.KeyVaultBaseUrl) + d.SetId("") + return nil + } + _, err = client.DeleteSecret(ctx, id.KeyVaultBaseUrl, id.Name) return err } diff --git a/azurerm/resource_arm_key_vault_secret_test.go b/azurerm/resource_arm_key_vault_secret_test.go index 53e6e94fd71d..d9c388ed0426 100644 --- a/azurerm/resource_arm_key_vault_secret_test.go +++ b/azurerm/resource_arm_key_vault_secret_test.go @@ -2,8 +2,11 @@ package azurerm import ( "fmt" + "log" "testing" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" @@ -36,6 +39,32 @@ func TestAccAzureRMKeyVaultSecret_basic(t *testing.T) { }) } +func TestAccAzureRMKeyVaultSecret_basicClassic(t *testing.T) { + resourceName := "azurerm_key_vault_secret.test" + rs := acctest.RandString(6) + config := testAccAzureRMKeyVaultSecret_basicClasic(rs, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKeyVaultSecretDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKeyVaultSecretExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "value", "rick-and-morty"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccAzureRMKeyVaultSecret_requiresImport(t *testing.T) { if !requireResourcesToBeImported { t.Skip("Skipping since resources aren't required to be imported") @@ -176,6 +205,16 @@ func testCheckAzureRMKeyVaultSecretDestroy(s *terraform.State) error { name := rs.Primary.Attributes["name"] vaultBaseUrl := rs.Primary.Attributes["vault_uri"] + keyVaultId := rs.Primary.Attributes["key_vault_id"] + + ok, err := azure.KeyVaultExists(ctx, 
testAccProvider.Meta().(*ArmClient).keyVaultClient, keyVaultId) + if err != nil { + return fmt.Errorf("Error checking if key vault %q for Secret %q in Vault at url %q exists: %v", keyVaultId, name, vaultBaseUrl, err) + } + if !ok { + log.Printf("[DEBUG] Secret %q Key Vault %q was not found in Key Vault at URI %q ", name, keyVaultId, vaultBaseUrl) + return nil + } // get the latest version resp, err := client.GetSecret(ctx, vaultBaseUrl, name, "") @@ -194,6 +233,9 @@ func testCheckAzureRMKeyVaultSecretDestroy(s *terraform.State) error { func testCheckAzureRMKeyVaultSecretExists(resourceName string) resource.TestCheckFunc { return func(s *terraform.State) error { + client := testAccProvider.Meta().(*ArmClient).keyVaultManagementClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + // Ensure we have enough information in state to look up in API rs, ok := s.RootModule().Resources[resourceName] if !ok { @@ -201,9 +243,16 @@ func testCheckAzureRMKeyVaultSecretExists(resourceName string) resource.TestChec } name := rs.Primary.Attributes["name"] vaultBaseUrl := rs.Primary.Attributes["vault_uri"] + keyVaultId := rs.Primary.Attributes["key_vault_id"] - client := testAccProvider.Meta().(*ArmClient).keyVaultManagementClient - ctx := testAccProvider.Meta().(*ArmClient).StopContext + ok, err := azure.KeyVaultExists(ctx, testAccProvider.Meta().(*ArmClient).keyVaultClient, keyVaultId) + if err != nil { + return fmt.Errorf("Error checking if key vault %q for Secret %q in Vault at url %q exists: %v", keyVaultId, name, vaultBaseUrl, err) + } + if !ok { + log.Printf("[DEBUG] Secret %q Key Vault %q was not found in Key Vault at URI %q ", name, keyVaultId, vaultBaseUrl) + return nil + } resp, err := client.GetSecret(ctx, vaultBaseUrl, name, "") if err != nil { @@ -220,6 +269,9 @@ func testCheckAzureRMKeyVaultSecretExists(resourceName string) resource.TestChec func testCheckAzureRMKeyVaultSecretDisappears(resourceName string) resource.TestCheckFunc { return func(s 
*terraform.State) error { + client := testAccProvider.Meta().(*ArmClient).keyVaultManagementClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + // Ensure we have enough information in state to look up in API rs, ok := s.RootModule().Resources[resourceName] if !ok { @@ -227,9 +279,16 @@ func testCheckAzureRMKeyVaultSecretDisappears(resourceName string) resource.Test } name := rs.Primary.Attributes["name"] vaultBaseUrl := rs.Primary.Attributes["vault_uri"] + keyVaultId := rs.Primary.Attributes["key_vault_id"] - client := testAccProvider.Meta().(*ArmClient).keyVaultManagementClient - ctx := testAccProvider.Meta().(*ArmClient).StopContext + ok, err := azure.KeyVaultExists(ctx, testAccProvider.Meta().(*ArmClient).keyVaultClient, keyVaultId) + if err != nil { + return fmt.Errorf("Error checking if key vault %q for Secret %q in Vault at url %q exists: %v", keyVaultId, name, vaultBaseUrl, err) + } + if !ok { + log.Printf("[DEBUG] Secret %q Key Vault %q was not found in Key Vault at URI %q ", name, keyVaultId, vaultBaseUrl) + return nil + } resp, err := client.DeleteSecret(ctx, vaultBaseUrl, name) if err != nil { @@ -278,7 +337,54 @@ resource "azurerm_key_vault" "test" { ] } - tags { + tags = { + environment = "Production" + } +} + +resource "azurerm_key_vault_secret" "test" { + name = "secret-%s" + value = "rick-and-morty" + key_vault_id = "${azurerm_key_vault.test.id}" +} +`, rString, location, rString, rString) +} + +func testAccAzureRMKeyVaultSecret_basicClasic(rString string, location string) string { + return fmt.Sprintf(` +data "azurerm_client_config" "current" {} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%s" + location = "%s" +} + +resource "azurerm_key_vault" "test" { + name = "acctestkv-%s" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + tenant_id = "${data.azurerm_client_config.current.tenant_id}" + + sku { + name = "premium" + } + + access_policy { + 
tenant_id = "${data.azurerm_client_config.current.tenant_id}" + object_id = "${data.azurerm_client_config.current.service_principal_object_id}" + + key_permissions = [ + "get", + ] + + secret_permissions = [ + "get", + "delete", + "set", + ] + } + + tags = { environment = "Production" } } @@ -299,7 +405,7 @@ func testAccAzureRMKeyVaultSecret_requiresImport(rString string, location string resource "azurerm_key_vault_secret" "import" { name = "${azurerm_key_vault_secret.test.name}" value = "${azurerm_key_vault_secret.test.value}" - vault_uri = "${azurerm_key_vault_secret.test.vault_uri}" + key_vault_id = "${azurerm_key_vault_secret.test.key_vault_id}" } `, template) } @@ -338,7 +444,7 @@ resource "azurerm_key_vault" "test" { ] } - tags { + tags = { environment = "Production" } } @@ -346,10 +452,10 @@ resource "azurerm_key_vault" "test" { resource "azurerm_key_vault_secret" "test" { name = "secret-%s" value = "" - vault_uri = "${azurerm_key_vault.test.vault_uri}" + key_vault_id = "${azurerm_key_vault.test.id}" content_type = "application/xml" - tags { + tags = { "hello" = "world" } } @@ -390,7 +496,7 @@ resource "azurerm_key_vault" "test" { ] } - tags { + tags = { environment = "Production" } } diff --git a/azurerm/resource_arm_key_vault_test.go b/azurerm/resource_arm_key_vault_test.go index 0b86d7105a20..b711d458aff7 100644 --- a/azurerm/resource_arm_key_vault_test.go +++ b/azurerm/resource_arm_key_vault_test.go @@ -567,7 +567,7 @@ resource "azurerm_key_vault" "test" { enabled_for_disk_encryption = true enabled_for_template_deployment = true - tags { + tags = { environment = "Staging" } } @@ -611,7 +611,7 @@ resource "azurerm_key_vault" "test" { ] } - tags { + tags = { environment = "Production" } } diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index 43f02d784c7f..6e520657989a 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -4,16 +4,17 @@ import ( 
"bytes" "fmt" "log" - "regexp" "strings" "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice" "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/kubernetes" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/suppress" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -64,9 +65,10 @@ func resourceArmKubernetesCluster() *schema.Resource { Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.NoEmptyStrings, }, "location": locationSchema(), @@ -77,13 +79,14 @@ func resourceArmKubernetesCluster() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateKubernetesClusterDnsPrefix(), + ValidateFunc: validate.KubernetesDNSPrefix, }, "kubernetes_version": { - Type: schema.TypeString, - Optional: true, - Computed: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validate.NoEmptyStrings, }, "agent_pool_profile": { @@ -96,7 +99,7 @@ func resourceArmKubernetesCluster() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateKubernetesClusterAgentPoolName(), + ValidateFunc: validate.KubernetesAgentPoolName, }, "count": { @@ -124,6 +127,7 @@ func resourceArmKubernetesCluster() *schema.Resource { Required: true, ForceNew: true, DiffSuppressFunc: suppress.CaseDifference, + ValidateFunc: validate.NoEmptyStrings, }, 
"os_disk_size_gb": { @@ -135,9 +139,10 @@ func resourceArmKubernetesCluster() *schema.Resource { }, "vnet_subnet_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, }, "os_type": { @@ -170,16 +175,18 @@ func resourceArmKubernetesCluster() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "client_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.NoEmptyStrings, }, "client_secret": { - Type: schema.TypeString, - ForceNew: true, - Required: true, - Sensitive: true, + Type: schema.TypeString, + ForceNew: true, + Required: true, + Sensitive: true, + ValidateFunc: validate.NoEmptyStrings, }, }, }, @@ -225,8 +232,9 @@ func resourceArmKubernetesCluster() *schema.Resource { Required: true, }, "log_analytics_workspace_id": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + ValidateFunc: azure.ValidateResourceID, }, }, }, @@ -243,8 +251,9 @@ func resourceArmKubernetesCluster() *schema.Resource { Required: true, }, "subnet_name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + ValidateFunc: validate.NoEmptyStrings, }, }, }, @@ -260,9 +269,10 @@ func resourceArmKubernetesCluster() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "admin_username": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.KubernetesAdminUserName, }, "ssh_key": { Type: schema.TypeList, @@ -272,9 +282,10 @@ func resourceArmKubernetesCluster() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "key_data": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: 
true, + ForceNew: true, + ValidateFunc: validate.NoEmptyStrings, }, }, }, @@ -302,31 +313,35 @@ func resourceArmKubernetesCluster() *schema.Resource { }, "dns_service_ip": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validate.IPv4Address, }, "docker_bridge_cidr": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validate.CIDR, }, "pod_cidr": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validate.CIDR, }, "service_cidr": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validate.CIDR, }, }, }, @@ -353,30 +368,34 @@ func resourceArmKubernetesCluster() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "client_app_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.UUID, }, "server_app_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.UUID, }, "server_app_secret": { - Type: schema.TypeString, - ForceNew: true, - Required: true, - Sensitive: true, + Type: schema.TypeString, + ForceNew: true, + Required: true, + Sensitive: true, + ValidateFunc: validate.NoEmptyStrings, }, "tenant_id": { - // this can be sourced from the client config if it's not specified Type: schema.TypeString, Optional: true, Computed: true, ForceNew: true, + // OrEmpty since this can be sourced from the client config if it's not specified + 
ValidateFunc: validate.UUIDOrEmpty, }, }, }, @@ -1187,20 +1206,6 @@ func resourceKubernetesClusterServicePrincipalProfileHash(v interface{}) int { return hashcode.String(buf.String()) } -func validateKubernetesClusterAgentPoolName() schema.SchemaValidateFunc { - return validation.StringMatch( - regexp.MustCompile("^[a-z]{1}[a-z0-9]{0,11}$"), - "Agent Pool names must start with a lowercase letter, have max length of 12, and only have characters a-z0-9.", - ) -} - -func validateKubernetesClusterDnsPrefix() schema.SchemaValidateFunc { - return validation.StringMatch( - regexp.MustCompile("^[a-zA-Z][-a-zA-Z0-9]{0,43}[a-zA-Z0-9]$"), - "The DNS name must contain between 3 and 45 characters. The name can contain only letters, numbers, and hyphens. The name must start with a letter and must end with a letter or a number.", - ) -} - func flattenKubernetesClusterKubeConfig(config kubernetes.KubeConfig) []interface{} { values := make(map[string]interface{}) diff --git a/azurerm/resource_arm_kubernetes_cluster_test.go b/azurerm/resource_arm_kubernetes_cluster_test.go index 3f39f0649c72..964164f9743b 100644 --- a/azurerm/resource_arm_kubernetes_cluster_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_test.go @@ -11,64 +11,6 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" ) -func TestAzureRMKubernetesCluster_agentPoolName(t *testing.T) { - cases := []struct { - Input string - ExpectError bool - }{ - { - Input: "", - ExpectError: true, - }, - { - Input: "hi", - ExpectError: false, - }, - { - Input: "hello", - ExpectError: false, - }, - { - Input: "hello-world", - ExpectError: true, - }, - { - Input: "helloworld123", - ExpectError: true, - }, - { - Input: "hello_world", - ExpectError: true, - }, - { - Input: "Hello-World", - ExpectError: true, - }, - { - Input: "20202020", - ExpectError: true, - }, - { - Input: "h20202020", - ExpectError: false, - }, - { - Input: "ABC123!@£", - ExpectError: true, - }, - } - - for _, tc := range cases 
{ - _, errors := validateKubernetesClusterAgentPoolName()(tc.Input, "") - - hasError := len(errors) > 0 - - if tc.ExpectError && !hasError { - t.Fatalf("Expected the Kubernetes Cluster Agent Pool Name to trigger a validation error for '%s'", tc.Input) - } - } -} - func TestAccAzureRMKubernetesCluster_basic(t *testing.T) { resourceName := "azurerm_kubernetes_cluster.test" ri := tf.AccRandTimeInt() @@ -782,7 +724,7 @@ resource "azurerm_virtual_network" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { environment = "Testing" } } @@ -837,7 +779,7 @@ resource "azurerm_virtual_network" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { environment = "Testing" } } @@ -1056,7 +998,7 @@ resource "azurerm_virtual_network" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { environment = "Testing" } } @@ -1127,7 +1069,7 @@ resource "azurerm_virtual_network" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { environment = "Testing" } } diff --git a/azurerm/resource_arm_loadbalancer.go b/azurerm/resource_arm_loadbalancer.go index 5113857c77a0..62b0c25a4098 100644 --- a/azurerm/resource_arm_loadbalancer.go +++ b/azurerm/resource_arm_loadbalancer.go @@ -113,6 +113,16 @@ func resourceArmLoadBalancer() *schema.Resource { Set: schema.HashString, }, + "outbound_rules": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validate.NoEmptyStrings, + }, + Set: schema.HashString, + }, + "zones": singleZonesSchema(), }, }, @@ -371,6 +381,15 @@ func flattenLoadBalancerFrontendIpConfiguration(ipConfigs *[]network.FrontendIPC } ipConfig["inbound_nat_rules"] = 
schema.NewSet(schema.HashString, inboundNatRules) + + outboundRules := make([]interface{}, 0) + if rules := props.OutboundRules; rules != nil { + for _, rule := range *rules { + outboundRules = append(outboundRules, *rule.ID) + } + + } + ipConfig["outbound_rules"] = schema.NewSet(schema.HashString, outboundRules) } result = append(result, ipConfig) diff --git a/azurerm/resource_arm_loadbalancer_outbound_rule.go b/azurerm/resource_arm_loadbalancer_outbound_rule.go new file mode 100644 index 000000000000..f94fe4724ca2 --- /dev/null +++ b/azurerm/resource_arm_loadbalancer_outbound_rule.go @@ -0,0 +1,350 @@ +package azurerm + +import ( + "fmt" + "log" + + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceArmLoadBalancerOutboundRule() *schema.Resource { + return &schema.Resource{ + Create: resourceArmLoadBalancerOutboundRuleCreateUpdate, + Read: resourceArmLoadBalancerOutboundRuleRead, + Update: resourceArmLoadBalancerOutboundRuleCreateUpdate, + Delete: resourceArmLoadBalancerOutboundRuleDelete, + + Importer: &schema.ResourceImporter{ + State: loadBalancerSubResourceStateImporter, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.NoEmptyStrings, + }, + + "resource_group_name": resourceGroupNameSchema(), + + "loadbalancer_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + }, + + "frontend_ip_configuration": { + Type: schema.TypeList, + Optional: 
true, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validate.NoEmptyStrings, + }, + + "id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "backend_address_pool_id": { + Type: schema.TypeString, + Required: true, + }, + + "protocol": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + string(network.Protocol1All), + string(network.Protocol1TCP), + string(network.Protocol1UDP), + }, false), + }, + + "enable_tcp_reset": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "allocated_outbound_ports": { + Type: schema.TypeInt, + Optional: true, + Default: 1024, + }, + + "idle_timeout_in_minutes": { + Type: schema.TypeInt, + Optional: true, + Default: 4, + }, + }, + } +} + +func resourceArmLoadBalancerOutboundRuleCreateUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).loadBalancerClient + ctx := meta.(*ArmClient).StopContext + + name := d.Get("name").(string) + loadBalancerID := d.Get("loadbalancer_id").(string) + armMutexKV.Lock(loadBalancerID) + defer armMutexKV.Unlock(loadBalancerID) + + loadBalancer, exists, err := retrieveLoadBalancerById(loadBalancerID, meta) + if err != nil { + return fmt.Errorf("Error Getting Load Balancer By ID: %+v", err) + } + if !exists { + d.SetId("") + log.Printf("[INFO] Load Balancer %q not found. 
Removing from state", name) + return nil + } + + newOutboundRule, err := expandAzureRmLoadBalancerOutboundRule(d, loadBalancer) + if err != nil { + return fmt.Errorf("Error Expanding Load Balancer Rule: %+v", err) + } + + outboundRules := make([]network.OutboundRule, 0) + + if loadBalancer.LoadBalancerPropertiesFormat.OutboundRules != nil { + outboundRules = *loadBalancer.LoadBalancerPropertiesFormat.OutboundRules + } + + existingOutboundRule, existingOutboundRuleIndex, exists := findLoadBalancerOutboundRuleByName(loadBalancer, name) + if exists { + if name == *existingOutboundRule.Name { + if requireResourcesToBeImported && d.IsNewResource() { + return tf.ImportAsExistsError("azurerm_lb_outbound_rule", *existingOutboundRule.ID) + } + + // this outbound rule is being updated/reapplied remove old copy from the slice + outboundRules = append(outboundRules[:existingOutboundRuleIndex], outboundRules[existingOutboundRuleIndex+1:]...) + } + } + + outboundRules = append(outboundRules, *newOutboundRule) + + loadBalancer.LoadBalancerPropertiesFormat.OutboundRules = &outboundRules + resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(loadBalancerID) + if err != nil { + return fmt.Errorf("Error Getting Load Balancer Name and Group: %+v", err) + } + + future, err := client.CreateOrUpdate(ctx, resGroup, loadBalancerName, *loadBalancer) + if err != nil { + return fmt.Errorf("Error Creating/Updating LoadBalancer: %+v", err) + } + + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("Error waiting for completion for Load Balancer updates: %+v", err) + } + + read, err := client.Get(ctx, resGroup, loadBalancerName, "") + if err != nil { + return fmt.Errorf("Error Getting LoadBalancer: %+v", err) + } + + if read.ID == nil { + return fmt.Errorf("Cannot read Load Balancer %s (resource group %s) ID", loadBalancerName, resGroup) + } + + var outboundRuleId string + for _, OutboundRule := range 
*(*read.LoadBalancerPropertiesFormat).OutboundRules { + if *OutboundRule.Name == name { + outboundRuleId = *OutboundRule.ID + } + } + + if outboundRuleId == "" { + return fmt.Errorf("Cannot find created Load Balancer Outbound Rule ID %q", outboundRuleId) + } + + d.SetId(outboundRuleId) + + return resourceArmLoadBalancerOutboundRuleRead(d, meta) +} + +func resourceArmLoadBalancerOutboundRuleRead(d *schema.ResourceData, meta interface{}) error { + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return err + } + name := id.Path["outboundRules"] + + loadBalancer, exists, err := retrieveLoadBalancerById(d.Get("loadbalancer_id").(string), meta) + if err != nil { + return fmt.Errorf("Error Getting Load Balancer By ID: %+v", err) + } + if !exists { + d.SetId("") + log.Printf("[INFO] Load Balancer %q not found. Removing from state", name) + return nil + } + + config, _, exists := findLoadBalancerOutboundRuleByName(loadBalancer, name) + if !exists { + d.SetId("") + log.Printf("[INFO] Load Balancer Outbound Rule %q not found. 
Removing from state", name) + return nil + } + + d.Set("name", config.Name) + d.Set("resource_group_name", id.ResourceGroup) + + if properties := config.OutboundRulePropertiesFormat; properties != nil { + d.Set("protocol", properties.Protocol) + d.Set("backend_address_pool_id", properties.BackendAddressPool.ID) + + frontendIpConfigurations := make([]interface{}, 0) + for _, feConfig := range *properties.FrontendIPConfigurations { + if feConfig.ID == nil { + continue + } + + feConfigId, err := parseAzureResourceID(*feConfig.ID) + if err != nil { + return nil + } + + name := feConfigId.Path["frontendIPConfigurations"] + frontendConfiguration := map[string]interface{}{ + "id": *feConfig.ID, + "name": name, + } + frontendIpConfigurations = append(frontendIpConfigurations, frontendConfiguration) + } + d.Set("frontend_ip_configuration", frontendIpConfigurations) + + if properties.EnableTCPReset != nil { + d.Set("enable_tcp_reset", properties.EnableTCPReset) + } + + if properties.IdleTimeoutInMinutes != nil { + d.Set("idle_timeout_in_minutes", properties.IdleTimeoutInMinutes) + } + + if properties.AllocatedOutboundPorts != nil { + d.Set("allocated_outbound_ports", properties.AllocatedOutboundPorts) + } + + } + + return nil +} + +func resourceArmLoadBalancerOutboundRuleDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).loadBalancerClient + ctx := meta.(*ArmClient).StopContext + + loadBalancerID := d.Get("loadbalancer_id").(string) + armMutexKV.Lock(loadBalancerID) + defer armMutexKV.Unlock(loadBalancerID) + + loadBalancer, exists, err := retrieveLoadBalancerById(loadBalancerID, meta) + if err != nil { + return fmt.Errorf("Error Getting Load Balancer By ID: %+v", err) + } + if !exists { + d.SetId("") + return nil + } + + _, index, exists := findLoadBalancerOutboundRuleByName(loadBalancer, d.Get("name").(string)) + if !exists { + return nil + } + + oldOutboundRules := *loadBalancer.LoadBalancerPropertiesFormat.OutboundRules + 
newOutboundRules := append(oldOutboundRules[:index], oldOutboundRules[index+1:]...) + loadBalancer.LoadBalancerPropertiesFormat.OutboundRules = &newOutboundRules + + resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(d.Get("loadbalancer_id").(string)) + if err != nil { + return fmt.Errorf("Error Getting Load Balancer Name and Group: %+v", err) + } + + future, err := client.CreateOrUpdate(ctx, resGroup, loadBalancerName, *loadBalancer) + if err != nil { + return fmt.Errorf("Error Creating/Updating Load Balancer %q (Resource Group %q): %+v", loadBalancerName, resGroup, err) + } + + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("Error waiting for completion of Load Balancer %q (Resource Group %q): %+v", loadBalancerName, resGroup, err) + } + + read, err := client.Get(ctx, resGroup, loadBalancerName, "") + if err != nil { + return fmt.Errorf("Error Getting LoadBalancer: %+v", err) + } + if read.ID == nil { + return fmt.Errorf("Cannot read ID of Load Balancer %q (resource group %s)", loadBalancerName, resGroup) + } + + return nil +} + +func expandAzureRmLoadBalancerOutboundRule(d *schema.ResourceData, lb *network.LoadBalancer) (*network.OutboundRule, error) { + + properties := network.OutboundRulePropertiesFormat{ + Protocol: network.Protocol1(d.Get("protocol").(string)), + } + + feConfigs := d.Get("frontend_ip_configuration").([]interface{}) + feConfigSubResources := make([]network.SubResource, 0) + + for _, raw := range feConfigs { + v := raw.(map[string]interface{}) + rule, exists := findLoadBalancerFrontEndIpConfigurationByName(lb, v["name"].(string)) + if !exists { + return nil, fmt.Errorf("[ERROR] Cannot find FrontEnd IP Configuration with the name %s", v["name"]) + } + + feConfigSubResource := network.SubResource{ + ID: rule.ID, + } + + feConfigSubResources = append(feConfigSubResources, feConfigSubResource) + } + + properties.FrontendIPConfigurations = &feConfigSubResources + + if v := 
d.Get("backend_address_pool_id").(string); v != "" { + properties.BackendAddressPool = &network.SubResource{ + ID: &v, + } + } + + if v, ok := d.GetOk("idle_timeout_in_minutes"); ok { + properties.IdleTimeoutInMinutes = utils.Int32(int32(v.(int))) + } + + if v, ok := d.GetOk("enable_tcp_reset"); ok { + properties.EnableTCPReset = utils.Bool(v.(bool)) + } + + if v, ok := d.GetOk("allocated_outbound_ports"); ok { + properties.AllocatedOutboundPorts = utils.Int32(int32(v.(int))) + } + + return &network.OutboundRule{ + Name: utils.String(d.Get("name").(string)), + OutboundRulePropertiesFormat: &properties, + }, nil +} diff --git a/azurerm/resource_arm_loadbalancer_outbound_rule_test.go b/azurerm/resource_arm_loadbalancer_outbound_rule_test.go new file mode 100644 index 000000000000..99140873ecfd --- /dev/null +++ b/azurerm/resource_arm_loadbalancer_outbound_rule_test.go @@ -0,0 +1,503 @@ +package azurerm + +import ( + "fmt" + "os" + "testing" + + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" +) + +func TestAccAzureRMLoadBalancerOutboundRule_basic(t *testing.T) { + var lb network.LoadBalancer + ri := tf.AccRandTimeInt() + outboundRuleName := fmt.Sprintf("OutboundRule-%d", ri) + + subscriptionID := os.Getenv("ARM_SUBSCRIPTION_ID") + outboundRuleId := fmt.Sprintf( + "/subscriptions/%s/resourceGroups/acctestRG-%d/providers/Microsoft.Network/loadBalancers/arm-test-loadbalancer-%d/outboundRules/%s", + subscriptionID, ri, ri, outboundRuleName) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMLoadBalancerDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMLoadBalancerOutboundRule_basic(ri, outboundRuleName, testLocation()), + Check: 
resource.ComposeTestCheckFunc( + testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), + testCheckAzureRMLoadBalancerOutboundRuleExists(outboundRuleName, &lb), + resource.TestCheckResourceAttr( + "azurerm_lb_outbound_rule.test", "id", outboundRuleId), + ), + }, + { + ResourceName: "azurerm_lb.test", + ImportState: true, + ImportStateVerify: true, + // location is deprecated and was never actually used + ImportStateVerifyIgnore: []string{"location"}, + }, + }, + }) +} + +func TestAccAzureRMLoadBalancerOutboundRule_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + var lb network.LoadBalancer + ri := tf.AccRandTimeInt() + outboundRuleName := fmt.Sprintf("OutboundRule-%d", ri) + location := testLocation() + + subscriptionID := os.Getenv("ARM_SUBSCRIPTION_ID") + outboundRuleId := fmt.Sprintf( + "/subscriptions/%s/resourceGroups/acctestRG-%d/providers/Microsoft.Network/loadBalancers/arm-test-loadbalancer-%d/outboundRules/%s", + subscriptionID, ri, ri, outboundRuleName) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMLoadBalancerDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMLoadBalancerOutboundRule_basic(ri, outboundRuleName, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), + testCheckAzureRMLoadBalancerOutboundRuleExists(outboundRuleName, &lb), + resource.TestCheckResourceAttr( + "azurerm_lb_outbound_rule.test", "id", outboundRuleId), + ), + }, + { + Config: testAccAzureRMLoadBalancerOutboundRule_requiresImport(ri, outboundRuleName, location), + ExpectError: testRequiresImportError("azurerm_lb_outbound_rule"), + }, + }, + }) +} + +func TestAccAzureRMLoadBalancerOutboundRule_removal(t *testing.T) { + var lb network.LoadBalancer + ri := tf.AccRandTimeInt() + outboundRuleName := 
fmt.Sprintf("OutboundRule-%d", ri) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMLoadBalancerDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMLoadBalancerOutboundRule_basic(ri, outboundRuleName, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), + testCheckAzureRMLoadBalancerOutboundRuleExists(outboundRuleName, &lb), + ), + }, + { + Config: testAccAzureRMLoadBalancerOutboundRule_removal(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), + testCheckAzureRMLoadBalancerOutboundRuleNotExists(outboundRuleName, &lb), + ), + }, + }, + }) +} + +func TestAccAzureRMLoadBalancerOutboundRule_update(t *testing.T) { + var lb network.LoadBalancer + ri := tf.AccRandTimeInt() + outboundRuleName := fmt.Sprintf("OutboundRule-%d", ri) + outboundRule2Name := fmt.Sprintf("OutboundRule-%d", tf.AccRandTimeInt()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMLoadBalancerDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMLoadBalancerOutboundRule_multipleRules(ri, outboundRuleName, outboundRule2Name, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), + testCheckAzureRMLoadBalancerOutboundRuleExists(outboundRuleName, &lb), + testCheckAzureRMLoadBalancerOutboundRuleExists(outboundRule2Name, &lb), + resource.TestCheckResourceAttr("azurerm_lb_outbound_rule.test2", "protocol", "Udp"), + ), + }, + { + Config: testAccAzureRMLoadBalancerOutboundRule_multipleRulesUpdate(ri, outboundRuleName, outboundRule2Name, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), + 
testCheckAzureRMLoadBalancerOutboundRuleExists(outboundRuleName, &lb), + testCheckAzureRMLoadBalancerOutboundRuleExists(outboundRule2Name, &lb), + resource.TestCheckResourceAttr("azurerm_lb_outbound_rule.test2", "protocol", "All"), + ), + }, + }, + }) +} + +func TestAccAzureRMLoadBalancerOutboundRule_reapply(t *testing.T) { + var lb network.LoadBalancer + ri := tf.AccRandTimeInt() + outboundRuleName := fmt.Sprintf("OutboundRule-%d", ri) + + deleteOutboundRuleState := func(s *terraform.State) error { + return s.Remove("azurerm_lb_outbound_rule.test") + } + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMLoadBalancerDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMLoadBalancerOutboundRule_basic(ri, outboundRuleName, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), + testCheckAzureRMLoadBalancerOutboundRuleExists(outboundRuleName, &lb), + deleteOutboundRuleState, + ), + ExpectNonEmptyPlan: true, + }, + { + Config: testAccAzureRMLoadBalancerOutboundRule_basic(ri, outboundRuleName, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), + testCheckAzureRMLoadBalancerOutboundRuleExists(outboundRuleName, &lb), + ), + }, + }, + }) +} + +func TestAccAzureRMLoadBalancerOutboundRule_disappears(t *testing.T) { + var lb network.LoadBalancer + ri := tf.AccRandTimeInt() + outboundRuleName := fmt.Sprintf("OutboundRule-%d", ri) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMLoadBalancerDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMLoadBalancerOutboundRule_basic(ri, outboundRuleName, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), + 
testCheckAzureRMLoadBalancerOutboundRuleExists(outboundRuleName, &lb), + testCheckAzureRMLoadBalancerOutboundRuleDisappears(outboundRuleName, &lb), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testCheckAzureRMLoadBalancerOutboundRuleExists(outboundRuleName string, lb *network.LoadBalancer) resource.TestCheckFunc { + return func(s *terraform.State) error { + if _, _, exists := findLoadBalancerOutboundRuleByName(lb, outboundRuleName); !exists { + return fmt.Errorf("A Load Balancer Outbound Rule with name %q cannot be found.", outboundRuleName) + } + + return nil + } +} + +func testCheckAzureRMLoadBalancerOutboundRuleNotExists(outboundRuleName string, lb *network.LoadBalancer) resource.TestCheckFunc { + return func(s *terraform.State) error { + if _, _, exists := findLoadBalancerOutboundRuleByName(lb, outboundRuleName); exists { + return fmt.Errorf("A Load Balancer Outbound Rule with name %q has been found.", outboundRuleName) + } + + return nil + } +} + +func testCheckAzureRMLoadBalancerOutboundRuleDisappears(ruleName string, lb *network.LoadBalancer) resource.TestCheckFunc { + return func(s *terraform.State) error { + client := testAccProvider.Meta().(*ArmClient).loadBalancerClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + + _, i, exists := findLoadBalancerOutboundRuleByName(lb, ruleName) + if !exists { + return fmt.Errorf("A Outbound Rule with name %q cannot be found.", ruleName) + } + + currentRules := *lb.LoadBalancerPropertiesFormat.OutboundRules + rules := append(currentRules[:i], currentRules[i+1:]...) 
+ lb.LoadBalancerPropertiesFormat.OutboundRules = &rules + + id, err := parseAzureResourceID(*lb.ID) + if err != nil { + return err + } + + future, err := client.CreateOrUpdate(ctx, id.ResourceGroup, *lb.Name, *lb) + if err != nil { + return fmt.Errorf("Error Creating/Updating Load Balancer %q (Resource Group %q): %+v", *lb.Name, id.ResourceGroup, err) + } + + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("Error waiting for completion of Load Balancer %q (Resource Group %q): %+v", *lb.Name, id.ResourceGroup, err) + } + + _, err = client.Get(ctx, id.ResourceGroup, *lb.Name, "") + return err + } +} + +func testAccAzureRMLoadBalancerOutboundRule_basic(rInt int, outboundRuleName string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_public_ip" "test" { + name = "test-ip-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + allocation_method = "Static" + sku = "Standard" +} + +resource "azurerm_lb" "test" { + name = "arm-test-loadbalancer-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + sku = "Standard" + + + frontend_ip_configuration { + name = "one-%d" + public_ip_address_id = "${azurerm_public_ip.test.id}" + } +} + +resource "azurerm_lb_backend_address_pool" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + loadbalancer_id = "${azurerm_lb.test.id}" + name = "be-%d" +} + +resource "azurerm_lb_outbound_rule" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + loadbalancer_id = "${azurerm_lb.test.id}" + name = "%s" + backend_address_pool_id = "${azurerm_lb_backend_address_pool.test.id}" + protocol = "All" + + + frontend_ip_configuration { + name = "one-%d" + } +} +`, rInt, location, rInt, rInt, rInt, rInt, outboundRuleName, rInt) +} 
+ +func testAccAzureRMLoadBalancerOutboundRule_requiresImport(rInt int, name string, location string) string { + template := testAccAzureRMLoadBalancerOutboundRule_basic(rInt, name, location) + return fmt.Sprintf(` +%s + +resource "azurerm_lb_outbound_rule" "import" { + name = "${azurerm_lb_outbound_rule.test.name}" + resource_group_name = "${azurerm_lb_outbound_rule.test.resource_group_name}" + loadbalancer_id = "${azurerm_lb_outbound_rule.test.loadbalancer_id}" + backend_address_pool_id = "${azurerm_lb_backend_address_pool.test.id}" + protocol = "All" + + frontend_ip_configuration { + name = "${azurerm_lb_outbound_rule.test.frontend_ip_configuration.0.name}" + } +} +`, template) +} + +func testAccAzureRMLoadBalancerOutboundRule_removal(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_public_ip" "test" { + name = "test-ip-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + allocation_method = "Static" + sku = "Standard" +} + +resource "azurerm_lb_backend_address_pool" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + loadbalancer_id = "${azurerm_lb.test.id}" + name = "be-%d" +} + +resource "azurerm_lb" "test" { + name = "arm-test-loadbalancer-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + sku = "Standard" + + frontend_ip_configuration { + name = "one-%d" + public_ip_address_id = "${azurerm_public_ip.test.id}" + } +} +`, rInt, location, rInt, rInt, rInt, rInt) +} + +func testAccAzureRMLoadBalancerOutboundRule_multipleRules(rInt int, outboundRuleName, outboundRule2Name string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_public_ip" "test1" { + name = "test-ip-1-%d" + 
location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + allocation_method = "Static" + sku = "Standard" +} + +resource "azurerm_public_ip" "test2" { + name = "test-ip-2-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + allocation_method = "Static" + sku = "Standard" +} + +resource "azurerm_lb" "test" { + name = "arm-test-loadbalancer-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + sku = "Standard" + + frontend_ip_configuration { + name = "fe1-%d" + public_ip_address_id = "${azurerm_public_ip.test1.id}" + } + + frontend_ip_configuration { + name = "fe2-%d" + public_ip_address_id = "${azurerm_public_ip.test2.id}" + } +} + +resource "azurerm_lb_backend_address_pool" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + loadbalancer_id = "${azurerm_lb.test.id}" + name = "be-%d" +} + +resource "azurerm_lb_outbound_rule" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + loadbalancer_id = "${azurerm_lb.test.id}" + name = "%s" + protocol = "Tcp" + backend_address_pool_id = "${azurerm_lb_backend_address_pool.test.id}" + + frontend_ip_configuration { + name = "fe1-%d" + } +} + +resource "azurerm_lb_outbound_rule" "test2" { + resource_group_name = "${azurerm_resource_group.test.name}" + loadbalancer_id = "${azurerm_lb.test.id}" + name = "%s" + protocol = "Udp" + backend_address_pool_id = "${azurerm_lb_backend_address_pool.test.id}" + + frontend_ip_configuration { + name = "fe2-%d" + } +} +`, rInt, location, rInt, rInt, rInt, rInt, rInt, rInt, outboundRuleName, rInt, outboundRule2Name, rInt) +} + +func testAccAzureRMLoadBalancerOutboundRule_multipleRulesUpdate(rInt int, outboundRuleName, outboundRule2Name string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + 
location = "%s" +} + +resource "azurerm_public_ip" "test1" { + name = "test-ip-1-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + allocation_method = "Static" + sku = "Standard" +} + +resource "azurerm_public_ip" "test2" { + name = "test-ip-2-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + allocation_method = "Static" + sku = "Standard" +} + +resource "azurerm_lb" "test" { + name = "arm-test-loadbalancer-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + sku = "Standard" + + frontend_ip_configuration { + name = "fe1-%d" + public_ip_address_id = "${azurerm_public_ip.test1.id}" + } + + frontend_ip_configuration { + name = "fe2-%d" + public_ip_address_id = "${azurerm_public_ip.test2.id}" + } +} + +resource "azurerm_lb_backend_address_pool" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + loadbalancer_id = "${azurerm_lb.test.id}" + name = "be-%d" +} + +resource "azurerm_lb_outbound_rule" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + loadbalancer_id = "${azurerm_lb.test.id}" + name = "%s" + protocol = "All" + backend_address_pool_id = "${azurerm_lb_backend_address_pool.test.id}" + + frontend_ip_configuration { + name = "fe1-%d" + } +} + +resource "azurerm_lb_outbound_rule" "test2" { + resource_group_name = "${azurerm_resource_group.test.name}" + loadbalancer_id = "${azurerm_lb.test.id}" + name = "%s" + protocol = "All" + backend_address_pool_id = "${azurerm_lb_backend_address_pool.test.id}" + + frontend_ip_configuration { + name = "fe2-%d" + } +} +`, rInt, location, rInt, rInt, rInt, rInt, rInt, rInt, outboundRuleName, rInt, outboundRule2Name, rInt) +} diff --git a/azurerm/resource_arm_loadbalancer_test.go b/azurerm/resource_arm_loadbalancer_test.go index 2009621d6849..08b108423601 100644 --- 
a/azurerm/resource_arm_loadbalancer_test.go +++ b/azurerm/resource_arm_loadbalancer_test.go @@ -287,7 +287,7 @@ resource "azurerm_lb" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { Environment = "production" Purpose = "AcceptanceTests" } @@ -305,7 +305,7 @@ resource "azurerm_lb" "import" { location = "${azurerm_lb.test.location}" resource_group_name = "${azurerm_lb.test.resource_group_name}" - tags { + tags = { Environment = "production" Purpose = "AcceptanceTests" } @@ -326,7 +326,7 @@ resource "azurerm_lb" "test" { resource_group_name = "${azurerm_resource_group.test.name}" sku = "Standard" - tags { + tags = { Environment = "production" Purpose = "AcceptanceTests" } @@ -346,7 +346,7 @@ resource "azurerm_lb" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { Purpose = "AcceptanceTests" } } diff --git a/azurerm/resource_arm_local_network_gateway_test.go b/azurerm/resource_arm_local_network_gateway_test.go index 95a018fe5036..152f35d1c70b 100644 --- a/azurerm/resource_arm_local_network_gateway_test.go +++ b/azurerm/resource_arm_local_network_gateway_test.go @@ -391,7 +391,7 @@ resource "azurerm_local_network_gateway" "test" { gateway_address = "127.0.0.1" address_space = ["127.0.0.0/8"] - tags { + tags = { environment = "acctest" } } diff --git a/azurerm/resource_arm_log_analytics_linked_service.go b/azurerm/resource_arm_log_analytics_linked_service.go new file mode 100644 index 000000000000..1fd03071f3e0 --- /dev/null +++ b/azurerm/resource_arm_log_analytics_linked_service.go @@ -0,0 +1,220 @@ +package azurerm + +import ( + "fmt" + "log" + + "github.com/Azure/azure-sdk-for-go/services/preview/operationalinsights/mgmt/2015-11-01-preview/operationalinsights" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceArmLogAnalyticsLinkedService() *schema.Resource { + return &schema.Resource{ + Create: resourceArmLogAnalyticsLinkedServiceCreateUpdate, + Read: resourceArmLogAnalyticsLinkedServiceRead, + Update: resourceArmLogAnalyticsLinkedServiceCreateUpdate, + Delete: resourceArmLogAnalyticsLinkedServiceDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "resource_group_name": resourceGroupNameDiffSuppressSchema(), + + "workspace_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: suppress.CaseDifference, + ValidateFunc: validateAzureRmLogAnalyticsWorkspaceName, + }, + + "linked_service_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "automation", + ValidateFunc: validation.StringInSlice([]string{ + "automation", + }, false), + }, + + "resource_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + ConflictsWith: []string{"linked_service_properties.0"}, + }, + + "linked_service_properties": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + }, + }, + }, + }, + + // Exported properties + "name": { + Type: schema.TypeString, + Computed: true, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceArmLogAnalyticsLinkedServiceCreateUpdate(d *schema.ResourceData, meta interface{}) error { + 
client := meta.(*ArmClient).linkedServicesClient + ctx := meta.(*ArmClient).StopContext + + log.Printf("[INFO] preparing arguments for AzureRM Log Analytics Linked Services creation.") + + resGroup := d.Get("resource_group_name").(string) + workspaceName := d.Get("workspace_name").(string) + lsName := d.Get("linked_service_name").(string) + + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resGroup, workspaceName, lsName) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Linked Service %q (Workspace %q / Resource Group %q): %s", lsName, workspaceName, resGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_log_analytics_linked_service", *existing.ID) + } + } + + resourceId := d.Get("resource_id").(string) + if resourceId == "" { + props := d.Get("linked_service_properties").(map[string]interface{}) + resourceId = props["resource_id"].(string) + if resourceId == "" { + return fmt.Errorf("A `resource_id` must be specified either using the `resource_id` field at the top level or within the `linked_service_properties` block") + } + } + tags := d.Get("tags").(map[string]interface{}) + + parameters := operationalinsights.LinkedService{ + LinkedServiceProperties: &operationalinsights.LinkedServiceProperties{ + ResourceID: utils.String(resourceId), + }, + Tags: expandTags(tags), + } + + if _, err := client.CreateOrUpdate(ctx, resGroup, workspaceName, lsName, parameters); err != nil { + return fmt.Errorf("Error creating Linked Service %q (Workspace %q / Resource Group %q): %+v", lsName, workspaceName, resGroup, err) + } + + read, err := client.Get(ctx, resGroup, workspaceName, lsName) + if err != nil { + return fmt.Errorf("Error retrieving Linked Service %q (Worksppce %q / Resource Group %q): %+v", lsName, workspaceName, resGroup, err) + } + if read.ID == nil { + return fmt.Errorf("Cannot read 
Linked Service %q (Workspace %q / Resource Group %q) ID", lsName, workspaceName, resGroup) + } + + d.SetId(*read.ID) + + return resourceArmLogAnalyticsLinkedServiceRead(d, meta) +} + +func resourceArmLogAnalyticsLinkedServiceRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).linkedServicesClient + ctx := meta.(*ArmClient).StopContext + + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return err + } + + resGroup := id.ResourceGroup + workspaceName := id.Path["workspaces"] + lsName := id.Path["linkedservices"] + + resp, err := client.Get(ctx, resGroup, workspaceName, lsName) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + d.SetId("") + return nil + } + return fmt.Errorf("Error making Read request on AzureRM Log Analytics Linked Service '%s': %+v", lsName, err) + } + + d.Set("name", resp.Name) + d.Set("resource_group_name", resGroup) + d.Set("workspace_name", workspaceName) + d.Set("linked_service_name", lsName) + + if props := resp.LinkedServiceProperties; props != nil { + d.Set("resource_id", props.ResourceID) + } + + linkedServiceProperties := flattenLogAnalyticsLinkedServiceProperties(resp.LinkedServiceProperties) + if err := d.Set("linked_service_properties", linkedServiceProperties); err != nil { + return fmt.Errorf("Error setting `linked_service_properties`: %+v", err) + } + + flattenAndSetTags(d, resp.Tags) + return nil +} + +func resourceArmLogAnalyticsLinkedServiceDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).linkedServicesClient + ctx := meta.(*ArmClient).StopContext + + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return err + } + + resGroup := id.ResourceGroup + workspaceName := id.Path["workspaces"] + lsName := id.Path["linkedservices"] + + resp, err := client.Delete(ctx, resGroup, workspaceName, lsName) + if err != nil { + if utils.ResponseWasNotFound(resp) { + return nil + } + + return fmt.Errorf("Error deleting Linked Service %q 
(Workspace %q / Resource Group %q): %+v", lsName, workspaceName, resGroup, err) + } + + return nil +} + +func flattenLogAnalyticsLinkedServiceProperties(input *operationalinsights.LinkedServiceProperties) []interface{} { + if input == nil { + return []interface{}{} + } + + properties := make(map[string]interface{}) + + // resource id linked service + if resourceID := input.ResourceID; resourceID != nil { + properties["resource_id"] = interface{}(*resourceID) + } + + return []interface{}{properties} +} diff --git a/azurerm/resource_arm_log_analytics_linked_service_test.go b/azurerm/resource_arm_log_analytics_linked_service_test.go new file mode 100644 index 000000000000..fa2a4db021e4 --- /dev/null +++ b/azurerm/resource_arm_log_analytics_linked_service_test.go @@ -0,0 +1,228 @@ +package azurerm + +import ( + "fmt" + "net/http" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" +) + +func TestAccAzureRMLogAnalyticsLinkedService_basic(t *testing.T) { + resourceName := "azurerm_log_analytics_linked_service.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMLogAnalyticsLinkedServiceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMLogAnalyticsLinkedService_basic(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMLogAnalyticsLinkedServiceExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", fmt.Sprintf("acctestlaw-%d/Automation", ri)), + resource.TestCheckResourceAttr(resourceName, "workspace_name", fmt.Sprintf("acctestlaw-%d", ri)), + resource.TestCheckResourceAttr(resourceName, "linked_service_name", "automation"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func 
TestAccAzureRMLogAnalyticsLinkedService_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_log_analytics_linked_service.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMLogAnalyticsLinkedServiceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMLogAnalyticsLinkedService_basic(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMLogAnalyticsLinkedServiceExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", fmt.Sprintf("acctestlaw-%d/Automation", ri)), + resource.TestCheckResourceAttr(resourceName, "workspace_name", fmt.Sprintf("acctestlaw-%d", ri)), + resource.TestCheckResourceAttr(resourceName, "linked_service_name", "automation"), + ), + }, + { + Config: testAccAzureRMLogAnalyticsLinkedService_requiresImport(ri, location), + ExpectError: testRequiresImportError("azurerm_log_analytics_linked_service"), + }, + }, + }) +} + +func TestAccAzureRMLogAnalyticsLinkedService_complete(t *testing.T) { + resourceName := "azurerm_log_analytics_linked_service.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMLogAnalyticsLinkedServiceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMLogAnalyticsLinkedService_complete(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMLogAnalyticsLinkedServiceExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "linked_service_name", "automation"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func 
testCheckAzureRMLogAnalyticsLinkedServiceDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*ArmClient).linkedServicesClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + + for _, rs := range s.RootModule().Resources { + if rs.Type != "azurerm_log_analytics_linked_service" { + continue + } + + resourceGroup := rs.Primary.Attributes["resource_group_name"] + workspaceName := rs.Primary.Attributes["workspace_name"] + lsName := rs.Primary.Attributes["linked_service_name"] + + resp, err := conn.Get(ctx, resourceGroup, workspaceName, lsName) + if err != nil { + return nil + } + if resp.ID == nil { + return nil + } + + if resp.StatusCode != http.StatusNotFound { + return fmt.Errorf("Log Analytics Linked Service still exists:\n%#v", resp) + } + } + + return nil +} + +func testCheckAzureRMLogAnalyticsLinkedServiceExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + // Ensure we have enough information in state to look up in API + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] + workspaceName := rs.Primary.Attributes["workspace_name"] + lsName := rs.Primary.Attributes["linked_service_name"] + name := rs.Primary.Attributes["name"] + + if !hasResourceGroup { + return fmt.Errorf("Bad: no resource group found in state for Log Analytics Linked Service: '%s'", name) + } + + conn := testAccProvider.Meta().(*ArmClient).linkedServicesClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + + resp, err := conn.Get(ctx, resourceGroup, workspaceName, lsName) + if err != nil { + return fmt.Errorf("Bad: Get on Log Analytics Linked Service Client: %+v", err) + } + + if resp.StatusCode == http.StatusNotFound { + return fmt.Errorf("Bad: Log Analytics Linked Service '%s' (resource group: '%s') does not exist", name, resourceGroup) + } + + return nil + } +} + 
+func testAccAzureRMLogAnalyticsLinkedService_basic(rInt int, location string) string { + template := testAccAzureRMLogAnalyticsLinkedService_template(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_log_analytics_linked_service" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + workspace_name = "${azurerm_log_analytics_workspace.test.name}" + resource_id = "${azurerm_automation_account.test.id}" +} +`, template) +} + +func testAccAzureRMLogAnalyticsLinkedService_requiresImport(rInt int, location string) string { + template := testAccAzureRMLogAnalyticsLinkedService_basic(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_log_analytics_linked_service" "import" { + resource_group_name = "${azurerm_log_analytics_linked_service.test.resource_group_name}" + workspace_name = "${azurerm_log_analytics_linked_service.test.workspace_name}" + resource_id = "${azurerm_log_analytics_linked_service.test.resource_id}" +} +`, template) +} + +func testAccAzureRMLogAnalyticsLinkedService_complete(rInt int, location string) string { + template := testAccAzureRMLogAnalyticsLinkedService_template(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_log_analytics_linked_service" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + workspace_name = "${azurerm_log_analytics_workspace.test.name}" + linked_service_name = "automation" + resource_id = "${azurerm_automation_account.test.id}" +} +`, template) +} + +func testAccAzureRMLogAnalyticsLinkedService_template(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_automation_account" "test" { + name = "acctestAutomation-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + sku { + name = "Basic" + } + + tags = { + Environment = "Test" + } +} + +resource 
"azurerm_log_analytics_workspace" "test" { + name = "acctestLAW-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + sku = "PerGB2018" + retention_in_days = 30 +} +`, rInt, location, rInt, rInt) +} diff --git a/azurerm/resource_arm_log_analytics_workspace_linked_service.go b/azurerm/resource_arm_log_analytics_workspace_linked_service.go index 544a708516a3..a418fdff9e45 100644 --- a/azurerm/resource_arm_log_analytics_workspace_linked_service.go +++ b/azurerm/resource_arm_log_analytics_workspace_linked_service.go @@ -13,15 +13,15 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -/* -TODO: refactor this: - - * resource_group_name/workspace_name can become case-sensitive - * linked_service_properties should be a list / removed in favour of the top level element? - * we can remove `workspace` from the resource name? -*/ func resourceArmLogAnalyticsWorkspaceLinkedService() *schema.Resource { return &schema.Resource{ + DeprecationMessage: `The 'azurerm_log_analytics_workspace_linked_service' resource is deprecated in favour of the renamed version 'azurerm_log_analytics_linked_service'. + +Information on migrating to the renamed resource can be found here: https://terraform.io/docs/providers/azurerm/guides/migrating-between-renamed-resources.html + +As such the existing 'azurerm_log_analytics_workspace_linked_service' resource is deprecated and will be removed in the next major version of the AzureRM Provider (2.0). 
+`, + Create: resourceArmLogAnalyticsWorkspaceLinkedServiceCreateUpdate, Read: resourceArmLogAnalyticsWorkspaceLinkedServiceRead, Update: resourceArmLogAnalyticsWorkspaceLinkedServiceCreateUpdate, @@ -52,9 +52,19 @@ func resourceArmLogAnalyticsWorkspaceLinkedService() *schema.Resource { }, false), }, + "resource_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + ConflictsWith: []string{"linked_service_properties.0"}, + }, + "linked_service_properties": { - Type: schema.TypeMap, - Required: true, + Type: schema.TypeList, + Optional: true, + Computed: true, ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -102,16 +112,21 @@ func resourceArmLogAnalyticsWorkspaceLinkedServiceCreateUpdate(d *schema.Resourc } } - props := d.Get("linked_service_properties").(map[string]interface{}) - resourceID := props["resource_id"].(string) - + resourceId := d.Get("resource_id").(string) + if resourceId == "" { + props := d.Get("linked_service_properties").(map[string]interface{}) + resourceId = props["resource_id"].(string) + if resourceId == "" { + return fmt.Errorf("A `resource_id` must be specified either using the `resource_id` field at the top level or within the `linked_service_properties` block") + } + } tags := d.Get("tags").(map[string]interface{}) parameters := operationalinsights.LinkedService{ - Tags: expandTags(tags), LinkedServiceProperties: &operationalinsights.LinkedServiceProperties{ - ResourceID: &resourceID, + ResourceID: utils.String(resourceId), }, + Tags: expandTags(tags), } if _, err := client.CreateOrUpdate(ctx, resGroup, workspaceName, lsName, parameters); err != nil { @@ -142,7 +157,7 @@ func resourceArmLogAnalyticsWorkspaceLinkedServiceRead(d *schema.ResourceData, m resGroup := id.ResourceGroup workspaceName := id.Path["workspaces"] - lsName := id.Path["linkedServices"] + lsName := id.Path["linkedservices"] resp, err := client.Get(ctx, resGroup, 
workspaceName, lsName) if err != nil { @@ -152,16 +167,16 @@ func resourceArmLogAnalyticsWorkspaceLinkedServiceRead(d *schema.ResourceData, m } return fmt.Errorf("Error making Read request on AzureRM Log Analytics Linked Service '%s': %+v", lsName, err) } - if resp.ID == nil { - d.SetId("") - return nil - } d.Set("name", resp.Name) d.Set("resource_group_name", resGroup) d.Set("workspace_name", workspaceName) d.Set("linked_service_name", lsName) + if props := resp.LinkedServiceProperties; props != nil { + d.Set("resource_id", props.ResourceID) + } + linkedServiceProperties := flattenLogAnalyticsWorkspaceLinkedServiceProperties(resp.LinkedServiceProperties) if err := d.Set("linked_service_properties", linkedServiceProperties); err != nil { return fmt.Errorf("Error setting `linked_service_properties`: %+v", err) @@ -182,7 +197,7 @@ func resourceArmLogAnalyticsWorkspaceLinkedServiceDelete(d *schema.ResourceData, resGroup := id.ResourceGroup workspaceName := id.Path["workspaces"] - lsName := id.Path["linkedServices"] + lsName := id.Path["linkedservices"] resp, err := client.Delete(ctx, resGroup, workspaceName, lsName) if err != nil { @@ -196,7 +211,7 @@ func resourceArmLogAnalyticsWorkspaceLinkedServiceDelete(d *schema.ResourceData, return nil } -func flattenLogAnalyticsWorkspaceLinkedServiceProperties(input *operationalinsights.LinkedServiceProperties) interface{} { +func flattenLogAnalyticsWorkspaceLinkedServiceProperties(input *operationalinsights.LinkedServiceProperties) []interface{} { if input == nil { return []interface{}{} } @@ -208,5 +223,5 @@ func flattenLogAnalyticsWorkspaceLinkedServiceProperties(input *operationalinsig properties["resource_id"] = interface{}(*resourceID) } - return interface{}(properties) + return []interface{}{properties} } diff --git a/azurerm/resource_arm_log_analytics_workspace_linked_service_test.go b/azurerm/resource_arm_log_analytics_workspace_linked_service_test.go index 2787d4720898..d54247052464 100644 --- 
a/azurerm/resource_arm_log_analytics_workspace_linked_service_test.go +++ b/azurerm/resource_arm_log_analytics_workspace_linked_service_test.go @@ -164,11 +164,7 @@ func testAccAzureRMLogAnalyticsWorkspaceLinkedService_basic(rInt int, location s resource "azurerm_log_analytics_workspace_linked_service" "test" { resource_group_name = "${azurerm_resource_group.test.name}" workspace_name = "${azurerm_log_analytics_workspace.test.name}" - - linked_service_properties { - resource_id = "${azurerm_automation_account.test.id}" - } -} + resource_id = "${azurerm_automation_account.test.id}"} `, template) } @@ -180,10 +176,7 @@ func testAccAzureRMLogAnalyticsWorkspaceLinkedService_requiresImport(rInt int, l resource "azurerm_log_analytics_workspace_linked_service" "import" { resource_group_name = "${azurerm_log_analytics_workspace_linked_service.test.resource_group_name}" workspace_name = "${azurerm_log_analytics_workspace_linked_service.test.workspace_name}" - - linked_service_properties { - resource_id = "${azurerm_automation_account.test.id}" - } + resource_id = "${azurerm_log_analytics_workspace_linked_service.test.resource_id}" } `, template) } @@ -197,10 +190,7 @@ resource "azurerm_log_analytics_workspace_linked_service" "test" { resource_group_name = "${azurerm_resource_group.test.name}" workspace_name = "${azurerm_log_analytics_workspace.test.name}" linked_service_name = "automation" - - linked_service_properties { - resource_id = "${azurerm_automation_account.test.id}" - } + resource_id = "${azurerm_automation_account.test.id}" } `, template) } @@ -221,7 +211,7 @@ resource "azurerm_automation_account" "test" { name = "Basic" } - tags { + tags = { Environment = "Test" } } diff --git a/azurerm/resource_arm_log_analytics_workspace_test.go b/azurerm/resource_arm_log_analytics_workspace_test.go index 1563647dc6bd..a10458b59b33 100644 --- a/azurerm/resource_arm_log_analytics_workspace_test.go +++ b/azurerm/resource_arm_log_analytics_workspace_test.go @@ -228,7 +228,7 @@ 
resource "azurerm_log_analytics_workspace" "test" { sku = "PerGB2018" retention_in_days = 30 - tags { + tags = { Environment = "Test" } } diff --git a/azurerm/resource_arm_logic_app_action_http_test.go b/azurerm/resource_arm_logic_app_action_http_test.go index 514d60db8f17..1fc19711daab 100644 --- a/azurerm/resource_arm_logic_app_action_http_test.go +++ b/azurerm/resource_arm_logic_app_action_http_test.go @@ -153,7 +153,7 @@ resource "azurerm_logic_app_action_http" "test" { method = "GET" uri = "http://example.com/hello" - headers { + headers = { "Hello" = "World" "Something" = "New" } diff --git a/azurerm/resource_arm_logic_app_workflow_test.go b/azurerm/resource_arm_logic_app_workflow_test.go index 8dc459ad5882..4241bb9f5c89 100644 --- a/azurerm/resource_arm_logic_app_workflow_test.go +++ b/azurerm/resource_arm_logic_app_workflow_test.go @@ -199,7 +199,7 @@ resource "azurerm_logic_app_workflow" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { "Source" = "AcceptanceTests" } } diff --git a/azurerm/resource_arm_managed_disk_test.go b/azurerm/resource_arm_managed_disk_test.go index 56af11afa243..439ae7f65e09 100644 --- a/azurerm/resource_arm_managed_disk_test.go +++ b/azurerm/resource_arm_managed_disk_test.go @@ -379,7 +379,7 @@ resource "azurerm_managed_disk" "test" { create_option = "Empty" disk_size_gb = "1" - tags { + tags = { environment = "acctest" cost-center = "ops" } @@ -400,7 +400,7 @@ resource "azurerm_managed_disk" "import" { create_option = "Empty" disk_size_gb = "1" - tags { + tags = { environment = "acctest" cost-center = "ops" } @@ -424,7 +424,7 @@ resource "azurerm_managed_disk" "test" { disk_size_gb = "1" zones = ["1"] - tags { + tags = { environment = "acctest" cost-center = "ops" } @@ -446,7 +446,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ 
-467,7 +467,7 @@ resource "azurerm_managed_disk" "test" { source_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/myosdisk1.vhd" disk_size_gb = "45" - tags { + tags = { environment = "acctest" } } @@ -489,7 +489,7 @@ resource "azurerm_managed_disk" "source" { create_option = "Empty" disk_size_gb = "1" - tags { + tags = { environment = "acctest" cost-center = "ops" } @@ -504,7 +504,7 @@ resource "azurerm_managed_disk" "test" { source_resource_id = "${azurerm_managed_disk.source.id}" disk_size_gb = "1" - tags { + tags = { environment = "acctest" cost-center = "ops" } @@ -527,7 +527,7 @@ resource "azurerm_managed_disk" "test" { create_option = "Empty" disk_size_gb = "2" - tags { + tags = { environment = "acctest" } } @@ -549,7 +549,7 @@ resource "azurerm_managed_disk" "test" { create_option = "Empty" disk_size_gb = "1" - tags { + tags = { environment = "acctest" cost-center = "ops" } @@ -648,7 +648,7 @@ resource "azurerm_key_vault" "test" { enabled_for_disk_encryption = true - tags { + tags = { environment = "Production" } } @@ -693,7 +693,7 @@ resource "azurerm_managed_disk" "test" { } } - tags { + tags = { environment = "acctest" cost-center = "ops" } diff --git a/azurerm/resource_arm_media_services_account.go b/azurerm/resource_arm_media_services_account.go new file mode 100644 index 000000000000..a529a0f60f79 --- /dev/null +++ b/azurerm/resource_arm_media_services_account.go @@ -0,0 +1,215 @@ +package azurerm + +import ( + "fmt" + "log" + "regexp" + + "github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/response" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func 
resourceArmMediaServicesAccount() *schema.Resource { + return &schema.Resource{ + Create: resourceArmMediaServicesAccountCreateUpdate, + Read: resourceArmMediaServicesAccountRead, + Update: resourceArmMediaServicesAccountCreateUpdate, + Delete: resourceArmMediaServicesAccountDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringMatch( + regexp.MustCompile("^[-a-z0-9]{3,24}$"), + "Media Services Account name must be 3 - 24 characters long, contain only lowercase letters and numbers.", + ), + }, + + "location": locationSchema(), + + "resource_group_name": resourceGroupNameSchema(), + + "storage_account": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + ValidateFunc: azure.ValidateResourceID, + }, + + "is_primary": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + }, + + // TODO: support Tags when this bug is fixed: + // https://github.com/Azure/azure-rest-api-specs/issues/5249 + //"tags": tagsSchema(), + }, + } +} + +func resourceArmMediaServicesAccountCreateUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).mediaServicesClient + ctx := meta.(*ArmClient).StopContext + + accountName := d.Get("name").(string) + location := azureRMNormalizeLocation(d.Get("location").(string)) + resourceGroup := d.Get("resource_group_name").(string) + + storageAccountsRaw := d.Get("storage_account").(*schema.Set).List() + storageAccounts, err := expandMediaServicesAccountStorageAccounts(storageAccountsRaw) + if err != nil { + return err + } + + parameters := media.Service{ + ServiceProperties: &media.ServiceProperties{ + StorageAccounts: storageAccounts, + }, + Location: utils.String(location), + } + + if _, e := 
client.CreateOrUpdate(ctx, resourceGroup, accountName, parameters); e != nil { + return fmt.Errorf("Error creating Media Service Account %q (Resource Group %q): %+v", accountName, resourceGroup, e) + } + + service, err := client.Get(ctx, resourceGroup, accountName) + if err != nil { + return fmt.Errorf("Error retrieving Media Service Account %q (Resource Group %q): %+v", accountName, resourceGroup, err) + } + d.SetId(*service.ID) + + return nil +} + +func resourceArmMediaServicesAccountRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).mediaServicesClient + ctx := meta.(*ArmClient).StopContext + + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return err + } + + name := id.Path["mediaservices"] + resourceGroup := id.ResourceGroup + + resp, err := client.Get(ctx, resourceGroup, name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[INFO] Media Services Account %q was not found in Resource Group %q - removing from state", name, resourceGroup) + d.SetId("") + return nil + } + + return fmt.Errorf("Error retrieving Media Services Account %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + d.Set("name", resp.Name) + d.Set("resource_group_name", resourceGroup) + if location := resp.Location; location != nil { + d.Set("location", azureRMNormalizeLocation(*location)) + } + + if props := resp.ServiceProperties; props != nil { + accounts := flattenMediaServicesAccountStorageAccounts(props.StorageAccounts) + if e := d.Set("storage_account", accounts); e != nil { + return fmt.Errorf("Error flattening `storage_account`: %s", e) + } + } + + //flattenAndSetTags(d, resp.Tags) + + return nil +} + +func resourceArmMediaServicesAccountDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).mediaServicesClient + ctx := meta.(*ArmClient).StopContext + + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return err + } + + name := id.Path["mediaservices"] + 
resourceGroup := id.ResourceGroup + + resp, err := client.Delete(ctx, resourceGroup, name) + if err != nil { + if response.WasNotFound(resp.Response) { + return nil + } + return fmt.Errorf("Error issuing AzureRM delete request for Media Services Account '%s': %+v", name, err) + } + + return nil +} + +func expandMediaServicesAccountStorageAccounts(input []interface{}) (*[]media.StorageAccount, error) { + results := make([]media.StorageAccount, 0) + + foundPrimary := false + for _, accountMapRaw := range input { + accountMap := accountMapRaw.(map[string]interface{}) + + id := accountMap["id"].(string) + + storageType := media.Secondary + if accountMap["is_primary"].(bool) { + if foundPrimary { + return nil, fmt.Errorf("Only one Storage Account can be set as Primary") + } + + storageType = media.Primary + foundPrimary = true + } + + storageAccount := media.StorageAccount{ + ID: utils.String(id), + Type: storageType, + } + + results = append(results, storageAccount) + } + + return &results, nil +} + +func flattenMediaServicesAccountStorageAccounts(input *[]media.StorageAccount) []interface{} { + if input == nil { + return []interface{}{} + } + + results := make([]interface{}, 0) + for _, storageAccount := range *input { + output := make(map[string]interface{}) + + if storageAccount.ID != nil { + output["id"] = *storageAccount.ID + } + + output["is_primary"] = storageAccount.Type == media.Primary + + results = append(results, output) + } + + return results +} diff --git a/azurerm/resource_arm_media_services_account_test.go b/azurerm/resource_arm_media_services_account_test.go new file mode 100644 index 000000000000..7939946246df --- /dev/null +++ b/azurerm/resource_arm_media_services_account_test.go @@ -0,0 +1,270 @@ +package azurerm + +import ( + "fmt" + "net/http" + "regexp" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" +) + +func TestAccAzureRMMediaServicesAccount_basic(t *testing.T) { + resourceName := "azurerm_media_services_account.test" + ri := tf.AccRandTimeInt() + rs := acctest.RandString(5) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMMediaServicesAccountDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMMediaServicesAccount_basic(ri, rs, testLocation()), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, "storage_account.#", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMMediaServicesAccount_multipleAccounts(t *testing.T) { + resourceName := "azurerm_media_services_account.test" + ri := tf.AccRandTimeInt() + rs := acctest.RandString(5) + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMMediaServicesAccountDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMMediaServicesAccount_multipleAccounts(ri, rs, location), + Check: resource.ComposeAggregateTestCheckFunc( + testCheckAzureRMMediaServicesAccountExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "storage_account.#", "2"), + ), + }, + { + Config: testAccAzureRMMediaServicesAccount_multipleAccountsUpdated(ri, rs, location), + PlanOnly: true, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMMediaServicesAccount_multiplePrimaries(t *testing.T) { + ri := tf.AccRandTimeInt() + rs := acctest.RandString(5) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: 
testCheckAzureRMMediaServicesAccountDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMMediaServicesAccount_multiplePrimaries(ri, rs, testLocation()), + ExpectError: regexp.MustCompile("Only one Storage Account can be set as Primary"), + }, + }, + }) +} + +func testCheckAzureRMMediaServicesAccountExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + + // Ensure we have enough information in state to look up in API + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Media service not found: %s", resourceName) + } + + name := rs.Primary.Attributes["name"] + resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] + if !hasResourceGroup { + return fmt.Errorf("Bad: no resource group found in state for Media Services Account: '%s'", name) + } + + conn := testAccProvider.Meta().(*ArmClient).mediaServicesClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + + resp, err := conn.Get(ctx, resourceGroup, name) + if err != nil { + return fmt.Errorf("Bad: Get on mediaServicesClient: %+v", err) + } + + if resp.StatusCode == http.StatusNotFound { + return fmt.Errorf("Bad: Media Services Account %q (Resource Group %q) does not exist", name, resourceGroup) + } + + return nil + } +} + +func testCheckAzureRMMediaServicesAccountDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*ArmClient).mediaServicesClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + + for _, rs := range s.RootModule().Resources { + if rs.Type != "azurerm_media_services_account" { + continue + } + + name := rs.Primary.Attributes["name"] + resourceGroup := rs.Primary.Attributes["resource_group_name"] + + resp, err := conn.Get(ctx, resourceGroup, name) + + if err != nil { + return nil + } + + if resp.StatusCode != http.StatusNotFound { + return fmt.Errorf("Media Services Account still exists:\n%#v", resp) + } + } + + return nil +} + +func 
testAccAzureRMMediaServicesAccount_basic(rInt int, rString, location string) string { + template := testAccAzureRMMediaServicesAccount_template(rInt, rString, location) + return fmt.Sprintf(` +%s + +resource "azurerm_media_services_account" "test" { + name = "acctestmsa%s" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + storage_account { + id = "${azurerm_storage_account.first.id}" + is_primary = true + } +} +`, template, rString) +} + +func testAccAzureRMMediaServicesAccount_multipleAccounts(rInt int, rString, location string) string { + template := testAccAzureRMMediaServicesAccount_template(rInt, rString, location) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_account" "second" { + name = "acctestsa2%s" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + account_tier = "Standard" + account_replication_type = "GRS" +} + +resource "azurerm_media_services_account" "test" { + name = "acctestmsa%s" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + storage_account { + id = "${azurerm_storage_account.first.id}" + is_primary = true + } + + storage_account { + id = "${azurerm_storage_account.second.id}" + is_primary = false + } +} +`, template, rString, rString) +} + +func testAccAzureRMMediaServicesAccount_multipleAccountsUpdated(rInt int, rString, location string) string { + template := testAccAzureRMMediaServicesAccount_template(rInt, rString, location) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_account" "second" { + name = "acctestsa2%s" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + account_tier = "Standard" + account_replication_type = "GRS" +} + +resource "azurerm_media_services_account" "test" { + name = "acctestmsa%s" + location = 
"${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + storage_account { + id = "${azurerm_storage_account.second.id}" + is_primary = false + } + + storage_account { + id = "${azurerm_storage_account.first.id}" + is_primary = true + } +} +`, template, rString, rString) +} + +func testAccAzureRMMediaServicesAccount_multiplePrimaries(rInt int, rString, location string) string { + template := testAccAzureRMMediaServicesAccount_template(rInt, rString, location) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_account" "second" { + name = "acctestsa2%s" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + account_tier = "Standard" + account_replication_type = "GRS" +} + +resource "azurerm_media_services_account" "test" { + name = "acctestmsa%s" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + storage_account { + id = "${azurerm_storage_account.first.id}" + is_primary = true + } + + storage_account { + id = "${azurerm_storage_account.second.id}" + is_primary = true + } +} +`, template, rString, rString) +} + +func testAccAzureRMMediaServicesAccount_template(rInt int, rString, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "%d" + location = "%s" +} + +resource "azurerm_storage_account" "first" { + name = "acctestsa1%s" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + account_tier = "Standard" + account_replication_type = "GRS" +} +`, rInt, location, rString) +} diff --git a/azurerm/resource_arm_metric_alertrule.go b/azurerm/resource_arm_metric_alertrule.go index 8207f9c2aa5b..884022d624b4 100644 --- a/azurerm/resource_arm_metric_alertrule.go +++ b/azurerm/resource_arm_metric_alertrule.go @@ -18,9 +18,16 @@ func resourceArmMetricAlertRule() 
*schema.Resource { Read: resourceArmMetricAlertRuleRead, Update: resourceArmMetricAlertRuleCreateUpdate, Delete: resourceArmMetricAlertRuleDelete, + Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, }, + DeprecationMessage: `The 'azurerm_metric_alertrule' resource is deprecated in favour of the renamed version 'azurerm_monitor_metric_alertrule'. + +Information on migrating to the renamed resource can be found here: https://terraform.io/docs/providers/azurerm/guides/migrating-between-renamed-resources.html + +As such the existing 'azurerm_metric_alertrule' resource is deprecated and will be removed in the next major version of the AzureRM Provider (2.0). +`, Schema: map[string]*schema.Schema{ "name": { @@ -423,17 +430,6 @@ func expandAzureRmMetricThresholdAlertRule(d *schema.ResourceData) (*insights.Al return &alertRule, nil } -func resourceGroupAndAlertRuleNameFromId(alertRuleId string) (string, string, error) { - id, err := parseAzureResourceID(alertRuleId) - if err != nil { - return "", "", err - } - name := id.Path["alertrules"] - resourceGroup := id.ResourceGroup - - return resourceGroup, name, nil -} - func validateMetricAlertRuleTags(v interface{}, f string) (warnings []string, errors []error) { // Normal validation required by any AzureRM resource. 
warnings, errors = validateAzureRMTags(v, f) @@ -448,3 +444,14 @@ func validateMetricAlertRuleTags(v interface{}, f string) (warnings []string, er return warnings, errors } + +func resourceGroupAndAlertRuleNameFromId(alertRuleId string) (string, string, error) { + id, err := parseAzureResourceID(alertRuleId) + if err != nil { + return "", "", err + } + name := id.Path["alertrules"] + resourceGroup := id.ResourceGroup + + return resourceGroup, name, nil +} diff --git a/azurerm/resource_arm_monitor_activity_log_alert_test.go b/azurerm/resource_arm_monitor_activity_log_alert_test.go index b7206a39bcb6..df378c8b1a92 100644 --- a/azurerm/resource_arm_monitor_activity_log_alert_test.go +++ b/azurerm/resource_arm_monitor_activity_log_alert_test.go @@ -345,7 +345,7 @@ resource "azurerm_monitor_activity_log_alert" "test" { action { action_group_id = "${azurerm_monitor_action_group.test2.id}" - webhook_properties { + webhook_properties = { from = "terraform test" to = "microsoft azure" } diff --git a/azurerm/resource_arm_monitor_autoscale_setting.go b/azurerm/resource_arm_monitor_autoscale_setting.go new file mode 100644 index 000000000000..7f760b4e6a56 --- /dev/null +++ b/azurerm/resource_arm_monitor_autoscale_setting.go @@ -0,0 +1,1065 @@ +package azurerm + +import ( + "fmt" + "log" + "strconv" + "time" + + "github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights" + "github.com/Azure/go-autorest/autorest/date" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/response" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func 
resourceArmMonitorAutoScaleSetting() *schema.Resource { + return &schema.Resource{ + Create: resourceArmMonitorAutoScaleSettingCreateUpdate, + Read: resourceArmMonitorAutoScaleSettingRead, + Update: resourceArmMonitorAutoScaleSettingCreateUpdate, + Delete: resourceArmMonitorAutoScaleSettingDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.NoEmptyStrings, + }, + + "resource_group_name": resourceGroupNameSchema(), + + "location": locationSchema(), + + "target_resource_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + }, + + "enabled": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + + "profile": { + Type: schema.TypeList, + Required: true, + MaxItems: 20, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validate.NoEmptyStrings, + }, + "capacity": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "minimum": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(0, 1000), + }, + "maximum": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(0, 1000), + }, + "default": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(0, 1000), + }, + }, + }, + }, + "rule": { + Type: schema.TypeList, + Optional: true, + MaxItems: 10, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metric_trigger": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metric_name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validate.NoEmptyStrings, + }, + "metric_resource_id": { + Type: 
schema.TypeString, + Required: true, + ValidateFunc: azure.ValidateResourceID, + }, + "time_grain": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateIso8601Duration(), + }, + "statistic": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + string(insights.MetricStatisticTypeAverage), + string(insights.MetricStatisticTypeMax), + string(insights.MetricStatisticTypeMin), + string(insights.MetricStatisticTypeSum), + }, true), + DiffSuppressFunc: ignoreCaseDiffSuppressFunc, + }, + "time_window": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateIso8601Duration(), + }, + "time_aggregation": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + string(insights.TimeAggregationTypeAverage), + string(insights.TimeAggregationTypeCount), + string(insights.TimeAggregationTypeMaximum), + string(insights.TimeAggregationTypeMinimum), + string(insights.TimeAggregationTypeTotal), + }, true), + DiffSuppressFunc: ignoreCaseDiffSuppressFunc, + }, + "operator": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + string(insights.Equals), + string(insights.GreaterThan), + string(insights.GreaterThanOrEqual), + string(insights.LessThan), + string(insights.LessThanOrEqual), + string(insights.NotEquals), + }, true), + DiffSuppressFunc: ignoreCaseDiffSuppressFunc, + }, + "threshold": { + Type: schema.TypeFloat, + Required: true, + }, + }, + }, + }, + "scale_action": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "direction": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + string(insights.ScaleDirectionDecrease), + string(insights.ScaleDirectionIncrease), + }, true), + DiffSuppressFunc: ignoreCaseDiffSuppressFunc, + }, + "type": { + Type: schema.TypeString, + Required: true, + 
ValidateFunc: validation.StringInSlice([]string{ + string(insights.ChangeCount), + string(insights.ExactCount), + string(insights.PercentChangeCount), + }, true), + DiffSuppressFunc: ignoreCaseDiffSuppressFunc, + }, + "value": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(0), + }, + "cooldown": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateIso8601Duration(), + }, + }, + }, + }, + }, + }, + }, + "fixed_date": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "timezone": { + Type: schema.TypeString, + Optional: true, + Default: "UTC", + ValidateFunc: validateMonitorAutoScaleSettingsTimeZone(), + }, + "start": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateRFC3339Date, + }, + "end": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateRFC3339Date, + }, + }, + }, + }, + "recurrence": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "timezone": { + Type: schema.TypeString, + Optional: true, + Default: "UTC", + ValidateFunc: validateMonitorAutoScaleSettingsTimeZone(), + }, + "days": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{ + "Monday", + "Tuesday", + "Wednesday", + "Thursday", + "Friday", + "Saturday", + "Sunday", + }, true), + DiffSuppressFunc: ignoreCaseDiffSuppressFunc, + }, + }, + "hours": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeInt, + ValidateFunc: validation.IntBetween(0, 23), + }, + }, + "minutes": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeInt, + ValidateFunc: validation.IntBetween(0, 59), + }, + }, + }, + }, + }, + }, + }, + }, + + "notification": { + Type: schema.TypeList, + Optional: 
true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "email": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "send_to_subscription_administrator": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "send_to_subscription_co_administrator": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "custom_emails": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "webhook": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service_uri": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validate.NoEmptyStrings, + }, + "properties": { + Type: schema.TypeMap, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceArmMonitorAutoScaleSettingCreateUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).autoscaleSettingsClient + ctx := meta.(*ArmClient).StopContext + + name := d.Get("name").(string) + resourceGroup := d.Get("resource_group_name").(string) + + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Monitor AutoScale Setting %q (Resource Group %q): %s", name, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_monitor_autoscale_setting", *existing.ID) + } + } + + location := azureRMNormalizeLocation(d.Get("location").(string)) + enabled := d.Get("enabled").(bool) + targetResourceId := d.Get("target_resource_id").(string) + + notificationsRaw := d.Get("notification").([]interface{}) + notifications := 
expandAzureRmMonitorAutoScaleSettingNotifications(notificationsRaw) + + profilesRaw := d.Get("profile").([]interface{}) + profiles, err := expandAzureRmMonitorAutoScaleSettingProfile(profilesRaw) + if err != nil { + return fmt.Errorf("Error expanding `profile`: %+v", err) + } + + tags := d.Get("tags").(map[string]interface{}) + expandedTags := expandTags(tags) + + parameters := insights.AutoscaleSettingResource{ + Location: utils.String(location), + AutoscaleSetting: &insights.AutoscaleSetting{ + Enabled: &enabled, + Profiles: profiles, + Notifications: notifications, + TargetResourceURI: &targetResourceId, + }, + Tags: expandedTags, + } + + if _, err = client.CreateOrUpdate(ctx, resourceGroup, name, parameters); err != nil { + return fmt.Errorf("Error creating AutoScale Setting %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + read, err := client.Get(ctx, resourceGroup, name) + if err != nil { + return fmt.Errorf("Error retrieving AutoScale Setting %q (Resource Group %q): %+v", name, resourceGroup, err) + } + if read.ID == nil { + return fmt.Errorf("AutoScale Setting %q (Resource Group %q) has no ID", name, resourceGroup) + } + + d.SetId(*read.ID) + + return resourceArmMonitorAutoScaleSettingRead(d, meta) +} + +func resourceArmMonitorAutoScaleSettingRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).autoscaleSettingsClient + ctx := meta.(*ArmClient).StopContext + + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return err + } + resourceGroup := id.ResourceGroup + name := id.Path["autoscalesettings"] + + resp, err := client.Get(ctx, resourceGroup, name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[DEBUG] AutoScale Setting %q (Resource Group %q) was not found - removing from state!", name, resourceGroup) + d.SetId("") + return nil + } + + return fmt.Errorf("Error reading AutoScale Setting %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + d.Set("name", name) 
+ d.Set("resource_group_name", resourceGroup) + if location := resp.Location; location != nil { + d.Set("location", azureRMNormalizeLocation(*location)) + } + + d.Set("enabled", resp.Enabled) + d.Set("target_resource_id", resp.TargetResourceURI) + + profile, err := flattenAzureRmMonitorAutoScaleSettingProfile(resp.Profiles) + if err != nil { + return fmt.Errorf("Error flattening `profile` of Autoscale Setting %q (Resource Group %q): %+v", name, resourceGroup, err) + } + if err = d.Set("profile", profile); err != nil { + return fmt.Errorf("Error setting `profile` of Autoscale Setting %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + notifications := flattenAzureRmMonitorAutoScaleSettingNotification(resp.Notifications) + if err = d.Set("notification", notifications); err != nil { + return fmt.Errorf("Error setting `notification` of Autoscale Setting %q (resource group %q): %+v", name, resourceGroup, err) + } + + // Return a new tag map filtered by the specified tag names. + tagMap := filterTags(resp.Tags, "$type") + flattenAndSetTags(d, tagMap) + + return nil +} + +func resourceArmMonitorAutoScaleSettingDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).autoscaleSettingsClient + ctx := meta.(*ArmClient).StopContext + + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return err + } + resourceGroup := id.ResourceGroup + name := id.Path["autoscalesettings"] + + resp, err := client.Delete(ctx, resourceGroup, name) + if err != nil { + if !response.WasNotFound(resp.Response) { + return fmt.Errorf("Error deleting AutoScale Setting %q (Resource Group %q): %+v", name, resourceGroup, err) + } + } + + return nil +} + +func expandAzureRmMonitorAutoScaleSettingProfile(input []interface{}) (*[]insights.AutoscaleProfile, error) { + results := make([]insights.AutoscaleProfile, 0) + + for _, v := range input { + raw := v.(map[string]interface{}) + + name := raw["name"].(string) + + // this is Required, so we don't need to 
check for optionals here + capacitiesRaw := raw["capacity"].([]interface{}) + capacityRaw := capacitiesRaw[0].(map[string]interface{}) + capacity := insights.ScaleCapacity{ + Minimum: utils.String(strconv.Itoa(capacityRaw["minimum"].(int))), + Maximum: utils.String(strconv.Itoa(capacityRaw["maximum"].(int))), + Default: utils.String(strconv.Itoa(capacityRaw["default"].(int))), + } + + recurrencesRaw := raw["recurrence"].([]interface{}) + recurrence := expandAzureRmMonitorAutoScaleSettingRecurrence(recurrencesRaw) + + rulesRaw := raw["rule"].([]interface{}) + rules := expandAzureRmMonitorAutoScaleSettingRule(rulesRaw) + + fixedDatesRaw := raw["fixed_date"].([]interface{}) + fixedDate, err := expandAzureRmMonitorAutoScaleSettingFixedDate(fixedDatesRaw) + if err != nil { + return nil, fmt.Errorf("Error expanding `fixed_date`: %+v", err) + } + + result := insights.AutoscaleProfile{ + Name: utils.String(name), + Capacity: &capacity, + FixedDate: fixedDate, + Recurrence: recurrence, + Rules: rules, + } + results = append(results, result) + } + + return &results, nil +} + +func expandAzureRmMonitorAutoScaleSettingRule(input []interface{}) *[]insights.ScaleRule { + rules := make([]insights.ScaleRule, 0) + + for _, v := range input { + ruleRaw := v.(map[string]interface{}) + + triggersRaw := ruleRaw["metric_trigger"].([]interface{}) + triggerRaw := triggersRaw[0].(map[string]interface{}) + metricTrigger := insights.MetricTrigger{ + MetricName: utils.String(triggerRaw["metric_name"].(string)), + MetricResourceURI: utils.String(triggerRaw["metric_resource_id"].(string)), + TimeGrain: utils.String(triggerRaw["time_grain"].(string)), + Statistic: insights.MetricStatisticType(triggerRaw["statistic"].(string)), + TimeWindow: utils.String(triggerRaw["time_window"].(string)), + TimeAggregation: insights.TimeAggregationType(triggerRaw["time_aggregation"].(string)), + Operator: insights.ComparisonOperationType(triggerRaw["operator"].(string)), + Threshold: 
utils.Float(triggerRaw["threshold"].(float64)), + } + + actionsRaw := ruleRaw["scale_action"].([]interface{}) + actionRaw := actionsRaw[0].(map[string]interface{}) + scaleAction := insights.ScaleAction{ + Direction: insights.ScaleDirection(actionRaw["direction"].(string)), + Type: insights.ScaleType(actionRaw["type"].(string)), + Value: utils.String(strconv.Itoa(actionRaw["value"].(int))), + Cooldown: utils.String(actionRaw["cooldown"].(string)), + } + + rule := insights.ScaleRule{ + MetricTrigger: &metricTrigger, + ScaleAction: &scaleAction, + } + + rules = append(rules, rule) + } + + return &rules +} + +func expandAzureRmMonitorAutoScaleSettingFixedDate(input []interface{}) (*insights.TimeWindow, error) { + if len(input) == 0 { + return nil, nil + } + + raw := input[0].(map[string]interface{}) + + startString := raw["start"].(string) + startTime, err := date.ParseTime(time.RFC3339, startString) + if err != nil { + return nil, fmt.Errorf("Failed to parse `start` time %q as an RFC3339 date: %+v", startString, err) + } + endString := raw["end"].(string) + endTime, err := date.ParseTime(time.RFC3339, endString) + if err != nil { + return nil, fmt.Errorf("Failed to parse `end` time %q as an RFC3339 date: %+v", endString, err) + } + + timeZone := raw["timezone"].(string) + timeWindow := insights.TimeWindow{ + TimeZone: utils.String(timeZone), + Start: &date.Time{ + Time: startTime, + }, + End: &date.Time{ + Time: endTime, + }, + } + return &timeWindow, nil +} + +func expandAzureRmMonitorAutoScaleSettingRecurrence(input []interface{}) *insights.Recurrence { + if len(input) == 0 { + return nil + } + + recurrenceRaw := input[0].(map[string]interface{}) + + timeZone := recurrenceRaw["timezone"].(string) + days := make([]string, 0) + for _, dayItem := range recurrenceRaw["days"].([]interface{}) { + days = append(days, dayItem.(string)) + } + + hours := make([]int32, 0) + for _, hourItem := range recurrenceRaw["hours"].([]interface{}) { + hours = append(hours, 
int32(hourItem.(int))) + } + + minutes := make([]int32, 0) + for _, minuteItem := range recurrenceRaw["minutes"].([]interface{}) { + minutes = append(minutes, int32(minuteItem.(int))) + } + + return &insights.Recurrence{ + // API docs say this has to be `Week`. + Frequency: insights.RecurrenceFrequencyWeek, + Schedule: &insights.RecurrentSchedule{ + TimeZone: utils.String(timeZone), + Days: &days, + Hours: &hours, + Minutes: &minutes, + }, + } +} + +func expandAzureRmMonitorAutoScaleSettingNotifications(input []interface{}) *[]insights.AutoscaleNotification { + notifications := make([]insights.AutoscaleNotification, 0) + + for _, v := range input { + notificationRaw := v.(map[string]interface{}) + + emailsRaw := notificationRaw["email"].([]interface{}) + emailRaw := emailsRaw[0].(map[string]interface{}) + email := expandAzureRmMonitorAutoScaleSettingNotificationEmail(emailRaw) + + configsRaw := notificationRaw["webhook"].([]interface{}) + webhooks := expandAzureRmMonitorAutoScaleSettingNotificationWebhook(configsRaw) + + notification := insights.AutoscaleNotification{ + Email: email, + Operation: utils.String("scale"), + Webhooks: webhooks, + } + notifications = append(notifications, notification) + } + + return &notifications +} + +func expandAzureRmMonitorAutoScaleSettingNotificationEmail(input map[string]interface{}) *insights.EmailNotification { + customEmails := make([]string, 0) + if v, ok := input["custom_emails"]; ok { + for _, item := range v.([]interface{}) { + customEmails = append(customEmails, item.(string)) + } + } + + email := insights.EmailNotification{ + CustomEmails: &customEmails, + SendToSubscriptionAdministrator: utils.Bool(input["send_to_subscription_administrator"].(bool)), + SendToSubscriptionCoAdministrators: utils.Bool(input["send_to_subscription_co_administrator"].(bool)), + } + + return &email +} + +func expandAzureRmMonitorAutoScaleSettingNotificationWebhook(input []interface{}) *[]insights.WebhookNotification { + webhooks := 
make([]insights.WebhookNotification, 0) + + for _, v := range input { + webhookRaw := v.(map[string]interface{}) + + webhook := insights.WebhookNotification{ + ServiceURI: utils.String(webhookRaw["service_uri"].(string)), + } + + if props, ok := webhookRaw["properties"]; ok { + properties := make(map[string]*string) + for key, value := range props.(map[string]interface{}) { + properties[key] = utils.String(value.(string)) + } + + webhook.Properties = properties + } + + webhooks = append(webhooks, webhook) + } + + return &webhooks +} + +func flattenAzureRmMonitorAutoScaleSettingProfile(profiles *[]insights.AutoscaleProfile) ([]interface{}, error) { + if profiles == nil { + return []interface{}{}, nil + } + + results := make([]interface{}, 0) + for _, profile := range *profiles { + result := make(map[string]interface{}) + + if name := profile.Name; name != nil { + result["name"] = *name + } + + capacity, err := flattenAzureRmMonitorAutoScaleSettingCapacity(profile.Capacity) + if err != nil { + return nil, fmt.Errorf("Error flattening `capacity`: %+v", err) + } + result["capacity"] = capacity + + result["fixed_date"] = flattenAzureRmMonitorAutoScaleSettingFixedDate(profile.FixedDate) + result["recurrence"] = flattenAzureRmMonitorAutoScaleSettingRecurrence(profile.Recurrence) + + rule, err := flattenAzureRmMonitorAutoScaleSettingRules(profile.Rules) + if err != nil { + return nil, fmt.Errorf("Error flattening Rule: %s", err) + } + result["rule"] = rule + + results = append(results, result) + } + return results, nil +} + +func flattenAzureRmMonitorAutoScaleSettingCapacity(input *insights.ScaleCapacity) ([]interface{}, error) { + if input == nil { + return []interface{}{}, nil + } + + result := make(map[string]interface{}) + + if minStr := input.Minimum; minStr != nil { + min, err := strconv.Atoi(*minStr) + if err != nil { + return nil, fmt.Errorf("Error converting Minimum Scale Capacity %q to an int: %+v", *minStr, err) + } + result["minimum"] = min + } + + if maxStr := 
input.Maximum; maxStr != nil { + max, err := strconv.Atoi(*maxStr) + if err != nil { + return nil, fmt.Errorf("Error converting Maximum Scale Capacity %q to an int: %+v", *maxStr, err) + } + result["maximum"] = max + } + + if defaultCapacityStr := input.Default; defaultCapacityStr != nil { + defaultCapacity, err := strconv.Atoi(*defaultCapacityStr) + if err != nil { + return nil, fmt.Errorf("Error converting Default Scale Capacity %q to an int: %+v", *defaultCapacityStr, err) + } + result["default"] = defaultCapacity + } + + return []interface{}{result}, nil +} + +func flattenAzureRmMonitorAutoScaleSettingRules(input *[]insights.ScaleRule) ([]interface{}, error) { + if input == nil { + return []interface{}{}, nil + } + + results := make([]interface{}, 0) + for _, rule := range *input { + result := make(map[string]interface{}) + + metricTriggers := make([]interface{}, 0) + if trigger := rule.MetricTrigger; trigger != nil { + output := make(map[string]interface{}) + + output["operator"] = string(trigger.Operator) + output["statistic"] = string(trigger.Statistic) + output["time_aggregation"] = string(trigger.TimeAggregation) + + if trigger.MetricName != nil { + output["metric_name"] = *trigger.MetricName + } + + if trigger.MetricResourceURI != nil { + output["metric_resource_id"] = *trigger.MetricResourceURI + } + + if trigger.TimeGrain != nil { + output["time_grain"] = *trigger.TimeGrain + } + + if trigger.TimeWindow != nil { + output["time_window"] = *trigger.TimeWindow + } + + if trigger.Threshold != nil { + output["threshold"] = *trigger.Threshold + } + + metricTriggers = append(metricTriggers, output) + } + + result["metric_trigger"] = metricTriggers + + scaleActions := make([]interface{}, 0) + if v := rule.ScaleAction; v != nil { + action := make(map[string]interface{}) + + action["direction"] = string(v.Direction) + action["type"] = string(v.Type) + + if v.Cooldown != nil { + action["cooldown"] = *v.Cooldown + } + + if val := v.Value; val != nil && *val != "" { 
+ i, err := strconv.Atoi(*val) + if err != nil { + return nil, fmt.Errorf("`value` %q was not convertable to an int: %s", *val, err) + } + action["value"] = i + } + + scaleActions = append(scaleActions, action) + } + + result["scale_action"] = scaleActions + + results = append(results, result) + } + + return results, nil +} + +func flattenAzureRmMonitorAutoScaleSettingFixedDate(input *insights.TimeWindow) []interface{} { + if input == nil { + return []interface{}{} + } + + result := make(map[string]interface{}) + + if timezone := input.TimeZone; timezone != nil { + result["timezone"] = *timezone + } + + if start := input.Start; start != nil { + result["start"] = start.String() + } + + if end := input.End; end != nil { + result["end"] = end.String() + } + + return []interface{}{result} +} + +func flattenAzureRmMonitorAutoScaleSettingRecurrence(input *insights.Recurrence) []interface{} { + if input == nil { + return []interface{}{} + } + + result := make(map[string]interface{}) + + if schedule := input.Schedule; schedule != nil { + + if timezone := schedule.TimeZone; timezone != nil { + result["timezone"] = *timezone + } + + days := make([]string, 0) + if s := schedule.Days; s != nil { + days = *s + } + result["days"] = days + + hours := make([]int, 0) + if schedule.Hours != nil { + for _, v := range *schedule.Hours { + hours = append(hours, int(v)) + } + } + result["hours"] = hours + + minutes := make([]int, 0) + if schedule.Minutes != nil { + for _, v := range *schedule.Minutes { + minutes = append(minutes, int(v)) + } + } + result["minutes"] = minutes + } + + return []interface{}{result} +} + +func flattenAzureRmMonitorAutoScaleSettingNotification(notifications *[]insights.AutoscaleNotification) []interface{} { + results := make([]interface{}, 0) + + if notifications == nil { + return results + } + + for _, notification := range *notifications { + result := make(map[string]interface{}) + + emails := make([]interface{}, 0) + if email := notification.Email; email != 
nil { + block := make(map[string]interface{}) + + if send := email.SendToSubscriptionAdministrator; send != nil { + block["send_to_subscription_administrator"] = *send + } + + if send := email.SendToSubscriptionCoAdministrators; send != nil { + block["send_to_subscription_co_administrator"] = *send + } + + customEmails := make([]interface{}, 0) + if custom := email.CustomEmails; custom != nil { + for _, v := range *custom { + customEmails = append(customEmails, v) + } + } + block["custom_emails"] = customEmails + + emails = append(emails, block) + } + result["email"] = emails + + webhooks := make([]interface{}, 0) + if hooks := notification.Webhooks; hooks != nil { + for _, v := range *hooks { + hook := make(map[string]interface{}) + + if v.ServiceURI != nil { + hook["service_uri"] = *v.ServiceURI + } + + props := make(map[string]string) + for key, value := range v.Properties { + if value != nil { + props[key] = *value + } + } + hook["properties"] = props + webhooks = append(webhooks, hook) + } + } + + result["webhook"] = webhooks + + results = append(results, result) + } + return results +} + +func validateMonitorAutoScaleSettingsTimeZone() schema.SchemaValidateFunc { + // from https://docs.microsoft.com/en-us/rest/api/monitor/autoscalesettings/createorupdate#timewindow + timeZones := []string{ + "Dateline Standard Time", + "UTC-11", + "Hawaiian Standard Time", + "Alaskan Standard Time", + "Pacific Standard Time (Mexico)", + "Pacific Standard Time", + "US Mountain Standard Time", + "Mountain Standard Time (Mexico)", + "Mountain Standard Time", + "Central America Standard Time", + "Central Standard Time", + "Central Standard Time (Mexico)", + "Canada Central Standard Time", + "SA Pacific Standard Time", + "Eastern Standard Time", + "US Eastern Standard Time", + "Venezuela Standard Time", + "Paraguay Standard Time", + "Atlantic Standard Time", + "Central Brazilian Standard Time", + "SA Western Standard Time", + "Pacific SA Standard Time", + "Newfoundland Standard 
Time", + "E. South America Standard Time", + "Argentina Standard Time", + "SA Eastern Standard Time", + "Greenland Standard Time", + "Montevideo Standard Time", + "Bahia Standard Time", + "UTC-02", + "Mid-Atlantic Standard Time", + "Azores Standard Time", + "Cape Verde Standard Time", + "Morocco Standard Time", + "UTC", + "GMT Standard Time", + "Greenwich Standard Time", + "W. Europe Standard Time", + "Central Europe Standard Time", + "Romance Standard Time", + "Central European Standard Time", + "W. Central Africa Standard Time", + "Namibia Standard Time", + "Jordan Standard Time", + "GTB Standard Time", + "Middle East Standard Time", + "Egypt Standard Time", + "Syria Standard Time", + "E. Europe Standard Time", + "South Africa Standard Time", + "FLE Standard Time", + "Turkey Standard Time", + "Israel Standard Time", + "Kaliningrad Standard Time", + "Libya Standard Time", + "Arabic Standard Time", + "Arab Standard Time", + "Belarus Standard Time", + "Russian Standard Time", + "E. Africa Standard Time", + "Iran Standard Time", + "Arabian Standard Time", + "Azerbaijan Standard Time", + "Russia Time Zone 3", + "Mauritius Standard Time", + "Georgian Standard Time", + "Caucasus Standard Time", + "Afghanistan Standard Time", + "West Asia Standard Time", + "Ekaterinburg Standard Time", + "Pakistan Standard Time", + "India Standard Time", + "Sri Lanka Standard Time", + "Nepal Standard Time", + "Central Asia Standard Time", + "Bangladesh Standard Time", + "N. Central Asia Standard Time", + "Myanmar Standard Time", + "SE Asia Standard Time", + "North Asia Standard Time", + "China Standard Time", + "North Asia East Standard Time", + "Singapore Standard Time", + "W. Australia Standard Time", + "Taipei Standard Time", + "Ulaanbaatar Standard Time", + "Tokyo Standard Time", + "Korea Standard Time", + "Yakutsk Standard Time", + "Cen. Australia Standard Time", + "AUS Central Standard Time", + "E. 
Australia Standard Time", + "AUS Eastern Standard Time", + "West Pacific Standard Time", + "Tasmania Standard Time", + "Magadan Standard Time", + "Vladivostok Standard Time", + "Russia Time Zone 10", + "Central Pacific Standard Time", + "Russia Time Zone 11", + "New Zealand Standard Time", + "UTC+12", + "Fiji Standard Time", + "Kamchatka Standard Time", + "Tonga Standard Time", + "Samoa Standard Time", + "Line Islands Standard Time", + } + return validation.StringInSlice(timeZones, false) +} diff --git a/azurerm/resource_arm_monitor_autoscale_setting_test.go b/azurerm/resource_arm_monitor_autoscale_setting_test.go new file mode 100644 index 000000000000..a827e088e604 --- /dev/null +++ b/azurerm/resource_arm_monitor_autoscale_setting_test.go @@ -0,0 +1,949 @@ +package azurerm + +import ( + "fmt" + "net/http" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" +) + +func TestAccAzureRMMonitorAutoScaleSetting_basic(t *testing.T) { + resourceName := "azurerm_monitor_autoscale_setting.test" + ri := tf.AccRandTimeInt() + location := testLocation() + config := testAccAzureRMMonitorAutoScaleSetting_basic(ri, location) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMMonitorAutoScaleSettingDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMMonitorAutoScaleSettingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "profile.#", "1"), + resource.TestCheckResourceAttr(resourceName, "profile.0.name", "metricRules"), + resource.TestCheckResourceAttr(resourceName, "profile.0.rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "notification.#", "0"), + 
resource.TestCheckNoResourceAttr(resourceName, "tags.$type"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMMonitorAutoScaleSetting_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_monitor_autoscale_setting.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMMonitorAutoScaleSettingDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMMonitorAutoScaleSetting_basic(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMMonitorAutoScaleSettingExists(resourceName), + ), + }, + { + Config: testAccAzureRMMonitorAutoScaleSetting_requiresImport(ri, location), + ExpectError: testRequiresImportError("azurerm_monitor_autoscale_setting"), + }, + }, + }) +} + +func TestAccAzureRMMonitorAutoScaleSetting_multipleProfiles(t *testing.T) { + resourceName := "azurerm_monitor_autoscale_setting.test" + ri := tf.AccRandTimeInt() + location := testLocation() + config := testAccAzureRMMonitorAutoScaleSetting_multipleProfiles(ri, location) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMMonitorAutoScaleSettingDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMMonitorAutoScaleSettingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "profile.#", "2"), + resource.TestCheckResourceAttr(resourceName, "profile.0.name", "primary"), + resource.TestCheckResourceAttr(resourceName, "profile.1.name", "secondary"), + ), + }, + }, + }) +} + 
+func TestAccAzureRMMonitorAutoScaleSetting_update(t *testing.T) { + resourceName := "azurerm_monitor_autoscale_setting.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMMonitorAutoScaleSettingDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMMonitorAutoScaleSetting_capacity(ri, location, 1, 3, 2), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMMonitorAutoScaleSettingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "profile.#", "1"), + resource.TestCheckResourceAttr(resourceName, "profile.0.capacity.0.minimum", "1"), + resource.TestCheckResourceAttr(resourceName, "profile.0.capacity.0.maximum", "3"), + resource.TestCheckResourceAttr(resourceName, "profile.0.capacity.0.default", "2"), + ), + }, + { + Config: testAccAzureRMMonitorAutoScaleSetting_capacity(ri, location, 2, 4, 3), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMMonitorAutoScaleSettingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "profile.#", "1"), + resource.TestCheckResourceAttr(resourceName, "profile.0.capacity.0.minimum", "2"), + resource.TestCheckResourceAttr(resourceName, "profile.0.capacity.0.maximum", "4"), + resource.TestCheckResourceAttr(resourceName, "profile.0.capacity.0.default", "3"), + ), + }, + { + Config: testAccAzureRMMonitorAutoScaleSetting_capacity(ri, location, 2, 45, 3), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMMonitorAutoScaleSettingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "profile.#", "1"), + resource.TestCheckResourceAttr(resourceName, "profile.0.capacity.0.minimum", "2"), + 
resource.TestCheckResourceAttr(resourceName, "profile.0.capacity.0.maximum", "45"), + resource.TestCheckResourceAttr(resourceName, "profile.0.capacity.0.default", "3"), + ), + }, + }, + }) +} + +func TestAccAzureRMMonitorAutoScaleSetting_multipleRules(t *testing.T) { + resourceName := "azurerm_monitor_autoscale_setting.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMMonitorAutoScaleSettingDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMMonitorAutoScaleSetting_basic(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMMonitorAutoScaleSettingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "profile.#", "1"), + resource.TestCheckResourceAttr(resourceName, "profile.0.name", "metricRules"), + resource.TestCheckResourceAttr(resourceName, "profile.0.rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "profile.0.rule.0.scale_action.0.direction", "Increase"), + resource.TestCheckResourceAttr(resourceName, "notification.#", "0"), + ), + }, + { + Config: testAccAzureRMMonitorAutoScaleSetting_multipleRules(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMMonitorAutoScaleSettingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "profile.#", "1"), + resource.TestCheckResourceAttr(resourceName, "profile.0.name", "metricRules"), + resource.TestCheckResourceAttr(resourceName, "profile.0.rule.#", "2"), + resource.TestCheckResourceAttr(resourceName, "profile.0.rule.0.scale_action.0.direction", "Increase"), + resource.TestCheckResourceAttr(resourceName, "profile.0.rule.1.scale_action.0.direction", "Decrease"), + resource.TestCheckResourceAttr(resourceName, "notification.#", 
"0"), + ), + }, + }, + }) +} + +func TestAccAzureRMMonitorAutoScaleSetting_customEmails(t *testing.T) { + resourceName := "azurerm_monitor_autoscale_setting.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMMonitorAutoScaleSettingDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMMonitorAutoScaleSetting_email(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMMonitorAutoScaleSettingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "notification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "notification.0.email.#", "1"), + resource.TestCheckResourceAttr(resourceName, "notification.0.email.0.custom_emails.#", "1"), + resource.TestCheckResourceAttr(resourceName, "notification.0.email.0.custom_emails.0", fmt.Sprintf("acctest1-%d@example.com", ri)), + ), + }, + { + Config: testAccAzureRMMonitorAutoScaleSetting_emailUpdated(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMMonitorAutoScaleSettingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "notification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "notification.0.email.#", "1"), + resource.TestCheckResourceAttr(resourceName, "notification.0.email.0.custom_emails.#", "2"), + resource.TestCheckResourceAttr(resourceName, "notification.0.email.0.custom_emails.0", fmt.Sprintf("acctest1-%d@example.com", ri)), + resource.TestCheckResourceAttr(resourceName, "notification.0.email.0.custom_emails.1", fmt.Sprintf("acctest2-%d@example.com", ri)), + ), + }, + }, + }) +} + +func TestAccAzureRMMonitorAutoScaleSetting_recurrence(t *testing.T) { + resourceName := "azurerm_monitor_autoscale_setting.test" + ri := tf.AccRandTimeInt() + location := testLocation() + config := testAccAzureRMMonitorAutoScaleSetting_recurrence(ri, location) + + 
resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMMonitorAutoScaleSettingDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMMonitorAutoScaleSettingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "profile.#", "1"), + resource.TestCheckResourceAttr(resourceName, "profile.0.name", "recurrence"), + resource.TestCheckResourceAttr(resourceName, "profile.0.recurrence.#", "1"), + resource.TestCheckResourceAttr(resourceName, "notification.#", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMMonitorAutoScaleSetting_recurrenceUpdate(t *testing.T) { + resourceName := "azurerm_monitor_autoscale_setting.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMMonitorAutoScaleSettingDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMMonitorAutoScaleSetting_recurrence(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMMonitorAutoScaleSettingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "notification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "profile.0.recurrence.0.days.#", "3"), + resource.TestCheckResourceAttr(resourceName, "profile.0.recurrence.0.days.0", "Monday"), + resource.TestCheckResourceAttr(resourceName, "profile.0.recurrence.0.days.1", "Wednesday"), + resource.TestCheckResourceAttr(resourceName, "profile.0.recurrence.0.days.2", "Friday"), + resource.TestCheckResourceAttr(resourceName, "profile.0.recurrence.0.hours.0", "18"), + resource.TestCheckResourceAttr(resourceName, 
"profile.0.recurrence.0.minutes.0", "0"), + ), + }, + { + Config: testAccAzureRMMonitorAutoScaleSetting_recurrenceUpdated(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMMonitorAutoScaleSettingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "profile.0.recurrence.#", "1"), + resource.TestCheckResourceAttr(resourceName, "profile.0.recurrence.0.days.#", "3"), + resource.TestCheckResourceAttr(resourceName, "profile.0.recurrence.0.days.0", "Monday"), + resource.TestCheckResourceAttr(resourceName, "profile.0.recurrence.0.days.1", "Tuesday"), + resource.TestCheckResourceAttr(resourceName, "profile.0.recurrence.0.days.2", "Wednesday"), + resource.TestCheckResourceAttr(resourceName, "profile.0.recurrence.0.hours.0", "20"), + resource.TestCheckResourceAttr(resourceName, "profile.0.recurrence.0.minutes.0", "15"), + ), + }, + }, + }) +} + +func TestAccAzureRMMonitorAutoScaleSetting_fixedDate(t *testing.T) { + resourceName := "azurerm_monitor_autoscale_setting.test" + ri := tf.AccRandTimeInt() + location := testLocation() + config := testAccAzureRMMonitorAutoScaleSetting_fixedDate(ri, location) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMMonitorAutoScaleSettingDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMMonitorAutoScaleSettingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "profile.#", "1"), + resource.TestCheckResourceAttr(resourceName, "profile.0.name", "fixedDate"), + resource.TestCheckResourceAttr(resourceName, "profile.0.fixed_date.#", "1"), + resource.TestCheckResourceAttr(resourceName, "notification.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func 
testCheckAzureRMMonitorAutoScaleSettingExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + autoscaleSettingName := rs.Primary.Attributes["name"] + resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] + if !hasResourceGroup { + return fmt.Errorf("Bad: no resource group found in state for Monitor AutoScale Setting: %s", autoscaleSettingName) + } + + conn := testAccProvider.Meta().(*ArmClient).autoscaleSettingsClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + + resp, err := conn.Get(ctx, resourceGroup, autoscaleSettingName) + if err != nil { + return fmt.Errorf("Bad: Get on Monitor AutoScale Setting: %+v", err) + } + + if resp.StatusCode == http.StatusNotFound { + return fmt.Errorf("Bad: AutoScale Setting %q (Resource Group: %q) does not exist", autoscaleSettingName, resourceGroup) + } + + return nil + } +} + +func testCheckAzureRMMonitorAutoScaleSettingDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*ArmClient).autoscaleSettingsClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + + for _, rs := range s.RootModule().Resources { + if rs.Type != "azurerm_monitor_autoscale_setting" { + continue + } + + name := rs.Primary.Attributes["name"] + resourceGroup := rs.Primary.Attributes["resource_group_name"] + + resp, err := conn.Get(ctx, resourceGroup, name) + + if err != nil { + return nil + } + + if resp.StatusCode != http.StatusNotFound { + return fmt.Errorf("AutoScale Setting still exists:\n%#v", resp) + } + } + + return nil +} + +func testAccAzureRMMonitorAutoScaleSetting_basic(rInt int, location string) string { + template := testAccAzureRMMonitorAutoScaleSetting_template(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_monitor_autoscale_setting" "test" { + name = "acctestautoscale-%d" + resource_group_name = 
"${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + target_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + + profile { + name = "metricRules" + + capacity { + default = 1 + minimum = 1 + maximum = 10 + } + + rule { + metric_trigger { + metric_name = "Percentage CPU" + metric_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + time_grain = "PT1M" + statistic = "Average" + time_window = "PT5M" + time_aggregation = "Average" + operator = "GreaterThan" + threshold = 75 + } + + scale_action { + direction = "Increase" + type = "ChangeCount" + value = 1 + cooldown = "PT1M" + } + } + } +} +`, template, rInt) +} + +func testAccAzureRMMonitorAutoScaleSetting_requiresImport(rInt int, location string) string { + template := testAccAzureRMMonitorAutoScaleSetting_basic(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_monitor_autoscale_setting" "import" { + name = "${azurerm_monitor_autoscale_setting.test.name}" + resource_group_name = "${azurerm_monitor_autoscale_setting.test.resource_group_name}" + location = "${azurerm_monitor_autoscale_setting.test.location}" + target_resource_id = "${azurerm_monitor_autoscale_setting.test.target_resource_id}" + + profile { + name = "metricRules" + + capacity { + default = 1 + minimum = 1 + maximum = 10 + } + + rule { + metric_trigger { + metric_name = "Percentage CPU" + metric_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + time_grain = "PT1M" + statistic = "Average" + time_window = "PT5M" + time_aggregation = "Average" + operator = "GreaterThan" + threshold = 75 + } + + scale_action { + direction = "Increase" + type = "ChangeCount" + value = 1 + cooldown = "PT1M" + } + } + } +} +`, template) +} + +func testAccAzureRMMonitorAutoScaleSetting_multipleProfiles(rInt int, location string) string { + template := testAccAzureRMMonitorAutoScaleSetting_template(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_monitor_autoscale_setting" 
"test" { + name = "acctestautoscale-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + target_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + + profile { + name = "primary" + + capacity { + default = 1 + minimum = 1 + maximum = 10 + } + + rule { + metric_trigger { + metric_name = "Percentage CPU" + metric_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + time_grain = "PT1M" + statistic = "Average" + time_window = "PT5M" + time_aggregation = "Average" + operator = "GreaterThan" + threshold = 75 + } + + scale_action { + direction = "Increase" + type = "ChangeCount" + value = 1 + cooldown = "PT1M" + } + } + + rule { + metric_trigger { + metric_name = "Percentage CPU" + metric_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + time_grain = "PT1M" + statistic = "Average" + time_window = "PT5M" + time_aggregation = "Average" + operator = "GreaterThan" + threshold = 75 + } + + scale_action { + direction = "Decrease" + type = "ChangeCount" + value = 1 + cooldown = "PT1M" + } + } + } + + profile { + name = "secondary" + + capacity { + default = 1 + minimum = 1 + maximum = 10 + } + + recurrence { + timezone = "Pacific Standard Time" + + days = [ + "Monday", + "Wednesday", + "Friday", + ] + + hours = [18] + minutes = [0] + } + } +} +`, template, rInt) +} + +func testAccAzureRMMonitorAutoScaleSetting_capacity(rInt int, location string, min int, max int, defaultVal int) string { + template := testAccAzureRMMonitorAutoScaleSetting_template(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_monitor_autoscale_setting" "test" { + name = "acctestautoscale-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + target_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + enabled = false + + profile { + name = "metricRules" + + capacity { + default = %d + minimum = %d + maximum = %d + } 
+ + rule { + metric_trigger { + metric_name = "Percentage CPU" + metric_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + time_grain = "PT1M" + statistic = "Average" + time_window = "PT5M" + time_aggregation = "Average" + operator = "GreaterThan" + threshold = 75 + } + + scale_action { + direction = "Increase" + type = "ChangeCount" + value = 1 + cooldown = "PT1M" + } + } + } +} +`, template, rInt, defaultVal, min, max) +} + +func testAccAzureRMMonitorAutoScaleSetting_multipleRules(rInt int, location string) string { + template := testAccAzureRMMonitorAutoScaleSetting_template(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_monitor_autoscale_setting" "test" { + name = "acctestautoscale-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + target_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + enabled = true + + profile { + name = "metricRules" + + capacity { + default = 1 + minimum = 1 + maximum = 10 + } + + rule { + metric_trigger { + metric_name = "Percentage CPU" + metric_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + time_grain = "PT1M" + statistic = "Average" + time_window = "PT5M" + time_aggregation = "Average" + operator = "GreaterThan" + threshold = 75 + } + + scale_action { + direction = "Increase" + type = "ChangeCount" + value = 1 + cooldown = "PT1M" + } + } + + rule { + metric_trigger { + metric_name = "Percentage CPU" + metric_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + time_grain = "PT1M" + statistic = "Average" + time_window = "PT5M" + time_aggregation = "Average" + operator = "LessThan" + threshold = 25 + } + + scale_action { + direction = "Decrease" + type = "ChangeCount" + value = 1 + cooldown = "PT1M" + } + } + } +} +`, template, rInt) +} + +func testAccAzureRMMonitorAutoScaleSetting_email(rInt int, location string) string { + template := testAccAzureRMMonitorAutoScaleSetting_template(rInt, location) 
+ return fmt.Sprintf(` +%s + +resource "azurerm_monitor_autoscale_setting" "test" { + name = "acctestautoscale-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + target_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + + profile { + name = "metricRules" + + capacity { + default = 1 + minimum = 1 + maximum = 10 + } + + rule { + metric_trigger { + metric_name = "Percentage CPU" + metric_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + time_grain = "PT1M" + statistic = "Average" + time_window = "PT5M" + time_aggregation = "Average" + operator = "GreaterThan" + threshold = 75 + } + + scale_action { + direction = "Increase" + type = "ChangeCount" + value = 1 + cooldown = "PT1M" + } + } + } + + notification { + email { + send_to_subscription_administrator = false + send_to_subscription_co_administrator = false + custom_emails = ["acctest1-%d@example.com"] + } + } +} +`, template, rInt, rInt) +} + +func testAccAzureRMMonitorAutoScaleSetting_emailUpdated(rInt int, location string) string { + template := testAccAzureRMMonitorAutoScaleSetting_template(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_monitor_autoscale_setting" "test" { + name = "acctestautoscale-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + target_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + + profile { + name = "metricRules" + + capacity { + default = 1 + minimum = 1 + maximum = 10 + } + + rule { + metric_trigger { + metric_name = "Percentage CPU" + metric_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + time_grain = "PT1M" + statistic = "Average" + time_window = "PT5M" + time_aggregation = "Average" + operator = "GreaterThan" + threshold = 75 + } + + scale_action { + direction = "Increase" + type = "ChangeCount" + value = 1 + cooldown = "PT1M" + } + } + } + + notification { + email { + 
send_to_subscription_administrator = false + send_to_subscription_co_administrator = false + custom_emails = ["acctest1-%d@example.com", "acctest2-%d@example.com"] + } + } +} +`, template, rInt, rInt, rInt) +} + +func testAccAzureRMMonitorAutoScaleSetting_recurrence(rInt int, location string) string { + template := testAccAzureRMMonitorAutoScaleSetting_template(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_monitor_autoscale_setting" "test" { + name = "acctestautoscale-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + target_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + + profile { + name = "recurrence" + + capacity { + default = 1 + minimum = 1 + maximum = 10 + } + + recurrence { + timezone = "Pacific Standard Time" + + days = [ + "Monday", + "Wednesday", + "Friday", + ] + + hours = [18] + minutes = [0] + } + } + + notification { + email { + send_to_subscription_administrator = false + send_to_subscription_co_administrator = false + } + } +} +`, template, rInt) +} + +func testAccAzureRMMonitorAutoScaleSetting_recurrenceUpdated(rInt int, location string) string { + template := testAccAzureRMMonitorAutoScaleSetting_template(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_monitor_autoscale_setting" "test" { + name = "acctestautoscale-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + target_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + + profile { + name = "recurrence" + + capacity { + default = 1 + minimum = 1 + maximum = 10 + } + + recurrence { + timezone = "Pacific Standard Time" + + days = [ + "Monday", + "Tuesday", + "Wednesday", + ] + + hours = [20] + minutes = [15] + } + } + + notification { + email { + send_to_subscription_administrator = false + send_to_subscription_co_administrator = false + } + } +} +`, template, rInt) +} + +func 
testAccAzureRMMonitorAutoScaleSetting_fixedDate(rInt int, location string) string { + template := testAccAzureRMMonitorAutoScaleSetting_template(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_monitor_autoscale_setting" "test" { + name = "acctestautoscale-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + target_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + + profile { + name = "fixedDate" + + capacity { + default = 1 + minimum = 1 + maximum = 10 + } + + fixed_date { + timezone = "Pacific Standard Time" + start = "2020-06-18T00:00:00Z" + end = "2020-06-18T23:59:59Z" + } + } +} +`, template, rInt) +} + +func testAccAzureRMMonitorAutoScaleSetting_template(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_virtual_network" "test" { + name = "acctvn-%d" + address_space = ["10.0.0.0/16"] + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" +} + +resource "azurerm_subnet" "test" { + name = "internal" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.0.2.0/24" +} + +resource "azurerm_virtual_machine_scale_set" "test" { + name = "acctvmss-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + upgrade_policy_mode = "Manual" + + sku { + name = "Standard_F2" + tier = "Standard" + capacity = 2 + } + + os_profile { + computer_name_prefix = "testvm-%d" + admin_username = "myadmin" + admin_password = "Passwword1234" + } + + network_profile { + name = "TestNetworkProfile-%d" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + subnet_id = "${azurerm_subnet.test.id}" + primary = true + } + } + + 
storage_profile_os_disk { + name = "" + caching = "ReadWrite" + create_option = "FromImage" + managed_disk_type = "StandardSSD_LRS" + } + + storage_profile_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, rInt, location, rInt, rInt, rInt, rInt) +} diff --git a/azurerm/resource_arm_monitor_log_profile_test.go b/azurerm/resource_arm_monitor_log_profile_test.go index db897e834747..5cd8e84f736a 100644 --- a/azurerm/resource_arm_monitor_log_profile_test.go +++ b/azurerm/resource_arm_monitor_log_profile_test.go @@ -51,7 +51,7 @@ func testAccAzureRMMonitorLogProfile_basic(t *testing.T) { ri := tf.AccRandTimeInt() rs := acctest.RandString(10) - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMLogProfileDestroy, @@ -82,7 +82,7 @@ func testAccAzureRMMonitorLogProfile_requiresImport(t *testing.T) { rs := acctest.RandString(10) location := testLocation() - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMLogProfileDestroy, @@ -106,7 +106,7 @@ func testAccAzureRMMonitorLogProfile_servicebus(t *testing.T) { ri := tf.AccRandTimeInt() rs := acctest.RandString(10) - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMLogProfileDestroy, @@ -126,7 +126,7 @@ func testAccAzureRMMonitorLogProfile_complete(t *testing.T) { ri := tf.AccRandTimeInt() rs := acctest.RandString(10) - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMLogProfileDestroy, @@ -147,7 +147,7 @@ func 
testAccAzureRMMonitorLogProfile_disappears(t *testing.T) { rs := acctest.RandString(10) config := testAccAzureRMMonitorLogProfile_basicConfig(ri, rs, testLocation()) - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMLogProfileDestroy, diff --git a/azurerm/resource_arm_monitor_metric_alertrule.go b/azurerm/resource_arm_monitor_metric_alertrule.go new file mode 100644 index 000000000000..f0d4ce9b7ded --- /dev/null +++ b/azurerm/resource_arm_monitor_metric_alertrule.go @@ -0,0 +1,443 @@ +package azurerm + +import ( + "fmt" + "log" + "strings" + + "github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceArmMonitorMetricAlertRule() *schema.Resource { + return &schema.Resource{ + Create: resourceArmMonitorMetricAlertRuleCreateUpdate, + Read: resourceArmMonitorMetricAlertRuleRead, + Update: resourceArmMonitorMetricAlertRuleCreateUpdate, + Delete: resourceArmMonitorMetricAlertRuleDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "resource_group_name": resourceGroupNameSchema(), + + "location": locationSchema(), + + "description": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "enabled": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + + "resource_id": { + Type: schema.TypeString, + Required: true, + }, + + "metric_name": { + Type: schema.TypeString, + Required: true, + }, + + "operator": { + Type: schema.TypeString, + Required: true, 
+ DiffSuppressFunc: ignoreCaseDiffSuppressFunc, + ValidateFunc: validation.StringInSlice([]string{ + string(insights.ConditionOperatorGreaterThan), + string(insights.ConditionOperatorGreaterThanOrEqual), + string(insights.ConditionOperatorLessThan), + string(insights.ConditionOperatorLessThanOrEqual), + }, true), + }, + + "threshold": { + Type: schema.TypeFloat, + Required: true, + }, + + "period": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateIso8601Duration(), + }, + + "aggregation": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: ignoreCaseDiffSuppressFunc, + ValidateFunc: validation.StringInSlice([]string{ + string(insights.TimeAggregationOperatorAverage), + string(insights.TimeAggregationOperatorLast), + string(insights.TimeAggregationOperatorMaximum), + string(insights.TimeAggregationOperatorMinimum), + string(insights.TimeAggregationOperatorTotal), + }, true), + }, + + "email_action": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "send_to_service_owners": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "custom_emails": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + + "webhook_action": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service_uri": { + Type: schema.TypeString, + Required: true, + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + + "tags": { + Type: schema.TypeMap, + Optional: true, + Computed: true, + ValidateFunc: validateMetricAlertRuleTags, + }, + }, + } +} + +func resourceArmMonitorMetricAlertRuleCreateUpdate(d *schema.ResourceData, meta interface{}) error { + client := 
meta.(*ArmClient).monitorAlertRulesClient + ctx := meta.(*ArmClient).StopContext + + log.Printf("[INFO] preparing arguments for AzureRM Alert Rule creation.") + + name := d.Get("name").(string) + resourceGroup := d.Get("resource_group_name").(string) + + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Alert Rule %q (Resource Group %q): %s", name, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_monitor_metric_alertrule", *existing.ID) + } + } + + location := azureRMNormalizeLocation(d.Get("location").(string)) + tags := d.Get("tags").(map[string]interface{}) + + alertRule, err := expandAzureRmMonitorMetricThresholdAlertRule(d) + if err != nil { + return err + } + + alertRuleResource := insights.AlertRuleResource{ + Name: &name, + Location: &location, + Tags: expandTags(tags), + AlertRule: alertRule, + } + + if _, err = client.CreateOrUpdate(ctx, resourceGroup, name, alertRuleResource); err != nil { + return err + } + + read, err := client.Get(ctx, resourceGroup, name) + if err != nil { + return err + } + if read.ID == nil { + return fmt.Errorf("Cannot read AzureRM Alert Rule %q (Resource Group %s) ID", name, resourceGroup) + } + + d.SetId(*read.ID) + + return resourceArmMonitorMetricAlertRuleRead(d, meta) +} + +func resourceArmMonitorMetricAlertRuleRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).monitorAlertRulesClient + ctx := meta.(*ArmClient).StopContext + + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return err + } + name := id.Path["alertrules"] + resourceGroup := id.ResourceGroup + + resp, err := client.Get(ctx, resourceGroup, name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[DEBUG] Metric Alert Rule %q 
(resource group %q) was not found - removing from state", name, resourceGroup) + d.SetId("") + return nil + } + + return fmt.Errorf("Error making Read request on AzureRM Metric Alert Rule %q: %+v", name, err) + } + + d.Set("name", name) + d.Set("resource_group_name", resourceGroup) + if location := resp.Location; location != nil { + d.Set("location", azureRMNormalizeLocation(*location)) + } + + if alertRule := resp.AlertRule; alertRule != nil { + d.Set("description", alertRule.Description) + d.Set("enabled", alertRule.IsEnabled) + + ruleCondition := alertRule.Condition + + if ruleCondition != nil { + if thresholdRuleCondition, ok := ruleCondition.AsThresholdRuleCondition(); ok && thresholdRuleCondition != nil { + d.Set("operator", string(thresholdRuleCondition.Operator)) + d.Set("threshold", *thresholdRuleCondition.Threshold) + d.Set("period", thresholdRuleCondition.WindowSize) + d.Set("aggregation", string(thresholdRuleCondition.TimeAggregation)) + + dataSource := thresholdRuleCondition.DataSource + + if dataSource != nil { + if metricDataSource, ok := dataSource.AsRuleMetricDataSource(); ok && metricDataSource != nil { + d.Set("resource_id", metricDataSource.ResourceURI) + d.Set("metric_name", metricDataSource.MetricName) + } + } + } + } + + email_actions := make([]interface{}, 0) + webhook_actions := make([]interface{}, 0) + + for _, ruleAction := range *alertRule.Actions { + if emailAction, ok := ruleAction.AsRuleEmailAction(); ok && emailAction != nil { + email_action := make(map[string]interface{}, 1) + + if sendToOwners := emailAction.SendToServiceOwners; sendToOwners != nil { + email_action["send_to_service_owners"] = *sendToOwners + } + + custom_emails := make([]string, 0) + if s := emailAction.CustomEmails; s != nil { + custom_emails = *s + } + email_action["custom_emails"] = custom_emails + + email_actions = append(email_actions, email_action) + } else if webhookAction, ok := ruleAction.AsRuleWebhookAction(); ok && webhookAction != nil { + webhook_action 
:= make(map[string]interface{}, 1) + + webhook_action["service_uri"] = *webhookAction.ServiceURI + + properties := make(map[string]string) + for k, v := range webhookAction.Properties { + if k != "$type" { + if v != nil { + properties[k] = *v + } + } + } + webhook_action["properties"] = properties + + webhook_actions = append(webhook_actions, webhook_action) + } + } + + d.Set("email_action", email_actions) + d.Set("webhook_action", webhook_actions) + } + + // Return a new tag map filtered by the specified tag names. + tagMap := filterTags(resp.Tags, "$type") + + flattenAndSetTags(d, tagMap) + + return nil +} + +func resourceArmMonitorMetricAlertRuleDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).monitorAlertRulesClient + ctx := meta.(*ArmClient).StopContext + + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return err + } + name := id.Path["alertrules"] + resourceGroup := id.ResourceGroup + + resp, err := client.Delete(ctx, resourceGroup, name) + if err != nil { + if utils.ResponseWasNotFound(resp) { + return nil + } + + return fmt.Errorf("Error deleting Metric Alert Rule %q (resource group %q): %+v", name, resourceGroup, err) + } + + return err +} + +func expandAzureRmMonitorMetricThresholdAlertRule(d *schema.ResourceData) (*insights.AlertRule, error) { + name := d.Get("name").(string) + + resource := d.Get("resource_id").(string) + metric_name := d.Get("metric_name").(string) + + metricDataSource := insights.RuleMetricDataSource{ + ResourceURI: &resource, + MetricName: &metric_name, + } + + operator := d.Get("operator").(string) + threshold := d.Get("threshold").(float64) + period := d.Get("period").(string) + aggregation := d.Get("aggregation").(string) + + thresholdRuleCondition := insights.ThresholdRuleCondition{ + DataSource: metricDataSource, + Operator: insights.ConditionOperator(operator), + Threshold: &threshold, + TimeAggregation: insights.TimeAggregationOperator(aggregation), + WindowSize: &period, + } 
+ + actions := make([]insights.BasicRuleAction, 0, 2) + + // Email action + + email_actions := d.Get("email_action").([]interface{}) + + if len(email_actions) > 0 { + email_action := email_actions[0].(map[string]interface{}) + emailAction := insights.RuleEmailAction{} + + if v, ok := email_action["custom_emails"]; ok { + custom_emails := v.([]interface{}) + + customEmails := make([]string, 0) + for _, customEmail := range custom_emails { + custom_email := customEmail.(string) + customEmails = append(customEmails, custom_email) + } + + emailAction.CustomEmails = &customEmails + } + + if v, ok := email_action["send_to_service_owners"]; ok { + sendToServiceOwners := v.(bool) + emailAction.SendToServiceOwners = &sendToServiceOwners + } + + actions = append(actions, emailAction) + } + + // Webhook action + + webhook_actions := d.Get("webhook_action").([]interface{}) + + if len(webhook_actions) > 0 { + webhook_action := webhook_actions[0].(map[string]interface{}) + + service_uri := webhook_action["service_uri"].(string) + + webhook_properties := make(map[string]*string) + + if v, ok := webhook_action["properties"]; ok { + properties := v.(map[string]interface{}) + + for property_key, property_value := range properties { + property_string := property_value.(string) + webhook_properties[property_key] = &property_string + } + } + + webhookAction := insights.RuleWebhookAction{ + ServiceURI: &service_uri, + Properties: webhook_properties, + } + + actions = append(actions, webhookAction) + } + + enabled := d.Get("enabled").(bool) + + alertRule := insights.AlertRule{ + Name: &name, + Condition: &thresholdRuleCondition, + Actions: &actions, + IsEnabled: &enabled, + } + + if v, ok := d.GetOk("description"); ok { + description := v.(string) + alertRule.Description = &description + } + + return &alertRule, nil +} + +func validateMonitorMetricAlertRuleTags(v interface{}, f string) (warnings []string, errors []error) { + // Normal validation required by any AzureRM resource. 
+ warnings, errors = validateAzureRMTags(v, f) + + tagsMap := v.(map[string]interface{}) + + for k := range tagsMap { + if strings.EqualFold(k, "$type") { + errors = append(errors, fmt.Errorf("the %q is not allowed as tag name", k)) + } + } + + return warnings, errors +} diff --git a/azurerm/resource_arm_monitor_metric_alertrule_test.go b/azurerm/resource_arm_monitor_metric_alertrule_test.go new file mode 100644 index 000000000000..b0feadac65f6 --- /dev/null +++ b/azurerm/resource_arm_monitor_metric_alertrule_test.go @@ -0,0 +1,336 @@ +package azurerm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func TestValidateMonitorMetricAlertRuleTags(t *testing.T) { + cases := []struct { + Name string + Value map[string]interface{} + ErrCount int + }{ + { + Name: "Single Valid", + Value: map[string]interface{}{ + "hello": "world", + }, + ErrCount: 0, + }, + { + Name: "Single Invalid", + Value: map[string]interface{}{ + "$Type": "hello/world", + }, + ErrCount: 1, + }, + { + Name: "Single Invalid lowercase", + Value: map[string]interface{}{ + "$type": "hello/world", + }, + ErrCount: 1, + }, + { + Name: "Multiple Valid", + Value: map[string]interface{}{ + "hello": "world", + "foo": "bar", + }, + ErrCount: 0, + }, + { + Name: "Multiple Invalid", + Value: map[string]interface{}{ + "hello": "world", + "$type": "Microsoft.Foo/Bar", + }, + ErrCount: 1, + }, + } + + for _, tc := range cases { + _, errors := validateMonitorMetricAlertRuleTags(tc.Value, "azurerm_metric_alert_rule") + + if len(errors) != tc.ErrCount { + t.Fatalf("Expected %q to return %d errors but returned %d", tc.Name, tc.ErrCount, len(errors)) + } + } +} + +func TestAccAzureRMMonitorMetricAlertRule_virtualMachineCpu(t *testing.T) { + resourceName := 
"azurerm_monitor_metric_alertrule.test" + ri := tf.AccRandTimeInt() + preConfig := testAccAzureRMMonitorMetricAlertRule_virtualMachineCpu(ri, testLocation(), true) + postConfig := testAccAzureRMMonitorMetricAlertRule_virtualMachineCpu(ri, testLocation(), false) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMMonitorMetricAlertRuleDestroy, + Steps: []resource.TestStep{ + { + Config: preConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMMonitorMetricAlertRuleExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "enabled", "true"), + resource.TestCheckNoResourceAttr(resourceName, "tags.$type"), + ), + }, + { + Config: postConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMMonitorMetricAlertRuleExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "enabled", "false"), + resource.TestCheckNoResourceAttr(resourceName, "tags.$type"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMMonitorMetricAlertRuleExists(resourceName), + resource.TestCheckNoResourceAttr(resourceName, "tags.$type"), + ), + }, + }, + }) +} + +func TestAccAzureRMMonitorMetricAlertRule_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_monitor_metric_alertrule.test" + ri := tf.AccRandTimeInt() + location := testLocation() + enabled := true + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMMonitorMetricAlertRuleDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMMonitorMetricAlertRule_virtualMachineCpu(ri, location, enabled), + Check: resource.ComposeTestCheckFunc( + 
testCheckAzureRMMonitorMetricAlertRuleExists(resourceName), + ), + }, + { + Config: testAccAzureRMMonitorMetricAlertRule_requiresImport(ri, location, enabled), + ExpectError: testRequiresImportError("azurerm_monitor_metric_alertrule"), + }, + }, + }) +} + +func TestAccAzureRMMonitorMetricAlertRule_sqlDatabaseStorage(t *testing.T) { + resourceName := "azurerm_monitor_metric_alertrule.test" + ri := tf.AccRandTimeInt() + config := testAccAzureRMMonitorMetricAlertRule_sqlDatabaseStorage(ri, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMMonitorMetricAlertRuleDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMMonitorMetricAlertRuleExists(resourceName), + resource.TestCheckNoResourceAttr(resourceName, "tags.$type"), + ), + }, + }, + }) +} + +func testCheckAzureRMMonitorMetricAlertRuleExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + // Ensure we have enough information in state to look up in API + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + name := rs.Primary.Attributes["name"] + resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] + if !hasResourceGroup { + return fmt.Errorf("Bad: no resource group found in state for Alert Rule: %s", name) + } + + client := testAccProvider.Meta().(*ArmClient).monitorAlertRulesClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + + resp, err := client.Get(ctx, resourceGroup, name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return fmt.Errorf("Bad: Alert Rule %q (resource group: %q) does not exist", name, resourceGroup) + } + + return fmt.Errorf("Bad: Get on monitorAlertRulesClient: %+v", err) + } + + return nil + } +} + +func 
testCheckAzureRMMonitorMetricAlertRuleDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*ArmClient).monitorAlertRulesClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + + for _, rs := range s.RootModule().Resources { + if rs.Type != "azurerm_monitor_metric_alertrule" { + continue + } + + name := rs.Primary.Attributes["name"] + resourceGroup := rs.Primary.Attributes["resource_group_name"] + + resp, err := client.Get(ctx, resourceGroup, name) + + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return nil + } + + return err + } + + return fmt.Errorf("Alert Rule still exists:\n%#v", resp) + } + + return nil +} + +func testAccAzureRMMonitorMetricAlertRule_virtualMachineCpu(rInt int, location string, enabled bool) string { + template := testAccAzureRMVirtualMachine_basicLinuxMachine_managedDisk_explicit(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_monitor_metric_alertrule" "test" { + name = "${azurerm_virtual_machine.test.name}-cpu" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + + description = "An alert rule to watch the metric Percentage CPU" + + enabled = %t + + resource_id = "${azurerm_virtual_machine.test.id}" + metric_name = "Percentage CPU" + operator = "GreaterThan" + threshold = 75 + aggregation = "Average" + period = "PT5M" + + email_action { + send_to_service_owners = false + + custom_emails = [ + "support@azure.microsoft.com", + ] + } + + webhook_action { + service_uri = "https://requestb.in/18jamc41" + + properties = { + severity = "incredible" + acceptance_test = "true" + } + } +} +`, template, enabled) +} + +func testAccAzureRMMonitorMetricAlertRule_requiresImport(rInt int, location string, enabled bool) string { + template := testAccAzureRMMonitorMetricAlertRule_virtualMachineCpu(rInt, location, enabled) + return fmt.Sprintf(` +%s + +resource "azurerm_monitor_metric_alertrule" "import" { + name = 
"${azurerm_monitor_metric_alertrule.test.name}" + resource_group_name = "${azurerm_monitor_metric_alertrule.test.resource_group_name}" + location = "${azurerm_monitor_metric_alertrule.test.location}" + description = "${azurerm_monitor_metric_alertrule.test.description}" + enabled = "${azurerm_monitor_metric_alertrule.test.enabled}" + + resource_id = "${azurerm_virtual_machine.test.id}" + metric_name = "Percentage CPU" + operator = "GreaterThan" + threshold = 75 + aggregation = "Average" + period = "PT5M" + + email_action { + send_to_service_owners = false + + custom_emails = [ + "support@azure.microsoft.com", + ] + } + + webhook_action { + service_uri = "https://requestb.in/18jamc41" + + properties = { + severity = "incredible" + acceptance_test = "true" + } + } +} +`, template) +} + +func testAccAzureRMMonitorMetricAlertRule_sqlDatabaseStorage(rInt int, location string) string { + basicSqlServerDatabase := testAccAzureRMSqlDatabase_basic(rInt, location) + + return fmt.Sprintf(` +%s + +resource "azurerm_monitor_metric_alertrule" "test" { + name = "${azurerm_sql_database.test.name}-storage" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + + description = "An alert rule to watch the metric Storage" + + enabled = true + + resource_id = "${azurerm_sql_database.test.id}" + metric_name = "storage" + operator = "GreaterThan" + threshold = 1073741824 + aggregation = "Maximum" + period = "PT10M" + + email_action { + send_to_service_owners = false + + custom_emails = [ + "support@azure.microsoft.com", + ] + } + + webhook_action { + service_uri = "https://requestb.in/18jamc41" + + properties = { + severity = "incredible" + acceptance_test = "true" + } + } +} +`, basicSqlServerDatabase) +} diff --git a/azurerm/resource_arm_mssql_elasticpool.go b/azurerm/resource_arm_mssql_elasticpool.go index 0652777553e9..f64bd09395df 100644 --- a/azurerm/resource_arm_mssql_elasticpool.go +++ 
b/azurerm/resource_arm_mssql_elasticpool.go @@ -3,7 +3,6 @@ package azurerm import ( "fmt" "log" - "math" "strings" "github.com/Azure/azure-sdk-for-go/services/preview/sql/mgmt/2017-10-01-preview/sql" @@ -160,10 +159,19 @@ func resourceArmMsSqlElasticPool() *schema.Resource { }, "max_size_bytes": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ValidateFunc: validation.IntAtLeast(0), + Type: schema.TypeInt, + Optional: true, + Computed: true, + ConflictsWith: []string{"max_size_gb"}, + ValidateFunc: validation.IntAtLeast(0), + }, + + "max_size_gb": { + Type: schema.TypeFloat, + Optional: true, + Computed: true, + ConflictsWith: []string{"max_size_bytes"}, + ValidateFunc: validate.FloatAtLeast(0), }, "zone_redundant": { @@ -176,80 +184,8 @@ func resourceArmMsSqlElasticPool() *schema.Resource { CustomizeDiff: func(diff *schema.ResourceDiff, v interface{}) error { - name, _ := diff.GetOk("sku.0.name") - capacity, _ := diff.GetOk("sku.0.capacity") - minCapacity, _ := diff.GetOk("per_database_settings.0.min_capacity") - maxCapacity, _ := diff.GetOk("per_database_settings.0.max_capacity") - - if strings.HasPrefix(strings.ToLower(name.(string)), "gp_") { - - if capacity.(int) > 24 { - return fmt.Errorf("GeneralPurpose pricing tier only supports upto 24 vCores") - } - - if capacity.(int) < 1 { - return fmt.Errorf("GeneralPurpose pricing tier must have a minimum of 1 vCores") - } - - switch { - case capacity.(int) == 1: - case capacity.(int) == 2: - case capacity.(int) == 4: - case capacity.(int) == 8: - case capacity.(int) == 16: - case capacity.(int) == 24: - default: - return fmt.Errorf("GeneralPurpose pricing tier must have a capacity of 1, 2, 4, 8, 16, or 24 vCores") - } - } - - if strings.HasPrefix(strings.ToLower(name.(string)), "bc_") { - if capacity.(int) > 80 { - return fmt.Errorf("BusinessCritical pricing tier only supports upto 80 vCores") - } - - if capacity.(int) < 2 { - return fmt.Errorf("BusinessCritical pricing tier must have a minimum of 2 
vCores") - } - - switch { - case capacity.(int) == 1: - case capacity.(int) == 2: - case capacity.(int) == 4: - case capacity.(int) == 8: - case capacity.(int) == 16: - case capacity.(int) == 24: - case capacity.(int) == 32: - case capacity.(int) == 40: - case capacity.(int) == 80: - default: - return fmt.Errorf("BusinessCritical pricing tier must have a capacity of 2, 4, 8, 16, 24, 32, 40, or 80 vCores") - } - } - - // Additional checks based of SKU type... - if strings.HasPrefix(strings.ToLower(name.(string)), "gp_") || strings.HasPrefix(strings.ToLower(name.(string)), "bc_") { - // vCore based - if maxCapacity.(float64) > float64(capacity.(int)) { - return fmt.Errorf("BusinessCritical and GeneralPurpose pricing tiers perDatabaseSettings maxCapacity must not be higher than the SKUs capacity value") - } - - if minCapacity.(float64) > maxCapacity.(float64) { - return fmt.Errorf("perDatabaseSettings maxCapacity must be greater than or equal to the perDatabaseSettings minCapacity value") - } - } else { - // DTU based - if maxCapacity.(float64) != math.Trunc(maxCapacity.(float64)) { - return fmt.Errorf("BasicPool, StandardPool, and PremiumPool SKUs must have whole numbers as their maxCapacity") - } - - if minCapacity.(float64) != math.Trunc(minCapacity.(float64)) { - return fmt.Errorf("BasicPool, StandardPool, and PremiumPool SKUs must have whole numbers as their minCapacity") - } - - if minCapacity.(float64) < 0.0 { - return fmt.Errorf("BasicPool, StandardPool, and PremiumPool SKUs per_database_settings min_capacity must be equal to or greater than zero") - } + if err := azure.MSSQLElasticPoolValidateSKU(diff); err != nil { + return err } return nil @@ -294,8 +230,15 @@ func resourceArmMsSqlElasticPoolCreateUpdate(d *schema.ResourceData, meta interf }, } - if v, ok := d.GetOk("max_size_bytes"); ok { - elasticPool.MaxSizeBytes = utils.Int64(int64(v.(int))) + if d.HasChange("max_size_gb") { + if v, ok := d.GetOk("max_size_gb"); ok { + maxSizeBytes := v.(float64) * 
1073741824 + elasticPool.MaxSizeBytes = utils.Int64(int64(maxSizeBytes)) + } + } else { + if v, ok := d.GetOk("max_size_bytes"); ok { + elasticPool.MaxSizeBytes = utils.Int64(int64(v.(int))) + } } future, err := client.CreateOrUpdate(ctx, resGroup, serverName, elasticPoolName, elasticPool) @@ -352,7 +295,14 @@ func resourceArmMsSqlElasticPoolRead(d *schema.ResourceData, meta interface{}) e } if properties := resp.ElasticPoolProperties; properties != nil { - d.Set("max_size_bytes", properties.MaxSizeBytes) + // Basic tier does not return max_size_bytes, so we need to skip setting this + // value if the pricing tier is equal to Basic + if tier, ok := d.GetOk("sku.0.tier"); ok { + if !strings.EqualFold(tier.(string), "Basic") { + d.Set("max_size_gb", float64(*properties.MaxSizeBytes/int64(1073741824))) + d.Set("max_size_bytes", properties.MaxSizeBytes) + } + } d.Set("zone_redundant", properties.ZoneRedundant) //todo remove in 2.0 diff --git a/azurerm/resource_arm_mssql_elasticpool_test.go b/azurerm/resource_arm_mssql_elasticpool_test.go index 52e27a20a3ff..4bb9d015e265 100644 --- a/azurerm/resource_arm_mssql_elasticpool_test.go +++ b/azurerm/resource_arm_mssql_elasticpool_test.go @@ -26,11 +26,10 @@ func TestAccAzureRMMsSqlElasticPool_basic_DTU(t *testing.T) { Check: resource.ComposeTestCheckFunc( testCheckAzureRMMsSqlElasticPoolExists(resourceName), resource.TestCheckResourceAttr(resourceName, "sku.0.name", "BasicPool"), - resource.TestCheckResourceAttr(resourceName, "sku.0.tier", "Basic"), resource.TestCheckResourceAttr(resourceName, "sku.0.capacity", "50"), resource.TestCheckResourceAttr(resourceName, "per_database_settings.0.min_capacity", "0"), resource.TestCheckResourceAttr(resourceName, "per_database_settings.0.max_capacity", "5"), - resource.TestCheckResourceAttrSet(resourceName, "max_size_bytes"), + resource.TestCheckResourceAttrSet(resourceName, "max_size_gb"), resource.TestCheckResourceAttrSet(resourceName, "zone_redundant"), ), }, @@ -82,18 +81,18 @@ func 
TestAccAzureRMMsSqlElasticPool_standard_DTU(t *testing.T) { Check: resource.ComposeTestCheckFunc( testCheckAzureRMMsSqlElasticPoolExists(resourceName), resource.TestCheckResourceAttr(resourceName, "sku.0.name", "StandardPool"), - resource.TestCheckResourceAttr(resourceName, "sku.0.tier", "Standard"), resource.TestCheckResourceAttr(resourceName, "sku.0.capacity", "50"), resource.TestCheckResourceAttr(resourceName, "per_database_settings.0.min_capacity", "0"), resource.TestCheckResourceAttr(resourceName, "per_database_settings.0.max_capacity", "50"), - resource.TestCheckResourceAttrSet(resourceName, "max_size_bytes"), + resource.TestCheckResourceAttrSet(resourceName, "max_size_gb"), resource.TestCheckResourceAttrSet(resourceName, "zone_redundant"), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"max_size_gb"}, }, }, }) @@ -114,24 +113,55 @@ func TestAccAzureRMMsSqlElasticPool_basic_vCore(t *testing.T) { Check: resource.ComposeTestCheckFunc( testCheckAzureRMMsSqlElasticPoolExists(resourceName), resource.TestCheckResourceAttr(resourceName, "sku.0.name", "GP_Gen5"), - resource.TestCheckResourceAttr(resourceName, "sku.0.tier", "GeneralPurpose"), resource.TestCheckResourceAttr(resourceName, "sku.0.capacity", "4"), - resource.TestCheckResourceAttr(resourceName, "sku.0.family", "Gen5"), resource.TestCheckResourceAttr(resourceName, "per_database_settings.0.min_capacity", "0.25"), resource.TestCheckResourceAttr(resourceName, "per_database_settings.0.max_capacity", "4"), - resource.TestCheckResourceAttrSet(resourceName, "max_size_bytes"), + resource.TestCheckResourceAttrSet(resourceName, "max_size_gb"), resource.TestCheckResourceAttrSet(resourceName, "zone_redundant"), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: 
true, + ImportStateVerifyIgnore: []string{"max_size_gb"}, }, }, }) } +func TestAccAzureRMMsSqlElasticPool_basic_vCore_MaxSizeBytes(t *testing.T) { + resourceName := "azurerm_mssql_elasticpool.test" + ri := tf.AccRandTimeInt() + config := testAccAzureRMMsSqlElasticPool_basic_vCore_MaxSizeBytes(ri, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMMsSqlElasticPoolDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMMsSqlElasticPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "sku.0.name", "GP_Gen5"), + resource.TestCheckResourceAttr(resourceName, "sku.0.capacity", "4"), + resource.TestCheckResourceAttr(resourceName, "per_database_settings.0.min_capacity", "0.25"), + resource.TestCheckResourceAttr(resourceName, "per_database_settings.0.max_capacity", "4"), + resource.TestCheckResourceAttr(resourceName, "max_size_bytes", "214748364800"), + resource.TestCheckResourceAttrSet(resourceName, "max_size_gb"), + resource.TestCheckResourceAttrSet(resourceName, "zone_redundant"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"max_size_gb"}, + }, + }, + }) +} func TestAccAzureRMMsSqlElasticPool_disappears(t *testing.T) { resourceName := "azurerm_mssql_elasticpool.test" ri := tf.AccRandTimeInt() @@ -147,7 +177,6 @@ func TestAccAzureRMMsSqlElasticPool_disappears(t *testing.T) { Check: resource.ComposeTestCheckFunc( testCheckAzureRMMsSqlElasticPoolExists(resourceName), resource.TestCheckResourceAttr(resourceName, "sku.0.name", "StandardPool"), - resource.TestCheckResourceAttr(resourceName, "sku.0.tier", "Standard"), resource.TestCheckResourceAttr(resourceName, "sku.0.capacity", "50"), resource.TestCheckResourceAttr(resourceName, "per_database_settings.0.min_capacity", "0"), 
resource.TestCheckResourceAttr(resourceName, "per_database_settings.0.max_capacity", "50"), @@ -176,7 +205,6 @@ func TestAccAzureRMMsSqlElasticPool_resize_DTU(t *testing.T) { Check: resource.ComposeTestCheckFunc( testCheckAzureRMMsSqlElasticPoolExists(resourceName), resource.TestCheckResourceAttr(resourceName, "sku.0.name", "StandardPool"), - resource.TestCheckResourceAttr(resourceName, "sku.0.tier", "Standard"), resource.TestCheckResourceAttr(resourceName, "sku.0.capacity", "50"), resource.TestCheckResourceAttr(resourceName, "per_database_settings.0.min_capacity", "0"), resource.TestCheckResourceAttr(resourceName, "per_database_settings.0.max_capacity", "50"), @@ -187,7 +215,6 @@ func TestAccAzureRMMsSqlElasticPool_resize_DTU(t *testing.T) { Check: resource.ComposeTestCheckFunc( testCheckAzureRMMsSqlElasticPoolExists(resourceName), resource.TestCheckResourceAttr(resourceName, "sku.0.name", "StandardPool"), - resource.TestCheckResourceAttr(resourceName, "sku.0.tier", "Standard"), resource.TestCheckResourceAttr(resourceName, "sku.0.capacity", "100"), resource.TestCheckResourceAttr(resourceName, "per_database_settings.0.min_capacity", "50"), resource.TestCheckResourceAttr(resourceName, "per_database_settings.0.max_capacity", "100"), @@ -214,9 +241,7 @@ func TestAccAzureRMMsSqlElasticPool_resize_vCore(t *testing.T) { Check: resource.ComposeTestCheckFunc( testCheckAzureRMMsSqlElasticPoolExists(resourceName), resource.TestCheckResourceAttr(resourceName, "sku.0.name", "GP_Gen5"), - resource.TestCheckResourceAttr(resourceName, "sku.0.tier", "GeneralPurpose"), resource.TestCheckResourceAttr(resourceName, "sku.0.capacity", "4"), - resource.TestCheckResourceAttr(resourceName, "sku.0.family", "Gen5"), resource.TestCheckResourceAttr(resourceName, "per_database_settings.0.min_capacity", "0.25"), resource.TestCheckResourceAttr(resourceName, "per_database_settings.0.max_capacity", "4"), ), @@ -226,9 +251,7 @@ func TestAccAzureRMMsSqlElasticPool_resize_vCore(t *testing.T) { Check: 
resource.ComposeTestCheckFunc( testCheckAzureRMMsSqlElasticPoolExists(resourceName), resource.TestCheckResourceAttr(resourceName, "sku.0.name", "GP_Gen5"), - resource.TestCheckResourceAttr(resourceName, "sku.0.tier", "GeneralPurpose"), resource.TestCheckResourceAttr(resourceName, "sku.0.capacity", "8"), - resource.TestCheckResourceAttr(resourceName, "sku.0.family", "Gen5"), resource.TestCheckResourceAttr(resourceName, "per_database_settings.0.min_capacity", "0"), resource.TestCheckResourceAttr(resourceName, "per_database_settings.0.max_capacity", "8"), ), @@ -314,7 +337,7 @@ func testCheckAzureRMMsSqlElasticPoolDisappears(resourceName string) resource.Te } func testAccAzureRMMsSqlElasticPool_basic_DTU(rInt int, location string) string { - return testAccAzureRMMsSqlElasticPool_DTU_Template(rInt, location, "BasicPool", "Basic", 50, 5242880000, 0, 5) + return testAccAzureRMMsSqlElasticPool_DTU_Template(rInt, location, "BasicPool", "Basic", 50, 4.8828125, 0, 5) } func testAccAzureRMMsSqlElasticPool_requiresImport(rInt int, location string) string { @@ -335,17 +358,21 @@ resource "azurerm_mssql_elasticpool" "test" { } func testAccAzureRMMsSqlElasticPool_standard_DTU(rInt int, location string) string { - return testAccAzureRMMsSqlElasticPool_DTU_Template(rInt, location, "StandardPool", "Standard", 50, 53687091200, 0, 50) + return testAccAzureRMMsSqlElasticPool_DTU_Template(rInt, location, "StandardPool", "Standard", 50, 50, 0, 50) } func testAccAzureRMMsSqlElasticPool_resize_DTU(rInt int, location string) string { - return testAccAzureRMMsSqlElasticPool_DTU_Template(rInt, location, "StandardPool", "Standard", 100, 107374182400, 50, 100) + return testAccAzureRMMsSqlElasticPool_DTU_Template(rInt, location, "StandardPool", "Standard", 100, 100, 50, 100) } func testAccAzureRMMsSqlElasticPool_basic_vCore(rInt int, location string) string { return testAccAzureRMMsSqlElasticPool_vCore_Template(rInt, location, "GP_Gen5", "GeneralPurpose", 4, "Gen5", 0.25, 4) } +func 
testAccAzureRMMsSqlElasticPool_basic_vCore_MaxSizeBytes(rInt int, location string) string { + return testAccAzureRMMsSqlElasticPool_vCore_MaxSizeBytes_Template(rInt, location, "GP_Gen5", "GeneralPurpose", 4, "Gen5", 0.25, 4) +} + func testAccAzureRMMsSqlElasticPool_resize_vCore(rInt int, location string) string { return testAccAzureRMMsSqlElasticPool_vCore_Template(rInt, location, "GP_Gen5", "GeneralPurpose", 8, "Gen5", 0, 8) } @@ -371,7 +398,45 @@ resource "azurerm_mssql_elasticpool" "test" { resource_group_name = "${azurerm_resource_group.test.name}" location = "${azurerm_resource_group.test.location}" server_name = "${azurerm_sql_server.test.name}" - max_size_bytes = 5368709120 + max_size_gb = 5 + + sku { + name = "%[3]s" + tier = "%[4]s" + capacity = %[5]d + family = "%[6]s" + } + + per_database_settings { + min_capacity = %.2[7]f + max_capacity = %.2[8]f + } +} +`, rInt, location, skuName, skuTier, skuCapacity, skuFamily, databaseSettingsMin, databaseSettingsMax) +} + +func testAccAzureRMMsSqlElasticPool_vCore_MaxSizeBytes_Template(rInt int, location string, skuName string, skuTier string, skuCapacity int, skuFamily string, databaseSettingsMin float64, databaseSettingsMax float64) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%[1]d" + location = "%s" +} + +resource "azurerm_sql_server" "test" { + name = "acctest%[1]d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + version = "12.0" + administrator_login = "4dm1n157r470r" + administrator_login_password = "4-v3ry-53cr37-p455w0rd" +} + +resource "azurerm_mssql_elasticpool" "test" { + name = "acctest-pool-vcore-%[1]d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + server_name = "${azurerm_sql_server.test.name}" + max_size_bytes = 214748364800 sku { name = "%[3]s" @@ -388,7 +453,7 @@ resource "azurerm_mssql_elasticpool" 
"test" { `, rInt, location, skuName, skuTier, skuCapacity, skuFamily, databaseSettingsMin, databaseSettingsMax) } -func testAccAzureRMMsSqlElasticPool_DTU_Template(rInt int, location string, skuName string, skuTier string, skuCapacity int, maxSizeBytes int, databaseSettingsMin int, databaseSettingsMax int) string { +func testAccAzureRMMsSqlElasticPool_DTU_Template(rInt int, location string, skuName string, skuTier string, skuCapacity int, maxSizeGB float64, databaseSettingsMin int, databaseSettingsMax int) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { name = "acctestRG-%[1]d" @@ -409,7 +474,7 @@ resource "azurerm_mssql_elasticpool" "test" { resource_group_name = "${azurerm_resource_group.test.name}" location = "${azurerm_resource_group.test.location}" server_name = "${azurerm_sql_server.test.name}" - max_size_bytes = %[6]d + max_size_gb = %.7[6]f sku { name = "%[3]s" @@ -422,5 +487,5 @@ resource "azurerm_mssql_elasticpool" "test" { max_capacity = %[8]d } } -`, rInt, location, skuName, skuTier, skuCapacity, maxSizeBytes, databaseSettingsMin, databaseSettingsMax) +`, rInt, location, skuName, skuTier, skuCapacity, maxSizeGB, databaseSettingsMin, databaseSettingsMax) } diff --git a/azurerm/resource_arm_mysql_configuration.go b/azurerm/resource_arm_mysql_configuration.go index ce8d0b50c9bc..8f69fcd6ab4b 100644 --- a/azurerm/resource_arm_mysql_configuration.go +++ b/azurerm/resource_arm_mysql_configuration.go @@ -51,7 +51,6 @@ func resourceArmMySQLConfigurationCreate(d *schema.ResourceData, meta interface{ name := d.Get("name").(string) resourceGroup := d.Get("resource_group_name").(string) serverName := d.Get("server_name").(string) - value := d.Get("value").(string) properties := mysql.Configuration{ @@ -62,19 +61,19 @@ func resourceArmMySQLConfigurationCreate(d *schema.ResourceData, meta interface{ future, err := client.CreateOrUpdate(ctx, resourceGroup, serverName, name, properties) if err != nil { - return err + return fmt.Errorf("Error 
issuing create/update request for MySQL Configuration %s (resource group %s, server name %s): %v", name, resourceGroup, serverName, err) } if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return err + return fmt.Errorf("Error waiting for create/update of MySQL Configuration %s (resource group %s, server name %s): %v", name, resourceGroup, serverName, err) } read, err := client.Get(ctx, resourceGroup, serverName, name) if err != nil { - return err + return fmt.Errorf("Error issuing get request for MySQL Configuration %s (resource group %s, server name %s): %v", name, resourceGroup, serverName, err) } if read.ID == nil { - return fmt.Errorf("Cannot read MySQL Configuration %s (resource group %s) ID", name, resourceGroup) + return fmt.Errorf("Cannot read MySQL Configuration %s (resource group %s, server name %s) ID", name, resourceGroup, serverName) } d.SetId(*read.ID) diff --git a/azurerm/resource_arm_mysql_configuration_test.go b/azurerm/resource_arm_mysql_configuration_test.go index 4c84ba06328f..112c85b54ae7 100644 --- a/azurerm/resource_arm_mysql_configuration_test.go +++ b/azurerm/resource_arm_mysql_configuration_test.go @@ -13,9 +13,6 @@ import ( func TestAccAzureRMMySQLConfiguration_characterSetServer(t *testing.T) { resourceName := "azurerm_mysql_configuration.test" ri := tf.AccRandTimeInt() - location := testLocation() - config := testAccAzureRMMySQLConfiguration_characterSetServer(ri, location) - serverOnlyConfig := testAccAzureRMMySQLConfiguration_empty(ri, location) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -23,7 +20,7 @@ func TestAccAzureRMMySQLConfiguration_characterSetServer(t *testing.T) { CheckDestroy: testCheckAzureRMMySQLConfigurationDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMMySQLConfiguration_characterSetServer(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMMySQLConfigurationValue(resourceName, "hebrew"), ), @@ 
-34,7 +31,7 @@ func TestAccAzureRMMySQLConfiguration_characterSetServer(t *testing.T) { ImportStateVerify: true, }, { - Config: serverOnlyConfig, + Config: testAccAzureRMMySQLConfiguration_empty(ri, testLocation()), Check: resource.ComposeTestCheckFunc( // "delete" resets back to the default value testCheckAzureRMMySQLConfigurationValueReset(ri, "character_set_server"), @@ -47,9 +44,6 @@ func TestAccAzureRMMySQLConfiguration_characterSetServer(t *testing.T) { func TestAccAzureRMMySQLConfiguration_interactiveTimeout(t *testing.T) { resourceName := "azurerm_mysql_configuration.test" ri := tf.AccRandTimeInt() - location := testLocation() - config := testAccAzureRMMySQLConfiguration_interactiveTimeout(ri, location) - serverOnlyConfig := testAccAzureRMMySQLConfiguration_empty(ri, location) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -57,7 +51,7 @@ func TestAccAzureRMMySQLConfiguration_interactiveTimeout(t *testing.T) { CheckDestroy: testCheckAzureRMMySQLConfigurationDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMMySQLConfiguration_interactiveTimeout(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMMySQLConfigurationValue(resourceName, "30"), ), @@ -68,7 +62,7 @@ func TestAccAzureRMMySQLConfiguration_interactiveTimeout(t *testing.T) { ImportStateVerify: true, }, { - Config: serverOnlyConfig, + Config: testAccAzureRMMySQLConfiguration_empty(ri, testLocation()), Check: resource.ComposeTestCheckFunc( // "delete" resets back to the default value testCheckAzureRMMySQLConfigurationValueReset(ri, "interactive_timeout"), @@ -81,9 +75,6 @@ func TestAccAzureRMMySQLConfiguration_interactiveTimeout(t *testing.T) { func TestAccAzureRMMySQLConfiguration_logSlowAdminStatements(t *testing.T) { resourceName := "azurerm_mysql_configuration.test" ri := tf.AccRandTimeInt() - location := testLocation() - config := testAccAzureRMMySQLConfiguration_logSlowAdminStatements(ri, location) - 
serverOnlyConfig := testAccAzureRMMySQLConfiguration_empty(ri, location) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -91,7 +82,7 @@ func TestAccAzureRMMySQLConfiguration_logSlowAdminStatements(t *testing.T) { CheckDestroy: testCheckAzureRMMySQLConfigurationDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMMySQLConfiguration_logSlowAdminStatements(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMMySQLConfigurationValue(resourceName, "on"), ), @@ -102,7 +93,7 @@ func TestAccAzureRMMySQLConfiguration_logSlowAdminStatements(t *testing.T) { ImportStateVerify: true, }, { - Config: serverOnlyConfig, + Config: testAccAzureRMMySQLConfiguration_empty(ri, testLocation()), Check: resource.ComposeTestCheckFunc( // "delete" resets back to the default value testCheckAzureRMMySQLConfigurationValueReset(ri, "log_slow_admin_statements"), diff --git a/azurerm/resource_arm_mysql_database.go b/azurerm/resource_arm_mysql_database.go index 60a402ce27ec..af039a623a1c 100644 --- a/azurerm/resource_arm_mysql_database.go +++ b/azurerm/resource_arm_mysql_database.go @@ -6,6 +6,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/mysql/mgmt/2017-12-01/mysql" "github.com/hashicorp/terraform/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -62,6 +63,19 @@ func resourceArmMySqlDatabaseCreate(d *schema.ResourceData, meta interface{}) er charset := d.Get("charset").(string) collation := d.Get("collation").(string) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, serverName, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing MySQL DataBase %s (resource group %s, server name %s): %v", name, resourceGroup, serverName, err) + } + } 
+ + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_mysql_database", *existing.ID) + } + } + properties := mysql.Database{ DatabaseProperties: &mysql.DatabaseProperties{ Charset: utils.String(charset), @@ -71,16 +85,16 @@ func resourceArmMySqlDatabaseCreate(d *schema.ResourceData, meta interface{}) er future, err := client.CreateOrUpdate(ctx, resourceGroup, serverName, name, properties) if err != nil { - return err + return fmt.Errorf("Error issuing create/update request for MySQL DataBase %s (resource group %s, server name %s): %v", name, resourceGroup, serverName, err) } if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return err + return fmt.Errorf("Error waiting on create/update future for MySQL DataBase %s (resource group %s, server name %s): %v", name, resourceGroup, serverName, err) } read, err := client.Get(ctx, resourceGroup, serverName, name) if err != nil { - return err + return fmt.Errorf("Error issuing get request for MySQL DataBase %s (resource group %s, server name %s): %v", name, resourceGroup, serverName, err) } if read.ID == nil { return fmt.Errorf("Cannot read MySQL Database %q (resource group %q) ID", name, resourceGroup) diff --git a/azurerm/resource_arm_mysql_database_test.go b/azurerm/resource_arm_mysql_database_test.go index b5156e9107f7..b1faed6f0357 100644 --- a/azurerm/resource_arm_mysql_database_test.go +++ b/azurerm/resource_arm_mysql_database_test.go @@ -13,7 +13,6 @@ import ( func TestAccAzureRMMySQLDatabase_basic(t *testing.T) { resourceName := "azurerm_mysql_database.test" ri := tf.AccRandTimeInt() - config := testAccAzureRMMySQLDatabase_basic(ri, testLocation()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -21,7 +20,7 @@ func TestAccAzureRMMySQLDatabase_basic(t *testing.T) { CheckDestroy: testCheckAzureRMMySQLDatabaseDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMMySQLDatabase_basic(ri, 
testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMMySQLDatabaseExists(resourceName), ), @@ -35,6 +34,34 @@ func TestAccAzureRMMySQLDatabase_basic(t *testing.T) { }) } +func TestAccAzureRMMySQLDatabase_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_mysql_database.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMMySQLDatabaseDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMMySQLDatabase_basic(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMMySQLDatabaseExists(resourceName), + ), + }, + { + Config: testAccAzureRMMySQLDatabase_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_mysql_database"), + }, + }, + }) +} + func TestAccAzureRMMySQLDatabase_charsetUppercase(t *testing.T) { resourceName := "azurerm_mysql_database.test" ri := tf.AccRandTimeInt() @@ -184,6 +211,20 @@ resource "azurerm_mysql_database" "test" { `, rInt, location, rInt, rInt) } +func testAccAzureRMMySQLDatabase_requiresImport(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_mysql_database" "import" { + name = "${azurerm_mysql_database.test.name}" + resource_group_name = "${azurerm_mysql_database.test.resource_group_name}" + server_name = "${azurerm_mysql_database.test.server_name}" + charset = "${azurerm_mysql_database.test.charset}" + collation = "${azurerm_mysql_database.test.collation}" +} +`, testAccAzureRMMySQLDatabase_basic(rInt, location)) +} + func testAccAzureRMMySQLDatabase_charsetUppercase(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { diff --git a/azurerm/resource_arm_mysql_firewall_rule.go b/azurerm/resource_arm_mysql_firewall_rule.go 
index 810b0c79cd52..c7820e64ba3b 100644 --- a/azurerm/resource_arm_mysql_firewall_rule.go +++ b/azurerm/resource_arm_mysql_firewall_rule.go @@ -6,6 +6,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/mysql/mgmt/2017-12-01/mysql" "github.com/hashicorp/terraform/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -59,6 +60,19 @@ func resourceArmMySqlFirewallRuleCreateUpdate(d *schema.ResourceData, meta inter startIPAddress := d.Get("start_ip_address").(string) endIPAddress := d.Get("end_ip_address").(string) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, serverName, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing MySQL Firewall Rule %q (resource group %q, server name %q): %v", name, resourceGroup, serverName, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_mysql_firewall_rule", *existing.ID) + } + } + properties := mysql.FirewallRule{ FirewallRuleProperties: &mysql.FirewallRuleProperties{ StartIPAddress: utils.String(startIPAddress), @@ -68,16 +82,16 @@ func resourceArmMySqlFirewallRuleCreateUpdate(d *schema.ResourceData, meta inter future, err := client.CreateOrUpdate(ctx, resourceGroup, serverName, name, properties) if err != nil { - return err + return fmt.Errorf("Error issuing create/update request for MySQL Firewall Rule %q (resource group %q, server name %q): %v", name, resourceGroup, serverName, err) } if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return err + return fmt.Errorf("Error waiting on create/update future for MySQL Firewall Rule %q (resource group %q, server name %q): %v", name, resourceGroup, serverName, err) } read, err := client.Get(ctx, resourceGroup, serverName, name) if err != nil { - 
return err + return fmt.Errorf("Error issuing get request for MySQL Firewall Rule %q (resource group %q, server name %q): %v", name, resourceGroup, serverName, err) } if read.ID == nil { return fmt.Errorf("Cannot read MySQL Firewall Rule %q (Resource Group %q) ID", name, resourceGroup) } diff --git a/azurerm/resource_arm_mysql_firewall_rule_test.go b/azurerm/resource_arm_mysql_firewall_rule_test.go index 86c5d3655446..e6b566bec5f5 100644 --- a/azurerm/resource_arm_mysql_firewall_rule_test.go +++ b/azurerm/resource_arm_mysql_firewall_rule_test.go @@ -13,7 +13,6 @@ import ( func TestAccAzureRMMySQLFirewallRule_basic(t *testing.T) { resourceName := "azurerm_mysql_firewall_rule.test" ri := tf.AccRandTimeInt() - config := testAccAzureRMMySQLFirewallRule_basic(ri, testLocation()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -21,7 +20,7 @@ func TestAccAzureRMMySQLFirewallRule_basic(t *testing.T) { CheckDestroy: testCheckAzureRMMySQLFirewallRuleDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMMySQLFirewallRule_basic(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMMySQLFirewallRuleExists(resourceName), ), @@ -35,6 +34,34 @@ func TestAccAzureRMMySQLFirewallRule_basic(t *testing.T) { }) } +func TestAccAzureRMMySQLFirewallRule_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_mysql_firewall_rule.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMMySQLFirewallRuleDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMMySQLFirewallRule_basic(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMMySQLFirewallRuleExists(resourceName), + ), + }, + { + Config: 
testAccAzureRMMySQLFirewallRule_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_mysql_firewall_rule"), + }, + }, + }) +} + func testCheckAzureRMMySQLFirewallRuleExists(resourceName string) resource.TestCheckFunc { return func(s *terraform.State) error { // Ensure we have enough information in state to look up in API @@ -132,3 +159,17 @@ resource "azurerm_mysql_firewall_rule" "test" { } `, rInt, location, rInt, rInt) } + +func testAccAzureRMMySQLFirewallRule_requiresImport(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_mysql_firewall_rule" "import" { + name = "${azurerm_mysql_firewall_rule.test.name}" + resource_group_name = "${azurerm_mysql_firewall_rule.test.resource_group_name}" + server_name = "${azurerm_mysql_firewall_rule.test.server_name}" + start_ip_address = "${azurerm_mysql_firewall_rule.test.start_ip_address}" + end_ip_address = "${azurerm_mysql_firewall_rule.test.end_ip_address}" +} +`, testAccAzureRMMySQLFirewallRule_basic(rInt, location)) +} diff --git a/azurerm/resource_arm_mysql_server.go b/azurerm/resource_arm_mysql_server.go index 405200550f5b..a3ab1281ba58 100644 --- a/azurerm/resource_arm_mysql_server.go +++ b/azurerm/resource_arm_mysql_server.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -207,6 +208,19 @@ func resourceArmMySqlServerCreate(d *schema.ResourceData, meta interface{}) erro createMode := "Default" tags := d.Get("tags").(map[string]interface{}) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, name) + if 
err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing MySQL Server %q (Resource Group %q): %+v", name, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_mysql_server", *existing.ID) + } + } + sku := expandMySQLServerSku(d) storageProfile := expandMySQLStorageProfile(d) diff --git a/azurerm/resource_arm_mysql_server_test.go b/azurerm/resource_arm_mysql_server_test.go index a6f9ac228d9b..b83bd78cf95d 100644 --- a/azurerm/resource_arm_mysql_server_test.go +++ b/azurerm/resource_arm_mysql_server_test.go @@ -13,7 +13,6 @@ import ( func TestAccAzureRMMySQLServer_basicFiveSix(t *testing.T) { resourceName := "azurerm_mysql_server.test" ri := tf.AccRandTimeInt() - config := testAccAzureRMMySQLServer_basicFiveSix(ri, testLocation()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -21,7 +20,7 @@ func TestAccAzureRMMySQLServer_basicFiveSix(t *testing.T) { CheckDestroy: testCheckAzureRMMySQLServerDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMMySQLServer_basicFiveSix(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMMySQLServerExists(resourceName), ), @@ -38,7 +37,40 @@ func TestAccAzureRMMySQLServer_basicFiveSix(t *testing.T) { }) } +func TestAccAzureRMMySQLServer_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_mysql_server.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMMySQLServerDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMMySQLServer_basicFiveSevenUpdated(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + 
testCheckAzureRMMySQLServerExists(resourceName), + ), + }, + { + Config: testAccAzureRMMySQLServer_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_mysql_server"), + }, + }, + }) +} + func TestAccAzureRMMySQLServer_basicFiveSeven(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + resourceName := "azurerm_mysql_server.test" ri := tf.AccRandTimeInt() config := testAccAzureRMMySQLServer_basicFiveSeven(ri, testLocation()) @@ -328,6 +360,36 @@ resource "azurerm_mysql_server" "test" { `, rInt, location, rInt) } + +func testAccAzureRMMySQLServer_requiresImport(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_mysql_server" "import" { + name = "${azurerm_mysql_server.test.name}" + location = "${azurerm_mysql_server.test.location}" + resource_group_name = "${azurerm_mysql_server.test.resource_group_name}" + + sku { + name = "B_Gen5_2" + capacity = 2 + tier = "Basic" + family = "Gen5" + } + + storage_profile { + storage_mb = 51200 + backup_retention_days = 7 + geo_redundant_backup = "Disabled" + } + + administrator_login = "acctestun" + administrator_login_password = "H@Sh1CoR3!" 
+ version = "5.7" + ssl_enforcement = "Enabled" +} +`, testAccAzureRMMySQLServer_basicFiveSevenUpdated(rInt, location)) +} + func testAccAzureRMMySQLServer_basicFiveSevenUpdated(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { diff --git a/azurerm/resource_arm_mysql_virtual_network_rule.go b/azurerm/resource_arm_mysql_virtual_network_rule.go index c8ae60c0fe44..a2ff3caafb47 100644 --- a/azurerm/resource_arm_mysql_virtual_network_rule.go +++ b/azurerm/resource_arm_mysql_virtual_network_rule.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/terraform/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/response" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -61,6 +62,19 @@ func resourceArmMySqlVirtualNetworkRuleCreateUpdate(d *schema.ResourceData, meta resourceGroup := d.Get("resource_group_name").(string) subnetId := d.Get("subnet_id").(string) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, serverName, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing MySQL Virtual Network Rule %q (MySQL Server: %q, Resource Group: %q): %+v", name, serverName, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_mysql_virtual_network_rule", *existing.ID) + } + } + // due to a bug in the API we have to ensure the Subnet's configured correctly or the API call will timeout // BUG: https://github.com/Azure/azure-rest-api-specs/issues/3719 subnetsClient := meta.(*ArmClient).subnetClient diff --git 
a/azurerm/resource_arm_mysql_virtual_network_rule_test.go b/azurerm/resource_arm_mysql_virtual_network_rule_test.go index 4911f141d1fb..3cafbbc1caa6 100644 --- a/azurerm/resource_arm_mysql_virtual_network_rule_test.go +++ b/azurerm/resource_arm_mysql_virtual_network_rule_test.go @@ -31,6 +31,34 @@ func TestAccAzureRMMySqlVirtualNetworkRule_basic(t *testing.T) { }) } +func TestAccAzureRMMySqlVirtualNetworkRule_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_mysql_virtual_network_rule.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMMySqlVirtualNetworkRuleDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMMySqlVirtualNetworkRule_basic(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMMySqlVirtualNetworkRuleExists(resourceName), + ), + }, + { + Config: testAccAzureRMMySqlVirtualNetworkRule_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_mysql_virtual_network_rule"), + }, + }, + }) +} + func TestAccAzureRMMySqlVirtualNetworkRule_switchSubnets(t *testing.T) { resourceName := "azurerm_mysql_virtual_network_rule.test" ri := tf.AccRandTimeInt() @@ -259,6 +287,19 @@ resource "azurerm_mysql_virtual_network_rule" "test" { `, rInt, location, rInt, rInt, rInt, rInt) } +func testAccAzureRMMySqlVirtualNetworkRule_requiresImport(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_mysql_virtual_network_rule" "import" { + name = "${azurerm_mysql_virtual_network_rule.test.name}" + resource_group_name = "${azurerm_mysql_virtual_network_rule.test.resource_group_name}" + server_name = "${azurerm_mysql_virtual_network_rule.test.server_name}" + subnet_id = "${azurerm_mysql_virtual_network_rule.test.subnet_id}" +} 
+`, testAccAzureRMMySqlVirtualNetworkRule_basic(rInt, location)) +} + func testAccAzureRMMySqlVirtualNetworkRule_subnetSwitchPre(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { diff --git a/azurerm/resource_arm_network_interface.go b/azurerm/resource_arm_network_interface.go index d25e822136fa..ef5d69bf7a97 100644 --- a/azurerm/resource_arm_network_interface.go +++ b/azurerm/resource_arm_network_interface.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/terraform/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -147,9 +148,10 @@ func resourceArmNetworkInterface() *schema.Resource { }, "application_security_group_ids": { - Type: schema.TypeSet, - Optional: true, - Computed: true, + Type: schema.TypeSet, + Optional: true, + Computed: true, + Deprecated: "This field has been deprecated in favour of the `azurerm_network_interface_application_security_group_association` resource.", Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: azure.ValidateResourceID, @@ -248,8 +250,22 @@ func resourceArmNetworkInterfaceCreateUpdate(d *schema.ResourceData, meta interf log.Printf("[INFO] preparing arguments for AzureRM Network Interface creation.") name := d.Get("name").(string) - location := azureRMNormalizeLocation(d.Get("location").(string)) resGroup := d.Get("resource_group_name").(string) + + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resGroup, name, "") + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Network 
Interface %q (Resource Group %q): %s", name, resGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_network_interface", *existing.ID) + } + } + + location := azureRMNormalizeLocation(d.Get("location").(string)) enableIpForwarding := d.Get("enable_ip_forwarding").(bool) enableAcceleratedNetworking := d.Get("enable_accelerated_networking").(bool) tags := d.Get("tags").(map[string]interface{}) diff --git a/azurerm/resource_arm_network_interface_application_gateway_association.go b/azurerm/resource_arm_network_interface_application_gateway_association.go index d410d3b535fe..f249338dcd7c 100644 --- a/azurerm/resource_arm_network_interface_application_gateway_association.go +++ b/azurerm/resource_arm_network_interface_application_gateway_association.go @@ -8,6 +8,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network" "github.com/hashicorp/terraform/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -100,12 +101,16 @@ func resourceArmNetworkInterfaceApplicationGatewayBackendAddressPoolAssociationC pools := make([]network.ApplicationGatewayBackendAddressPool, 0) // first double-check it doesn't exist + resourceId := fmt.Sprintf("%s/ipConfigurations/%s|%s", networkInterfaceId, ipConfigurationName, backendAddressPoolId) if p.ApplicationGatewayBackendAddressPools != nil { for _, existingPool := range *p.ApplicationGatewayBackendAddressPools { if id := existingPool.ID; id != nil { if *id == backendAddressPoolId { - // TODO: switch to using the common error once https://github.com/terraform-providers/terraform-provider-azurerm/pull/1746 is merged - return fmt.Errorf("A Network Interface <-> 
Application Gateway Backend Address Pool association exists between %q and %q - please import it!", networkInterfaceId, backendAddressPoolId) + if requireResourcesToBeImported { + return tf.ImportAsExistsError("azurerm_network_interface_application_gateway_backend_address_pool_association", resourceId) + } + + continue } pools = append(pools, existingPool) @@ -130,7 +135,6 @@ func resourceArmNetworkInterfaceApplicationGatewayBackendAddressPoolAssociationC return fmt.Errorf("Error waiting for completion of Application Gateway Backend Address Pool Association for NIC %q (Resource Group %q): %+v", networkInterfaceName, resourceGroup, err) } - resourceId := fmt.Sprintf("%s/ipConfigurations/%s|%s", networkInterfaceId, ipConfigurationName, backendAddressPoolId) d.SetId(resourceId) return resourceArmNetworkInterfaceApplicationGatewayBackendAddressPoolAssociationRead(d, meta) diff --git a/azurerm/resource_arm_network_interface_application_gateway_association_test.go b/azurerm/resource_arm_network_interface_application_gateway_association_test.go index 017c55d26bfa..528469793f5d 100644 --- a/azurerm/resource_arm_network_interface_application_gateway_association_test.go +++ b/azurerm/resource_arm_network_interface_application_gateway_association_test.go @@ -30,6 +30,35 @@ func TestAccAzureRMNetworkInterfaceApplicationGatewayBackendAddressPoolAssociati }) } +func TestAccAzureRMNetworkInterfaceApplicationGatewayBackendAddressPoolAssociation_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_network_interface_application_gateway_backend_address_pool_association.test" + rInt := tf.AccRandTimeInt() + location := testLocation() + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + // intentional as this is a Virtual Resource + CheckDestroy: testCheckAzureRMNetworkInterfaceDestroy, + Steps: 
[]resource.TestStep{ + { + Config: testAccAzureRMNetworkInterfaceApplicationGatewayBackendAddressPoolAssociation_basic(rInt, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMNetworkInterfaceApplicationGatewayBackendAddressPoolAssociationExists(resourceName), + ), + }, + { + Config: testAccAzureRMNetworkInterfaceApplicationGatewayBackendAddressPoolAssociation_requiresImport(rInt, location), + ExpectError: testRequiresImportError("azurerm_network_interface_application_gateway_backend_address_pool_association"), + }, + }, + }) +} + func TestAccAzureRMNetworkInterfaceApplicationGatewayBackendAddressPoolAssociation_deleted(t *testing.T) { resourceName := "azurerm_network_interface_application_gateway_backend_address_pool_association.test" ri := tf.AccRandTimeInt() @@ -276,3 +305,16 @@ resource "azurerm_network_interface_application_gateway_backend_address_pool_ass } `, rInt, location, rInt, rInt, rInt, rInt) } + +func testAccAzureRMNetworkInterfaceApplicationGatewayBackendAddressPoolAssociation_requiresImport(rInt int, location string) string { + template := testAccAzureRMNetworkInterfaceApplicationGatewayBackendAddressPoolAssociation_basic(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_network_interface_application_gateway_backend_address_pool_association" "import" { + network_interface_id = "${azurerm_network_interface_application_gateway_backend_address_pool_association.test.network_interface_id}" + ip_configuration_name = "${azurerm_network_interface_application_gateway_backend_address_pool_association.test.ip_configuration_name}" + backend_address_pool_id = "${azurerm_network_interface_application_gateway_backend_address_pool_association.test.backend_address_pool_id}" +} +`, template) +} diff --git a/azurerm/resource_arm_network_interface_application_security_group_association.go b/azurerm/resource_arm_network_interface_application_security_group_association.go new file mode 100644 index 000000000000..f2c46a6ec5c6 --- /dev/null 
+++ b/azurerm/resource_arm_network_interface_application_security_group_association.go @@ -0,0 +1,297 @@ +package azurerm + +import ( + "fmt" + "log" + "strings" + + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network" + "github.com/hashicorp/terraform/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceArmNetworkInterfaceApplicationSecurityGroupAssociation() *schema.Resource { + return &schema.Resource{ + Create: resourceArmNetworkInterfaceApplicationSecurityGroupAssociationCreate, + Read: resourceArmNetworkInterfaceApplicationSecurityGroupAssociationRead, + Delete: resourceArmNetworkInterfaceApplicationSecurityGroupAssociationDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "network_interface_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + }, + + "ip_configuration_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.NoEmptyStrings, + }, + + "application_security_group_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + }, + }, + } +} + +func resourceArmNetworkInterfaceApplicationSecurityGroupAssociationCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).ifaceClient + ctx := meta.(*ArmClient).StopContext + + log.Printf("[INFO] preparing arguments for Network Interface <-> Application Security Group Association creation.") + + networkInterfaceId := d.Get("network_interface_id").(string) + ipConfigurationName := d.Get("ip_configuration_name").(string) + 
applicationSecurityGroupId := d.Get("application_security_group_id").(string) + + id, err := parseAzureResourceID(networkInterfaceId) + if err != nil { + return err + } + + networkInterfaceName := id.Path["networkInterfaces"] + resourceGroup := id.ResourceGroup + + azureRMLockByName(networkInterfaceName, networkInterfaceResourceName) + defer azureRMUnlockByName(networkInterfaceName, networkInterfaceResourceName) + + read, err := client.Get(ctx, resourceGroup, networkInterfaceName, "") + if err != nil { + if utils.ResponseWasNotFound(read.Response) { + return fmt.Errorf("Network Interface %q (Resource Group %q) was not found!", networkInterfaceName, resourceGroup) + } + + return fmt.Errorf("Error retrieving Network Interface %q (Resource Group %q): %+v", networkInterfaceName, resourceGroup, err) + } + + props := read.InterfacePropertiesFormat + if props == nil { + return fmt.Errorf("Error: `properties` was nil for Network Interface %q (Resource Group %q)", networkInterfaceName, resourceGroup) + } + + ipConfigs := props.IPConfigurations + if ipConfigs == nil { + return fmt.Errorf("Error: `properties.IPConfigurations` was nil for Network Interface %q (Resource Group %q)", networkInterfaceName, resourceGroup) + } + + c := azure.FindNetworkInterfaceIPConfiguration(props.IPConfigurations, ipConfigurationName) + if c == nil { + return fmt.Errorf("Error: IP Configuration %q was not found on Network Interface %q (Resource Group %q)", ipConfigurationName, networkInterfaceName, resourceGroup) + } + + config := *c + p := config.InterfaceIPConfigurationPropertiesFormat + if p == nil { + return fmt.Errorf("Error: `IPConfiguration.properties` was nil for Network Interface %q (Resource Group %q)", networkInterfaceName, resourceGroup) + } + + applicationSecurityGroups := make([]network.ApplicationSecurityGroup, 0) + + // first double-check it doesn't exist + if p.ApplicationSecurityGroups != nil { + for _, existingGroup := range *p.ApplicationSecurityGroups { + if id := 
existingGroup.ID; id != nil { + if *id == applicationSecurityGroupId { + if requireResourcesToBeImported { + return tf.ImportAsExistsError("azurerm_network_interface_application_security_group_association", *id) + } + + continue + } + + applicationSecurityGroups = append(applicationSecurityGroups, existingGroup) + } + } + } + + group := network.ApplicationSecurityGroup{ + ID: utils.String(applicationSecurityGroupId), + } + applicationSecurityGroups = append(applicationSecurityGroups, group) + p.ApplicationSecurityGroups = &applicationSecurityGroups + + props.IPConfigurations = azure.UpdateNetworkInterfaceIPConfiguration(config, props.IPConfigurations) + + future, err := client.CreateOrUpdate(ctx, resourceGroup, networkInterfaceName, read) + if err != nil { + return fmt.Errorf("Error updating Application Security Group Association for Network Interface %q (Resource Group %q): %+v", networkInterfaceName, resourceGroup, err) + } + + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("Error waiting for completion of Application Security Group Association for NIC %q (Resource Group %q): %+v", networkInterfaceName, resourceGroup, err) + } + + resourceId := fmt.Sprintf("%s/ipConfigurations/%s|%s", networkInterfaceId, ipConfigurationName, applicationSecurityGroupId) + d.SetId(resourceId) + + return resourceArmNetworkInterfaceApplicationSecurityGroupAssociationRead(d, meta) +} + +func resourceArmNetworkInterfaceApplicationSecurityGroupAssociationRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).ifaceClient + ctx := meta.(*ArmClient).StopContext + + splitId := strings.Split(d.Id(), "|") + if len(splitId) != 2 { + return fmt.Errorf("Expected ID to be in the format {networkInterfaceId}/ipConfigurations/{ipConfigurationName}|{applicationSecurityGroupId} but got %q", d.Id()) + } + + nicID, err := parseAzureResourceID(splitId[0]) + if err != nil { + return err + } + + ipConfigurationName := 
nicID.Path["ipConfigurations"] + networkInterfaceName := nicID.Path["networkInterfaces"] + resourceGroup := nicID.ResourceGroup + applicationSecurityGroupId := splitId[1] + + read, err := client.Get(ctx, resourceGroup, networkInterfaceName, "") + if err != nil { + if utils.ResponseWasNotFound(read.Response) { + return fmt.Errorf("Network Interface %q (Resource Group %q) was not found!", networkInterfaceName, resourceGroup) + } + + return fmt.Errorf("Error retrieving Network Interface %q (Resource Group %q): %+v", networkInterfaceName, resourceGroup, err) + } + + nicProps := read.InterfacePropertiesFormat + if nicProps == nil { + return fmt.Errorf("Error: `properties` was nil for Network Interface %q (Resource Group %q)", networkInterfaceName, resourceGroup) + } + + ipConfigs := nicProps.IPConfigurations + if ipConfigs == nil { + return fmt.Errorf("Error: `properties.IPConfigurations` was nil for Network Interface %q (Resource Group %q)", networkInterfaceName, resourceGroup) + } + + c := azure.FindNetworkInterfaceIPConfiguration(nicProps.IPConfigurations, ipConfigurationName) + if c == nil { + log.Printf("IP Configuration %q was not found in Network Interface %q (Resource Group %q) - removing from state!", ipConfigurationName, networkInterfaceName, resourceGroup) + d.SetId("") + return nil + } + config := *c + + found := false + if props := config.InterfaceIPConfigurationPropertiesFormat; props != nil { + if groups := props.ApplicationSecurityGroups; groups != nil { + for _, group := range *groups { + if group.ID == nil { + continue + } + + if *group.ID == applicationSecurityGroupId { + found = true + break + } + } + } + } + + if !found { + log.Printf("[DEBUG] Association between Network Interface %q (Resource Group %q) and Application Security Group %q was not found - removing from state!", networkInterfaceName, resourceGroup, applicationSecurityGroupId) + d.SetId("") + return nil + } + + d.Set("application_security_group_id", applicationSecurityGroupId) + 
d.Set("ip_configuration_name", ipConfigurationName) + if id := read.ID; id != nil { + d.Set("network_interface_id", *id) + } + + return nil +} + +func resourceArmNetworkInterfaceApplicationSecurityGroupAssociationDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).ifaceClient + ctx := meta.(*ArmClient).StopContext + + splitId := strings.Split(d.Id(), "|") + if len(splitId) != 2 { + return fmt.Errorf("Expected ID to be in the format {networkInterfaceId}/ipConfigurations/{ipConfigurationName}|{applicationSecurityGroupId} but got %q", d.Id()) + } + + nicID, err := parseAzureResourceID(splitId[0]) + if err != nil { + return err + } + + ipConfigurationName := nicID.Path["ipConfigurations"] + networkInterfaceName := nicID.Path["networkInterfaces"] + resourceGroup := nicID.ResourceGroup + applicationSecurityGroupId := splitId[1] + + azureRMLockByName(networkInterfaceName, networkInterfaceResourceName) + defer azureRMUnlockByName(networkInterfaceName, networkInterfaceResourceName) + + read, err := client.Get(ctx, resourceGroup, networkInterfaceName, "") + if err != nil { + if utils.ResponseWasNotFound(read.Response) { + return fmt.Errorf("Network Interface %q (Resource Group %q) was not found!", networkInterfaceName, resourceGroup) + } + + return fmt.Errorf("Error retrieving Network Interface %q (Resource Group %q): %+v", networkInterfaceName, resourceGroup, err) + } + + nicProps := read.InterfacePropertiesFormat + if nicProps == nil { + return fmt.Errorf("Error: `properties` was nil for Network Interface %q (Resource Group %q)", networkInterfaceName, resourceGroup) + } + + ipConfigs := nicProps.IPConfigurations + if ipConfigs == nil { + return fmt.Errorf("Error: `properties.IPConfigurations` was nil for Network Interface %q (Resource Group %q)", networkInterfaceName, resourceGroup) + } + + c := azure.FindNetworkInterfaceIPConfiguration(nicProps.IPConfigurations, ipConfigurationName) + if c == nil { + return fmt.Errorf("Error: IP 
Configuration %q was not found on Network Interface %q (Resource Group %q)", ipConfigurationName, networkInterfaceName, resourceGroup) + } + config := *c + + props := config.InterfaceIPConfigurationPropertiesFormat + if props == nil { + return fmt.Errorf("Error: Properties for IPConfiguration %q was nil for Network Interface %q (Resource Group %q)", ipConfigurationName, networkInterfaceName, resourceGroup) + } + + applicationSecurityGroups := make([]network.ApplicationSecurityGroup, 0) + if groups := props.ApplicationSecurityGroups; groups != nil { + for _, pool := range *groups { + if pool.ID == nil { + continue + } + + if *pool.ID != applicationSecurityGroupId { + applicationSecurityGroups = append(applicationSecurityGroups, pool) + } + } + } + props.ApplicationSecurityGroups = &applicationSecurityGroups + nicProps.IPConfigurations = azure.UpdateNetworkInterfaceIPConfiguration(config, nicProps.IPConfigurations) + + future, err := client.CreateOrUpdate(ctx, resourceGroup, networkInterfaceName, read) + if err != nil { + return fmt.Errorf("Error removing Application Security Group for Network Interface %q (Resource Group %q): %+v", networkInterfaceName, resourceGroup, err) + } + + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("Error waiting for removal of Application Security Group for NIC %q (Resource Group %q): %+v", networkInterfaceName, resourceGroup, err) + } + + return nil +} diff --git a/azurerm/resource_arm_network_interface_application_security_group_association_test.go b/azurerm/resource_arm_network_interface_application_security_group_association_test.go new file mode 100644 index 000000000000..a71f26cd4baf --- /dev/null +++ b/azurerm/resource_arm_network_interface_application_security_group_association_test.go @@ -0,0 +1,249 @@ +package azurerm + +import ( + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network" + "github.com/hashicorp/terraform/helper/resource" + 
"github.com/hashicorp/terraform/terraform" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" +) + +func TestAccAzureRMNetworkInterfaceApplicationSecurityGroupAssociation_basic(t *testing.T) { + resourceName := "azurerm_network_interface_application_security_group_association.test" + rInt := tf.AccRandTimeInt() + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + // intentional as this is a Virtual Resource + CheckDestroy: testCheckAzureRMNetworkInterfaceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMNetworkInterfaceApplicationSecurityGroupAssociation_basic(rInt, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMNetworkInterfaceApplicationSecurityGroupAssociationExists(resourceName), + ), + }, + }, + }) +} + +func TestAccAzureRMNetworkInterfaceApplicationSecurityGroupAssociation_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_network_interface_application_security_group_association.test" + rInt := tf.AccRandTimeInt() + location := testLocation() + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + // intentional as this is a Virtual Resource + CheckDestroy: testCheckAzureRMNetworkInterfaceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMNetworkInterfaceApplicationSecurityGroupAssociation_basic(rInt, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMNetworkInterfaceApplicationSecurityGroupAssociationExists(resourceName), + ), + }, + { + Config: testAccAzureRMNetworkInterfaceApplicationSecurityGroupAssociation_requiresImport(rInt, location), + ExpectError: 
testRequiresImportError("azurerm_network_interface_application_security_group_association"), + }, + }, + }) +} + +func TestAccAzureRMNetworkInterfaceApplicationSecurityGroupAssociation_deleted(t *testing.T) { + resourceName := "azurerm_network_interface_application_security_group_association.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + // intentional as this is a Virtual Resource + CheckDestroy: testCheckAzureRMNetworkInterfaceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMNetworkInterfaceApplicationSecurityGroupAssociation_basic(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMNetworkInterfaceApplicationSecurityGroupAssociationExists(resourceName), + testCheckAzureRMNetworkInterfaceApplicationSecurityGroupAssociationDisappears(resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testCheckAzureRMNetworkInterfaceApplicationSecurityGroupAssociationExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + // Ensure we have enough information in state to look up in API + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + nicID, err := parseAzureResourceID(rs.Primary.Attributes["network_interface_id"]) + if err != nil { + return err + } + + nicName := nicID.Path["networkInterfaces"] + resourceGroup := nicID.ResourceGroup + applicationSecurityGroupId := rs.Primary.Attributes["application_security_group_id"] + ipConfigurationName := rs.Primary.Attributes["ip_configuration_name"] + + client := testAccProvider.Meta().(*ArmClient).ifaceClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + + read, err := client.Get(ctx, resourceGroup, nicName, "") + if err != nil { + return fmt.Errorf("Error retrieving Network Interface %q (Resource Group %q): %+v", 
nicName, resourceGroup, err) + } + + c := azure.FindNetworkInterfaceIPConfiguration(read.InterfacePropertiesFormat.IPConfigurations, ipConfigurationName) + if c == nil { + return fmt.Errorf("IP Configuration %q wasn't found for Network Interface %q (Resource Group %q)", ipConfigurationName, nicName, resourceGroup) + } + config := *c + + found := false + if config.InterfaceIPConfigurationPropertiesFormat.ApplicationSecurityGroups != nil { + for _, group := range *config.InterfaceIPConfigurationPropertiesFormat.ApplicationSecurityGroups { + if *group.ID == applicationSecurityGroupId { + found = true + break + } + } + } + + if !found { + return fmt.Errorf("Association between NIC %q and Application Security Group %q was not found!", nicName, applicationSecurityGroupId) + } + + return nil + } +} + +func testCheckAzureRMNetworkInterfaceApplicationSecurityGroupAssociationDisappears(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + // Ensure we have enough information in state to look up in API + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + nicID, err := parseAzureResourceID(rs.Primary.Attributes["network_interface_id"]) + if err != nil { + return err + } + + nicName := nicID.Path["networkInterfaces"] + resourceGroup := nicID.ResourceGroup + applicationSecurityGroupId := rs.Primary.Attributes["application_security_group_id"] + ipConfigurationName := rs.Primary.Attributes["ip_configuration_name"] + + client := testAccProvider.Meta().(*ArmClient).ifaceClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + + read, err := client.Get(ctx, resourceGroup, nicName, "") + if err != nil { + return fmt.Errorf("Error retrieving Network Interface %q (Resource Group %q): %+v", nicName, resourceGroup, err) + } + + c := azure.FindNetworkInterfaceIPConfiguration(read.InterfacePropertiesFormat.IPConfigurations, ipConfigurationName) + if c == nil { + return 
fmt.Errorf("IP Configuration %q wasn't found for Network Interface %q (Resource Group %q)", ipConfigurationName, nicName, resourceGroup) + } + config := *c + + updatedGroups := make([]network.ApplicationSecurityGroup, 0) + if config.InterfaceIPConfigurationPropertiesFormat.ApplicationSecurityGroups != nil { + for _, group := range *config.InterfaceIPConfigurationPropertiesFormat.ApplicationSecurityGroups { + if *group.ID != applicationSecurityGroupId { + updatedGroups = append(updatedGroups, group) + } + } + } + config.InterfaceIPConfigurationPropertiesFormat.ApplicationSecurityGroups = &updatedGroups + + future, err := client.CreateOrUpdate(ctx, resourceGroup, nicName, read) + if err != nil { + return fmt.Errorf("Error removing Application Security Group Association for Network Interface %q (Resource Group %q): %+v", nicName, resourceGroup, err) + } + + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("Error waiting for removal of Application Security Group Association for NIC %q (Resource Group %q): %+v", nicName, resourceGroup, err) + } + + return nil + } +} + +func testAccAzureRMNetworkInterfaceApplicationSecurityGroupAssociation_basic(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_virtual_network" "test" { + name = "acctestvn-%d" + address_space = ["10.0.0.0/16"] + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" +} + +resource "azurerm_subnet" "test" { + name = "internal" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.0.1.0/24" +} + +resource "azurerm_application_security_group" "test" { + name = "acctest-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" +} + 
+resource "azurerm_network_interface" "test" { + name = "acctestni-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + ip_configuration { + name = "testconfiguration1" + subnet_id = "${azurerm_subnet.test.id}" + private_ip_address_allocation = "Dynamic" + application_security_group_ids = [ "${azurerm_application_security_group.test.id}" ] + } +} + +resource "azurerm_network_interface_application_security_group_association" "test" { + network_interface_id = "${azurerm_network_interface.test.id}" + ip_configuration_name = "testconfiguration1" + application_security_group_id = "${azurerm_application_security_group.test.id}" +} +`, rInt, location, rInt, rInt, rInt) +} + +func testAccAzureRMNetworkInterfaceApplicationSecurityGroupAssociation_requiresImport(rInt int, location string) string { + template := testAccAzureRMNetworkInterfaceApplicationSecurityGroupAssociation_basic(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_network_interface_application_security_group_association" "import" { + network_interface_id = "${azurerm_network_interface_application_security_group_association.test.network_interface_id}" + ip_configuration_name = "${azurerm_network_interface_application_security_group_association.test.ip_configuration_name}" + application_security_group_id = "${azurerm_network_interface_application_security_group_association.test.application_security_group_id}" +} +`, template) +} diff --git a/azurerm/resource_arm_network_interface_backend_address_pool_association.go b/azurerm/resource_arm_network_interface_backend_address_pool_association.go index ce5f14d14bb5..520d202fd678 100644 --- a/azurerm/resource_arm_network_interface_backend_address_pool_association.go +++ b/azurerm/resource_arm_network_interface_backend_address_pool_association.go @@ -8,6 +8,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network" 
"github.com/hashicorp/terraform/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -100,12 +101,16 @@ func resourceArmNetworkInterfaceBackendAddressPoolAssociationCreate(d *schema.Re pools := make([]network.BackendAddressPool, 0) // first double-check it doesn't exist + resourceId := fmt.Sprintf("%s/ipConfigurations/%s|%s", networkInterfaceId, ipConfigurationName, backendAddressPoolId) if p.LoadBalancerBackendAddressPools != nil { for _, existingPool := range *p.LoadBalancerBackendAddressPools { if id := existingPool.ID; id != nil { if *id == backendAddressPoolId { - // TODO: switch to using the common error once https://github.com/terraform-providers/terraform-provider-azurerm/pull/1746 is merged - return fmt.Errorf("A Network Interface <-> Load Balancer Backend Address Pool association exists between %q and %q - please import it!", networkInterfaceId, backendAddressPoolId) + if requireResourcesToBeImported { + return tf.ImportAsExistsError("azurerm_network_interface_backend_address_pool_association", resourceId) + } + + continue } pools = append(pools, existingPool) @@ -130,7 +135,6 @@ func resourceArmNetworkInterfaceBackendAddressPoolAssociationCreate(d *schema.Re return fmt.Errorf("Error waiting for completion of Backend Address Pool Association for NIC %q (Resource Group %q): %+v", networkInterfaceName, resourceGroup, err) } - resourceId := fmt.Sprintf("%s/ipConfigurations/%s|%s", networkInterfaceId, ipConfigurationName, backendAddressPoolId) d.SetId(resourceId) return resourceArmNetworkInterfaceBackendAddressPoolAssociationRead(d, meta) diff --git a/azurerm/resource_arm_network_interface_backend_address_pool_association_test.go 
b/azurerm/resource_arm_network_interface_backend_address_pool_association_test.go index 5e3ecc9f1320..8fe67d4c93c9 100644 --- a/azurerm/resource_arm_network_interface_backend_address_pool_association_test.go +++ b/azurerm/resource_arm_network_interface_backend_address_pool_association_test.go @@ -30,6 +30,35 @@ func TestAccAzureRMNetworkInterfaceBackendAddressPoolAssociation_basic(t *testin }) } +func TestAccAzureRMNetworkInterfaceBackendAddressPoolAssociation_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_network_interface_backend_address_pool_association.test" + rInt := tf.AccRandTimeInt() + location := testLocation() + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + // intentional as this is a Virtual Resource + CheckDestroy: testCheckAzureRMNetworkInterfaceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMNetworkInterfaceBackendAddressPoolAssociation_basic(rInt, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMNetworkInterfaceBackendAddressPoolAssociationExists(resourceName), + ), + }, + { + Config: testAccAzureRMNetworkInterfaceBackendAddressPoolAssociation_requiresImport(rInt, location), + ExpectError: testRequiresImportError("azurerm_network_interface_backend_address_pool_association"), + }, + }, + }) +} + func TestAccAzureRMNetworkInterfaceBackendAddressPoolAssociation_deleted(t *testing.T) { resourceName := "azurerm_network_interface_backend_address_pool_association.test" ri := tf.AccRandTimeInt() @@ -223,3 +252,16 @@ resource "azurerm_network_interface_backend_address_pool_association" "test" { } `, rInt, location, rInt, rInt, rInt, rInt) } + +func testAccAzureRMNetworkInterfaceBackendAddressPoolAssociation_requiresImport(rInt int, location string) string { + template := 
testAccAzureRMNetworkInterfaceBackendAddressPoolAssociation_basic(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_network_interface_backend_address_pool_association" "import" { + network_interface_id = "${azurerm_network_interface_backend_address_pool_association.test.network_interface_id}" + ip_configuration_name = "${azurerm_network_interface_backend_address_pool_association.test.ip_configuration_name}" + backend_address_pool_id = "${azurerm_network_interface_backend_address_pool_association.test.backend_address_pool_id}" +} +`, template) +} diff --git a/azurerm/resource_arm_network_interface_nat_rule_association.go b/azurerm/resource_arm_network_interface_nat_rule_association.go index 1beb1489cb72..2027b9cd338b 100644 --- a/azurerm/resource_arm_network_interface_nat_rule_association.go +++ b/azurerm/resource_arm_network_interface_nat_rule_association.go @@ -8,6 +8,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network" "github.com/hashicorp/terraform/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -100,12 +101,16 @@ func resourceArmNetworkInterfaceNatRuleAssociationCreate(d *schema.ResourceData, rules := make([]network.InboundNatRule, 0) // first double-check it doesn't exist + resourceId := fmt.Sprintf("%s/ipConfigurations/%s|%s", networkInterfaceId, ipConfigurationName, natRuleId) if p.LoadBalancerInboundNatRules != nil { for _, existingRule := range *p.LoadBalancerInboundNatRules { if id := existingRule.ID; id != nil { if *id == natRuleId { - // TODO: switch to using the common error once https://github.com/terraform-providers/terraform-provider-azurerm/pull/1746 is merged - return fmt.Errorf("A Network Interface <-> 
Load Balancer NAT Rule association exists between %q and %q - please import it!", networkInterfaceId, natRuleId) + if requireResourcesToBeImported { + return tf.ImportAsExistsError("azurerm_network_interface_nat_rule_association", resourceId) + } + + continue } rules = append(rules, existingRule) @@ -130,7 +135,6 @@ func resourceArmNetworkInterfaceNatRuleAssociationCreate(d *schema.ResourceData, return fmt.Errorf("Error waiting for completion of NAT Rule Association for NIC %q (Resource Group %q): %+v", networkInterfaceName, resourceGroup, err) } - resourceId := fmt.Sprintf("%s/ipConfigurations/%s|%s", networkInterfaceId, ipConfigurationName, natRuleId) d.SetId(resourceId) return resourceArmNetworkInterfaceNatRuleAssociationRead(d, meta) diff --git a/azurerm/resource_arm_network_interface_nat_rule_association_test.go b/azurerm/resource_arm_network_interface_nat_rule_association_test.go index dd7d26b80471..b4367f6d090a 100644 --- a/azurerm/resource_arm_network_interface_nat_rule_association_test.go +++ b/azurerm/resource_arm_network_interface_nat_rule_association_test.go @@ -30,6 +30,35 @@ func TestAccAzureRMNetworkInterfaceNATRuleAssociation_basic(t *testing.T) { }) } +func TestAccAzureRMNetworkInterfaceNATRuleAssociation_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_network_interface_nat_rule_association.test" + rInt := tf.AccRandTimeInt() + location := testLocation() + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + // intentional as this is a Virtual Resource + CheckDestroy: testCheckAzureRMNetworkInterfaceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMNetworkInterfaceNATRuleAssociation_basic(rInt, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMNetworkInterfaceNATRuleAssociationExists(resourceName), + ), + }, + { + 
Config: testAccAzureRMNetworkInterfaceNATRuleAssociation_requiresImport(rInt, location), + ExpectError: testRequiresImportError("azurerm_network_interface_nat_rule_association"), + }, + }, + }) +} + func TestAccAzureRMNetworkInterfaceNATRuleAssociation_deleted(t *testing.T) { resourceName := "azurerm_network_interface_nat_rule_association.test" ri := tf.AccRandTimeInt() @@ -226,3 +255,16 @@ resource "azurerm_network_interface_nat_rule_association" "test" { } `, rInt, location, rInt, rInt, rInt, rInt) } + +func testAccAzureRMNetworkInterfaceNATRuleAssociation_requiresImport(rInt int, location string) string { + template := testAccAzureRMNetworkInterfaceNATRuleAssociation_basic(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_network_interface_nat_rule_association" "import" { + network_interface_id = "${azurerm_network_interface_nat_rule_association.test.network_interface_id}" + ip_configuration_name = "${azurerm_network_interface_nat_rule_association.test.ip_configuration_name}" + nat_rule_id = "${azurerm_network_interface_nat_rule_association.test.nat_rule_id}" +} +`, template) +} diff --git a/azurerm/resource_arm_network_interface_test.go b/azurerm/resource_arm_network_interface_test.go index e0b8756c3640..8a5d05bd4078 100644 --- a/azurerm/resource_arm_network_interface_test.go +++ b/azurerm/resource_arm_network_interface_test.go @@ -30,6 +30,34 @@ func TestAccAzureRMNetworkInterface_disappears(t *testing.T) { }) } +func TestAccAzureRMNetworkInterface_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_network_interface.test" + rInt := tf.AccRandTimeInt() + location := testLocation() + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMNetworkInterfaceDestroy, + Steps: []resource.TestStep{ + { + Config: 
testAccAzureRMNetworkInterface_basic(rInt, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMNetworkInterfaceExists(resourceName), + ), + }, + { + Config: testAccAzureRMNetworkInterface_requiresImport(rInt, location), + ExpectError: testRequiresImportError("azurerm_network_interface"), + }, + }, + }) +} + func TestAccAzureRMNetworkInterface_basic(t *testing.T) { resourceName := "azurerm_network_interface.test" rInt := tf.AccRandTimeInt() @@ -503,6 +531,25 @@ resource "azurerm_network_interface" "test" { `, rInt, location, rInt, rInt) } +func testAccAzureRMNetworkInterface_requiresImport(rInt int, location string) string { + template := testAccAzureRMNetworkInterface_basic(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_network_interface" "import" { + name = "${azurerm_network_interface.test.name}" + location = "${azurerm_network_interface.test.location}" + resource_group_name = "${azurerm_network_interface.test.resource_group_name}" + + ip_configuration { + name = "testconfiguration1" + subnet_id = "${azurerm_subnet.test.id}" + private_ip_address_allocation = "Dynamic" + } +} +`, template) +} + func testAccAzureRMNetworkInterface_basicWithNetworkSecurityGroup(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { @@ -734,7 +781,7 @@ resource "azurerm_network_interface" "test" { private_ip_address_allocation = "Dynamic" } - tags { + tags = { environment = "Production" cost_center = "MSFT" } @@ -774,7 +821,7 @@ resource "azurerm_network_interface" "test" { private_ip_address_allocation = "Dynamic" } - tags { + tags = { environment = "staging" } } @@ -1139,7 +1186,7 @@ resource "azurerm_application_gateway" "test" { backend_http_settings_name = "backend-http-1" } - tags { + tags = { environment = "tf01" } } @@ -1175,7 +1222,7 @@ resource "azurerm_network_security_group" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" 
- tags { + tags = { environment = "Production" } } @@ -1214,7 +1261,7 @@ resource "azurerm_public_ip" "test" { resource_group_name = "${azurerm_resource_group.test.name}" allocation_method = "Dynamic" - tags { + tags = { environment = "Production" } } @@ -1244,7 +1291,7 @@ resource "azurerm_network_interface" "test1" { private_ip_address_allocation = "Dynamic" } - tags { + tags = { environment = "staging" } } @@ -1260,7 +1307,7 @@ resource "azurerm_network_interface" "test2" { private_ip_address_allocation = "Dynamic" } - tags { + tags = { environment = "staging" } } diff --git a/azurerm/resource_arm_network_security_group.go b/azurerm/resource_arm_network_security_group.go index 97504ea610f9..06b164d53971 100644 --- a/azurerm/resource_arm_network_security_group.go +++ b/azurerm/resource_arm_network_security_group.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/set" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -163,8 +164,22 @@ func resourceArmNetworkSecurityGroupCreateUpdate(d *schema.ResourceData, meta in ctx := meta.(*ArmClient).StopContext name := d.Get("name").(string) - location := azureRMNormalizeLocation(d.Get("location").(string)) resGroup := d.Get("resource_group_name").(string) + + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resGroup, name, "") + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Network Security Group %q (Resource Group %q): %s", name, resGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_network_security_group", *existing.ID) + } + } + + location := 
azureRMNormalizeLocation(d.Get("location").(string)) tags := d.Get("tags").(map[string]interface{}) sgRules, sgErr := expandAzureRmSecurityRules(d) diff --git a/azurerm/resource_arm_network_security_group_test.go b/azurerm/resource_arm_network_security_group_test.go index 1c12ebe637d2..3635e12908f4 100644 --- a/azurerm/resource_arm_network_security_group_test.go +++ b/azurerm/resource_arm_network_security_group_test.go @@ -34,6 +34,34 @@ func TestAccAzureRMNetworkSecurityGroup_basic(t *testing.T) { }) } +func TestAccAzureRMNetworkSecurityGroup_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_network_security_group.test" + rInt := tf.AccRandTimeInt() + location := testLocation() + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMNetworkSecurityGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMNetworkSecurityGroup_basic(rInt, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMNetworkSecurityGroupExists(resourceName), + ), + }, + { + Config: testAccAzureRMNetworkSecurityGroup_requiresImport(rInt, location), + ExpectError: testRequiresImportError("azurerm_network_security_group"), + }, + }, + }) +} + func TestAccAzureRMNetworkSecurityGroup_singleRule(t *testing.T) { resourceName := "azurerm_network_security_group.test" rInt := tf.AccRandTimeInt() @@ -314,6 +342,19 @@ resource "azurerm_network_security_group" "test" { `, rInt, location) } +func testAccAzureRMNetworkSecurityGroup_requiresImport(rInt int, location string) string { + template := testAccAzureRMNetworkSecurityGroup_basic(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_network_security_group" "test" { + name = "${azurerm_network_security_group.test.name}" + location = "${azurerm_network_security_group.test.location}" + 
resource_group_name = "${azurerm_network_security_group.test.resource_group_name}" +} +`, template) +} + func testAccAzureRMNetworkSecurityGroup_singleRule(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { @@ -404,7 +445,7 @@ resource "azurerm_network_security_group" "test" { destination_address_prefix = "*" } - tags { + tags = { environment = "Production" cost_center = "MSFT" } @@ -436,7 +477,7 @@ resource "azurerm_network_security_group" "test" { destination_address_prefix = "*" } - tags { + tags = { environment = "staging" } } diff --git a/azurerm/resource_arm_network_security_rule.go b/azurerm/resource_arm_network_security_rule.go index f5195a98ef5a..e26869bb8198 100644 --- a/azurerm/resource_arm_network_security_rule.go +++ b/azurerm/resource_arm_network_security_rule.go @@ -6,6 +6,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -160,6 +161,19 @@ func resourceArmNetworkSecurityRuleCreateUpdate(d *schema.ResourceData, meta int nsgName := d.Get("network_security_group_name").(string) resGroup := d.Get("resource_group_name").(string) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resGroup, nsgName, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Rule %q (Network Security Group %q / Resource Group %q): %s", name, nsgName, resGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_network_security_rule", *existing.ID) + } + } + sourcePortRange := d.Get("source_port_range").(string) destinationPortRange := 
d.Get("destination_port_range").(string) sourceAddressPrefix := d.Get("source_address_prefix").(string) diff --git a/azurerm/resource_arm_network_security_rule_test.go b/azurerm/resource_arm_network_security_rule_test.go index 453ebfa3328b..9a6bfcf03f22 100644 --- a/azurerm/resource_arm_network_security_rule_test.go +++ b/azurerm/resource_arm_network_security_rule_test.go @@ -35,6 +35,35 @@ func TestAccAzureRMNetworkSecurityRule_basic(t *testing.T) { }) } +func TestAccAzureRMNetworkSecurityRule_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_network_security_rule.test" + rInt := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMNetworkSecurityRuleDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMNetworkSecurityRule_basic(rInt, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMNetworkSecurityRuleExists(resourceName), + ), + }, + { + Config: testAccAzureRMNetworkSecurityRule_requiresImport(rInt, location), + ExpectError: testRequiresImportError("azurerm_network_security_rule"), + }, + }, + }) +} + func TestAccAzureRMNetworkSecurityRule_disappears(t *testing.T) { resourceGroup := "azurerm_network_security_rule.test" rInt := tf.AccRandTimeInt() @@ -228,6 +257,8 @@ resource "azurerm_network_security_group" "test" { resource "azurerm_network_security_rule" "test" { name = "test123" + network_security_group_name = "${azurerm_network_security_group.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" priority = 100 direction = "Outbound" access = "Allow" @@ -236,12 +267,31 @@ resource "azurerm_network_security_rule" "test" { destination_port_range = "*" source_address_prefix = "*" destination_address_prefix = "*" - resource_group_name = 
"${azurerm_resource_group.test.name}" - network_security_group_name = "${azurerm_network_security_group.test.name}" } `, rInt, location) } +func testAccAzureRMNetworkSecurityRule_requiresImport(rInt int, location string) string { + template := testAccAzureRMNetworkSecurityRule_basic(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_network_security_rule" "import" { + name = "${azurerm_network_security_rule.test.name}" + network_security_group_name = "${azurerm_network_security_rule.test.network_security_group_name}" + resource_group_name = "${azurerm_network_security_rule.test.resource_group_name}" + priority = "${azurerm_network_security_rule.test.priority}" + direction = "${azurerm_network_security_rule.test.direction}" + access = "${azurerm_network_security_rule.test.access}" + protocol = "${azurerm_network_security_rule.test.protocol}" + source_port_range = "${azurerm_network_security_rule.test.source_port_range}" + destination_port_range = "${azurerm_network_security_rule.test.destination_port_range}" + source_address_prefix = "${azurerm_network_security_rule.test.source_address_prefix}" + destination_address_prefix = "${azurerm_network_security_rule.test.destination_address_prefix}" +} +`, template) +} + func testAccAzureRMNetworkSecurityRule_updateBasic(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test1" { diff --git a/azurerm/resource_arm_network_watcher.go b/azurerm/resource_arm_network_watcher.go index f70707c50fb4..72633418c5d9 100644 --- a/azurerm/resource_arm_network_watcher.go +++ b/azurerm/resource_arm_network_watcher.go @@ -6,6 +6,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network" "github.com/hashicorp/terraform/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/response" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -41,6 +42,20 @@ func resourceArmNetworkWatcherCreateUpdate(d *schema.ResourceData, meta interfac name := d.Get("name").(string) resourceGroup := d.Get("resource_group_name").(string) + + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Network Watcher %q (Resource Group %q): %s", name, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_network_watcher", *existing.ID) + } + } + location := azureRMNormalizeLocation(d.Get("location").(string)) tags := d.Get("tags").(map[string]interface{}) diff --git a/azurerm/resource_arm_network_watcher_test.go b/azurerm/resource_arm_network_watcher_test.go index 5073370c312c..4d5309542795 100644 --- a/azurerm/resource_arm_network_watcher_test.go +++ b/azurerm/resource_arm_network_watcher_test.go @@ -17,16 +17,33 @@ func TestAccAzureRMNetworkWatcher(t *testing.T) { // (which our test suite can't easily workaround) testCases := map[string]map[string]func(t *testing.T){ "basic": { - "basic": testAccAzureRMNetworkWatcher_basic, - "complete": testAccAzureRMNetworkWatcher_complete, - "update": testAccAzureRMNetworkWatcher_update, - "disappears": testAccAzureRMNetworkWatcher_disappears, + "basic": testAccAzureRMNetworkWatcher_basic, + "requiresImport": testAccAzureRMNetworkWatcher_requiresImport, + "complete": testAccAzureRMNetworkWatcher_complete, + "update": testAccAzureRMNetworkWatcher_update, + "disappears": testAccAzureRMNetworkWatcher_disappears, + }, + "DataSource": { + "basic": testAccDataSourceAzureRMNetworkWatcher_basic, + }, + "ConnectionMonitor": { + "addressBasic": testAccAzureRMConnectionMonitor_addressBasic, + "addressComplete": testAccAzureRMConnectionMonitor_addressComplete, + 
"addressUpdate": testAccAzureRMConnectionMonitor_addressUpdate, + "vmBasic": testAccAzureRMConnectionMonitor_vmBasic, + "vmComplete": testAccAzureRMConnectionMonitor_vmComplete, + "vmUpdate": testAccAzureRMConnectionMonitor_vmUpdate, + "destinationUpdate": testAccAzureRMConnectionMonitor_destinationUpdate, + "missingDestinationInvalid": testAccAzureRMConnectionMonitor_missingDestination, + "bothDestinationsInvalid": testAccAzureRMConnectionMonitor_conflictingDestinations, + "requiresImport": testAccAzureRMConnectionMonitor_requiresImport, }, "PacketCapture": { "localDisk": testAccAzureRMPacketCapture_localDisk, "storageAccount": testAccAzureRMPacketCapture_storageAccount, "storageAccountAndLocalDisk": testAccAzureRMPacketCapture_storageAccountAndLocalDisk, "withFilters": testAccAzureRMPacketCapture_withFilters, + "requiresImport": testAccAzureRMPacketCapture_requiresImport, }, } @@ -46,7 +63,7 @@ func TestAccAzureRMNetworkWatcher(t *testing.T) { func testAccAzureRMNetworkWatcher_basic(t *testing.T) { resourceName := "azurerm_network_watcher.test" rInt := tf.AccRandTimeInt() - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMNetworkWatcherDestroy, @@ -66,10 +83,38 @@ func testAccAzureRMNetworkWatcher_basic(t *testing.T) { }) } +func testAccAzureRMNetworkWatcher_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_network_watcher.test" + rInt := tf.AccRandTimeInt() + location := testLocation() + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMNetworkWatcherDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMNetworkWatcher_basicConfig(rInt, location), + Check: resource.ComposeTestCheckFunc( + 
testCheckAzureRMNetworkWatcherExists(resourceName), + ), + }, + { + Config: testAccAzureRMNetworkWatcher_requiresImportConfig(rInt, location), + ExpectError: testRequiresImportError("azurerm_network_watcher"), + }, + }, + }) +} + func testAccAzureRMNetworkWatcher_complete(t *testing.T) { resourceName := "azurerm_network_watcher.test" rInt := tf.AccRandTimeInt() - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMNetworkWatcherDestroy, @@ -92,7 +137,8 @@ func testAccAzureRMNetworkWatcher_complete(t *testing.T) { func testAccAzureRMNetworkWatcher_update(t *testing.T) { resourceName := "azurerm_network_watcher.test" rInt := tf.AccRandTimeInt() - resource.ParallelTest(t, resource.TestCase{ + + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMNetworkWatcherDestroy, @@ -117,7 +163,7 @@ func testAccAzureRMNetworkWatcher_disappears(t *testing.T) { resourceName := "azurerm_network_watcher.test" rInt := tf.AccRandTimeInt() - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMNetworkWatcherDestroy, @@ -233,6 +279,19 @@ resource "azurerm_network_watcher" "test" { `, rInt, location, rInt) } +func testAccAzureRMNetworkWatcher_requiresImportConfig(rInt int, location string) string { + template := testAccAzureRMNetworkWatcher_basicConfig(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_network_watcher" "import" { + name = "${azurerm_network_watcher.test.name}" + location = "${azurerm_network_watcher.test.location}" + resource_group_name = "${azurerm_network_watcher.test.resource_group_name}" +} +`, template) +} + func testAccAzureRMNetworkWatcher_completeConfig(rInt int, location string) string { return fmt.Sprintf(` 
resource "azurerm_resource_group" "test" { @@ -245,7 +304,7 @@ resource "azurerm_network_watcher" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { "Source" = "AccTests" } } diff --git a/azurerm/resource_arm_notification_hub.go b/azurerm/resource_arm_notification_hub.go index 6819adb48d6e..3e24861e78e6 100644 --- a/azurerm/resource_arm_notification_hub.go +++ b/azurerm/resource_arm_notification_hub.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -136,6 +137,19 @@ func resourceArmNotificationHubCreateUpdate(d *schema.ResourceData, meta interfa resourceGroup := d.Get("resource_group_name").(string) location := azureRMNormalizeLocation(d.Get("location").(string)) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, namespaceName, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Notification Hub %q (Namespace %q / Resource Group %q): %+v", name, namespaceName, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_notification_hub", *existing.ID) + } + } + apnsRaw := d.Get("apns_credential").([]interface{}) apnsCredential, err := expandNotificationHubsAPNSCredentials(apnsRaw) if err != nil { diff --git a/azurerm/resource_arm_notification_hub_authorization_rule.go b/azurerm/resource_arm_notification_hub_authorization_rule.go index c26b19939fb5..e5ae644dca38 100644 --- a/azurerm/resource_arm_notification_hub_authorization_rule.go +++ b/azurerm/resource_arm_notification_hub_authorization_rule.go 
@@ -6,6 +6,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/notificationhubs/mgmt/2017-04-01/notificationhubs" "github.com/hashicorp/terraform/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -84,10 +85,23 @@ func resourceArmNotificationHubAuthorizationRuleCreateUpdate(d *schema.ResourceD manage := d.Get("manage").(bool) send := d.Get("send").(bool) listen := d.Get("listen").(bool) - rights := expandNotificationHubAuthorizationRuleRights(manage, send, listen) + + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.GetAuthorizationRule(ctx, resourceGroup, namespaceName, notificationHubName, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Authorization Rule %q (Notification Hub %q / Namespace %q / Resource Group %q): %+v", name, notificationHubName, namespaceName, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_notification_hub_authorization_rule", *existing.ID) + } + } + parameters := notificationhubs.SharedAccessAuthorizationRuleCreateOrUpdateParameters{ Properties: &notificationhubs.SharedAccessAuthorizationRuleProperties{ - Rights: rights, + Rights: expandNotificationHubAuthorizationRuleRights(manage, send, listen), }, } diff --git a/azurerm/resource_arm_notification_hub_authorization_rule_test.go b/azurerm/resource_arm_notification_hub_authorization_rule_test.go index cf5a373784d0..f1c1d0d0cc9e 100644 --- a/azurerm/resource_arm_notification_hub_authorization_rule_test.go +++ b/azurerm/resource_arm_notification_hub_authorization_rule_test.go @@ -12,9 +12,7 @@ import ( func TestAccAzureRMNotificationHubAuthorizationRule_listen(t *testing.T) { resourceName := "azurerm_notification_hub_authorization_rule.test" - ri := tf.AccRandTimeInt() - location 
:= testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -22,7 +20,7 @@ func TestAccAzureRMNotificationHubAuthorizationRule_listen(t *testing.T) { CheckDestroy: testCheckAzureRMNotificationHubAuthorizationRuleDestroy, Steps: []resource.TestStep{ { - Config: testAzureRMNotificationHubAuthorizationRule_listen(ri, location), + Config: testAzureRMNotificationHubAuthorizationRule_listen(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMNotificationHubAuthorizationRuleExists(resourceName), resource.TestCheckResourceAttr(resourceName, "manage", "false"), @@ -41,6 +39,39 @@ func TestAccAzureRMNotificationHubAuthorizationRule_listen(t *testing.T) { }) } +func TestAccAzureRMNotificationHubAuthorizationRule_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_notification_hub_authorization_rule.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMNotificationHubAuthorizationRuleDestroy, + Steps: []resource.TestStep{ + { + Config: testAzureRMNotificationHubAuthorizationRule_listen(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMNotificationHubAuthorizationRuleExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "manage", "false"), + resource.TestCheckResourceAttr(resourceName, "send", "false"), + resource.TestCheckResourceAttr(resourceName, "listen", "true"), + resource.TestCheckResourceAttrSet(resourceName, "primary_access_key"), + resource.TestCheckResourceAttrSet(resourceName, "secondary_access_key"), + ), + }, + { + Config: testAzureRMNotificationHubAuthorizationRule_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_notification_hub_authorization_rule"), + }, + }, + 
}) +} + func TestAccAzureRMNotificationHubAuthorizationRule_manage(t *testing.T) { resourceName := "azurerm_notification_hub_authorization_rule.test" @@ -210,6 +241,20 @@ resource "azurerm_notification_hub_authorization_rule" "test" { `, template, ri) } +func testAzureRMNotificationHubAuthorizationRule_requiresImport(ri int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_notification_hub_authorization_rule" "import" { + name = "${azurerm_notification_hub_authorization_rule.test.name}" + notification_hub_name = "${azurerm_notification_hub_authorization_rule.test.notification_hub_name}" + namespace_name = "${azurerm_notification_hub_authorization_rule.test.namespace_name}" + resource_group_name = "${azurerm_notification_hub_authorization_rule.test.resource_group_name}" + listen = "${azurerm_notification_hub_authorization_rule.test.listen}" +} +`, testAzureRMNotificationHubAuthorizationRule_listen(ri, location)) +} + func testAzureRMNotificationHubAuthorizationRule_send(ri int, location string) string { template := testAzureRMNotificationHubAuthorizationRule_template(ri, location) return fmt.Sprintf(` diff --git a/azurerm/resource_arm_notification_hub_namespace.go b/azurerm/resource_arm_notification_hub_namespace.go index 3292c220f7c7..7e5c44ebe3a8 100644 --- a/azurerm/resource_arm_notification_hub_namespace.go +++ b/azurerm/resource_arm_notification_hub_namespace.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/response" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -92,15 +93,25 @@ func resourceArmNotificationHubNamespaceCreateUpdate(d *schema.ResourceData, met name := d.Get("name").(string) resourceGroup := d.Get("resource_group_name").(string) location := 
azureRMNormalizeLocation(d.Get("location").(string)) - - sku := expandNotificationHubNamespacesSku(d.Get("sku").([]interface{})) - namespaceType := d.Get("namespace_type").(string) enabled := d.Get("enabled").(bool) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Notification Hub Namespace %q (Resource Group %q): %+v", name, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_notification_hub_namespace", *existing.ID) + } + } + parameters := notificationhubs.NamespaceCreateOrUpdateParameters{ Location: utils.String(location), - Sku: sku, + Sku: expandNotificationHubNamespacesSku(d.Get("sku").([]interface{})), NamespaceProperties: &notificationhubs.NamespaceProperties{ Region: utils.String(location), NamespaceType: notificationhubs.NamespaceType(namespaceType), diff --git a/azurerm/resource_arm_notification_hub_namespace_test.go b/azurerm/resource_arm_notification_hub_namespace_test.go index 6ce05bbfe33b..423a6bb93dfa 100644 --- a/azurerm/resource_arm_notification_hub_namespace_test.go +++ b/azurerm/resource_arm_notification_hub_namespace_test.go @@ -12,9 +12,7 @@ import ( func TestAccAzureRMNotificationHubNamespace_free(t *testing.T) { resourceName := "azurerm_notification_hub_namespace.test" - ri := tf.AccRandTimeInt() - location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -22,7 +20,7 @@ func TestAccAzureRMNotificationHubNamespace_free(t *testing.T) { CheckDestroy: testCheckAzureRMNotificationHubNamespaceDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMNotificationHubNamespace_free(ri, location), + Config: testAccAzureRMNotificationHubNamespace_free(ri, testLocation()), Check: resource.ComposeTestCheckFunc(
testCheckAzureRMNotificationHubNamespaceExists(resourceName), ), @@ -36,6 +34,34 @@ func TestAccAzureRMNotificationHubNamespace_free(t *testing.T) { }) } +func TestAccAzureRMNotificationHubNamespace_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_notification_hub_namespace.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMNotificationHubNamespaceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMNotificationHubNamespace_free(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMNotificationHubNamespaceExists(resourceName), + ), + }, + { + Config: testAccAzureRMNotificationHubNamespace_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_notification_hub_namespace"), + }, + }, + }) +} + func testCheckAzureRMNotificationHubNamespaceExists(resourceName string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] @@ -106,3 +132,20 @@ resource "azurerm_notification_hub_namespace" "test" { } `, ri, location, ri) } + +func testAccAzureRMNotificationHubNamespace_requiresImport(ri int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_notification_hub_namespace" "import" { + name = "${azurerm_notification_hub_namespace.test.name}" + resource_group_name = "${azurerm_notification_hub_namespace.test.resource_group_name}" + location = "${azurerm_notification_hub_namespace.test.location}" + namespace_type = "${azurerm_notification_hub_namespace.test.namespace_type}" + + sku { + name = "Free" + } +} +`, testAccAzureRMNotificationHubNamespace_free(ri, location)) +} diff --git a/azurerm/resource_arm_notification_hub_test.go 
b/azurerm/resource_arm_notification_hub_test.go index 23e9fa45be0d..6160d2befeb4 100644 --- a/azurerm/resource_arm_notification_hub_test.go +++ b/azurerm/resource_arm_notification_hub_test.go @@ -12,9 +12,7 @@ import ( func TestAccAzureRMNotificationHub_basic(t *testing.T) { resourceName := "azurerm_notification_hub.test" - ri := tf.AccRandTimeInt() - location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -22,7 +20,7 @@ func TestAccAzureRMNotificationHub_basic(t *testing.T) { CheckDestroy: testCheckAzureRMNotificationHubDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMNotificationHub_basic(ri, location), + Config: testAccAzureRMNotificationHub_basic(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMNotificationHubExists(resourceName), resource.TestCheckResourceAttr(resourceName, "apns_credential.#", "0"), @@ -38,6 +36,36 @@ func TestAccAzureRMNotificationHub_basic(t *testing.T) { }) } +func TestAccAzureRMNotificationHub_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_notification_hub.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMNotificationHubDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMNotificationHub_basic(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMNotificationHubExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "apns_credential.#", "0"), + resource.TestCheckResourceAttr(resourceName, "gcm_credential.#", "0"), + ), + }, + { + Config: testAccAzureRMNotificationHub_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_notification_hub"), + }, + }, + }) +} + func 
testCheckAzureRMNotificationHubExists(resourceName string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] @@ -118,3 +146,16 @@ resource "azurerm_notification_hub" "test" { } `, ri, location, ri, ri) } + +func testAccAzureRMNotificationHub_requiresImport(ri int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_notification_hub" "import" { + name = "${azurerm_notification_hub.test.name}" + namespace_name = "${azurerm_notification_hub.test.namespace_name}" + resource_group_name = "${azurerm_notification_hub.test.resource_group_name}" + location = "${azurerm_notification_hub.test.location}" +} +`, testAccAzureRMNotificationHub_basic(ri, location)) +} diff --git a/azurerm/resource_arm_packet_capture.go b/azurerm/resource_arm_packet_capture.go index 2fae2bab4767..af70e2abce14 100644 --- a/azurerm/resource_arm_packet_capture.go +++ b/azurerm/resource_arm_packet_capture.go @@ -3,6 +3,8 @@ package azurerm import ( "fmt" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "log" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network" @@ -143,6 +145,19 @@ func resourceArmPacketCaptureCreate(d *schema.ResourceData, meta interface{}) er totalBytesPerSession := d.Get("maximum_bytes_per_session").(int) timeLimitInSeconds := d.Get("maximum_capture_duration").(int) + if requireResourcesToBeImported { + existing, err := client.Get(ctx, resourceGroup, watcherName, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Packet Capture %q (Resource Group %q): %s", name, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_packet_capture", *existing.ID) + } + } + storageLocation, err := expandArmPacketCaptureStorageLocation(d) if err != nil { return err diff --git 
a/azurerm/resource_arm_packet_capture_test.go b/azurerm/resource_arm_packet_capture_test.go index f439f076cb34..85d232306a31 100644 --- a/azurerm/resource_arm_packet_capture_test.go +++ b/azurerm/resource_arm_packet_capture_test.go @@ -17,7 +17,7 @@ func testAccAzureRMPacketCapture_localDisk(t *testing.T) { ri := tf.AccRandTimeInt() location := testLocation() - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMPacketCaptureDestroy, @@ -37,6 +37,35 @@ func testAccAzureRMPacketCapture_localDisk(t *testing.T) { }) } +func testAccAzureRMPacketCapture_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_packet_capture.test" + ri := tf.AccRandTimeInt() + + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMPacketCaptureDestroy, + Steps: []resource.TestStep{ + { + Config: testAzureRMPacketCapture_localDiskConfig(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMPacketCaptureExists(resourceName), + ), + }, + { + Config: testAzureRMPacketCapture_localDiskConfig_requiresImport(ri, location), + ExpectError: testRequiresImportError("azurerm_packet_capture"), + }, + }, + }) +} func testAccAzureRMPacketCapture_storageAccount(t *testing.T) { resourceName := "azurerm_packet_capture.test" @@ -44,7 +73,7 @@ func testAccAzureRMPacketCapture_storageAccount(t *testing.T) { rs := acctest.RandString(5) location := testLocation() - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMPacketCaptureDestroy, @@ -71,7 +100,7 @@ func 
testAccAzureRMPacketCapture_storageAccountAndLocalDisk(t *testing.T) { rs := acctest.RandString(5) location := testLocation() - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMPacketCaptureDestroy, @@ -97,7 +126,7 @@ func testAccAzureRMPacketCapture_withFilters(t *testing.T) { ri := tf.AccRandTimeInt() location := testLocation() - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMPacketCaptureDestroy, @@ -275,6 +304,26 @@ resource "azurerm_packet_capture" "test" { `, config, rInt) } +func testAzureRMPacketCapture_localDiskConfig_requiresImport(rInt int, location string) string { + config := testAzureRMPacketCapture_localDiskConfig(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_packet_capture" "import" { + name = "${azurerm_packet_capture.test.name}" + network_watcher_name = "${azurerm_packet_capture.test.network_watcher_name}" + resource_group_name = "${azurerm_packet_capture.test.resource_group_name}" + target_resource_id = "${azurerm_packet_capture.test.target_resource_id}" + + storage_location { + file_path = "/var/captures/packet.cap" + } + + depends_on = ["azurerm_virtual_machine_extension.test"] +} +`, config) +} + func testAzureRMPacketCapture_localDiskConfigWithFilters(rInt int, location string) string { config := testAzureRMPacketCapture_base(rInt, location) return fmt.Sprintf(` diff --git a/azurerm/resource_arm_policy_assignment.go b/azurerm/resource_arm_policy_assignment.go index 07eae1e523a8..04450f2432b1 100644 --- a/azurerm/resource_arm_policy_assignment.go +++ b/azurerm/resource_arm_policy_assignment.go @@ -4,6 +4,8 @@ import ( "fmt" "log" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "time" "context" @@ -113,6 +115,19 @@ func 
resourceArmPolicyAssignmentCreateOrUpdate(d *schema.ResourceData, meta inte policyDefinitionId := d.Get("policy_definition_id").(string) displayName := d.Get("display_name").(string) + if requireResourcesToBeImported { + existing, err := client.Get(ctx, scope, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Policy Assignment %q: %s", name, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_policy_assignment", *existing.ID) + } + } + assignment := policy.Assignment{ AssignmentProperties: &policy.AssignmentProperties{ PolicyDefinitionID: utils.String(policyDefinitionId), diff --git a/azurerm/resource_arm_policy_assignment_test.go b/azurerm/resource_arm_policy_assignment_test.go index 2e70d331ad49..e20863937a25 100644 --- a/azurerm/resource_arm_policy_assignment_test.go +++ b/azurerm/resource_arm_policy_assignment_test.go @@ -12,9 +12,7 @@ import ( func TestAccAzureRMPolicyAssignment_basic(t *testing.T) { resourceName := "azurerm_policy_assignment.test" - ri := tf.AccRandTimeInt() - location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -22,7 +20,7 @@ func TestAccAzureRMPolicyAssignment_basic(t *testing.T) { CheckDestroy: testCheckAzureRMPolicyAssignmentDestroy, Steps: []resource.TestStep{ { - Config: testAzureRMPolicyAssignment_basic(ri, location), + Config: testAzureRMPolicyAssignment_basic(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMPolicyAssignmentExists(resourceName), ), @@ -36,12 +34,37 @@ func TestAccAzureRMPolicyAssignment_basic(t *testing.T) { }) } -func TestAccAzureRMPolicyAssignment_deployIfNotExists_policy(t *testing.T) { - resourceName := "azurerm_policy_assignment.test" +func TestAccAzureRMPolicyAssignment_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required 
to be imported") + return + } + resourceName := "azurerm_policy_assignment.test" ri := tf.AccRandTimeInt() - location := testLocation() + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMPolicyAssignmentDestroy, + Steps: []resource.TestStep{ + { + Config: testAzureRMPolicyAssignment_basic(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMPolicyAssignmentExists(resourceName), + ), + }, + { + Config: testAzureRMPolicyAssignment_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_policy_assignment"), + }, + }, + }) +} + +func TestAccAzureRMPolicyAssignment_deployIfNotExists_policy(t *testing.T) { + resourceName := "azurerm_policy_assignment.test" + ri := tf.AccRandTimeInt() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -49,7 +72,7 @@ func TestAccAzureRMPolicyAssignment_deployIfNotExists_policy(t *testing.T) { CheckDestroy: testCheckAzureRMPolicyAssignmentDestroy, Steps: []resource.TestStep{ { - Config: testAzureRMPolicyAssignment_deployIfNotExists_policy(ri, location), + Config: testAzureRMPolicyAssignment_deployIfNotExists_policy(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMPolicyAssignmentExists(resourceName), ), @@ -200,6 +223,18 @@ resource "azurerm_policy_assignment" "test" { `, ri, ri, location, ri, location, ri) } +func testAzureRMPolicyAssignment_requiresImport(ri int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_policy_assignment" "import" { + name = "${azurerm_policy_assignment.test.name}" + scope = "${azurerm_policy_assignment.test.scope}" + policy_definition_id = "${azurerm_policy_assignment.test.policy_definition_id}" +} +`, testAzureRMPolicyAssignment_basic(ri, location)) +} + func testAzureRMPolicyAssignment_deployIfNotExists_policy(ri int, location string) string { return fmt.Sprintf(` resource 
"azurerm_policy_definition" "test" { diff --git a/azurerm/resource_arm_policy_definition.go b/azurerm/resource_arm_policy_definition.go index 78336f4035f1..c5ac5c389982 100644 --- a/azurerm/resource_arm_policy_definition.go +++ b/azurerm/resource_arm_policy_definition.go @@ -7,6 +7,8 @@ import ( "regexp" "strings" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "time" "strconv" @@ -84,6 +86,7 @@ func resourceArmPolicyDefinition() *schema.Resource { "metadata": { Type: schema.TypeString, Optional: true, + Computed: true, ValidateFunc: validation.ValidateJsonString, DiffSuppressFunc: structure.SuppressJsonDiff, }, @@ -109,6 +112,19 @@ func resourceArmPolicyDefinitionCreateUpdate(d *schema.ResourceData, meta interf description := d.Get("description").(string) managementGroupID := d.Get("management_group_id").(string) + if requireResourcesToBeImported { + existing, err := getPolicyDefinition(ctx, client, name, managementGroupID) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Policy Definition %q: %s", name, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_policy_definition", *existing.ID) + } + } + properties := policy.DefinitionProperties{ PolicyType: policy.Type(policyType), Mode: policy.Mode(mode), @@ -213,33 +229,15 @@ func resourceArmPolicyDefinitionRead(d *schema.ResourceData, meta interface{}) e d.Set("display_name", props.DisplayName) d.Set("description", props.Description) - if policyRule := props.PolicyRule; policyRule != nil { - policyRuleVal := policyRule.(map[string]interface{}) - policyRuleStr, err := structure.FlattenJsonToString(policyRuleVal) - if err != nil { - return fmt.Errorf("unable to flatten JSON for `policy_rule`: %s", err) - } - + if policyRuleStr := flattenJSON(props.PolicyRule); policyRuleStr != "" { d.Set("policy_rule", policyRuleStr) } - if metadata := props.Metadata; 
metadata != nil { - metadataVal := metadata.(map[string]interface{}) - metadataStr, err := structure.FlattenJsonToString(metadataVal) - if err != nil { - return fmt.Errorf("unable to flatten JSON for `metadata`: %s", err) - } - + if metadataStr := flattenJSON(props.Metadata); metadataStr != "" { d.Set("metadata", metadataStr) } - if parameters := props.Parameters; parameters != nil { - paramsVal := props.Parameters.(map[string]interface{}) - parametersStr, err := structure.FlattenJsonToString(paramsVal) - if err != nil { - return fmt.Errorf("unable to flatten JSON for `parameters`: %s", err) - } - + if parametersStr := flattenJSON(props.Parameters); parametersStr != "" { d.Set("parameters", parametersStr) } } @@ -333,3 +331,15 @@ func getPolicyDefinition(ctx context.Context, client policy.DefinitionsClient, n return res, err } + +func flattenJSON(stringMap interface{}) string { + if stringMap != nil { + value := stringMap.(map[string]interface{}) + jsonString, err := structure.FlattenJsonToString(value) + if err == nil { + return jsonString + } + } + + return "" +} diff --git a/azurerm/resource_arm_policy_definition_test.go b/azurerm/resource_arm_policy_definition_test.go index b857c146f3b8..f1abb5083ff1 100644 --- a/azurerm/resource_arm_policy_definition_test.go +++ b/azurerm/resource_arm_policy_definition_test.go @@ -11,6 +11,35 @@ import ( ) func TestAccAzureRMPolicyDefinition_basic(t *testing.T) { + resourceName := "azurerm_policy_definition.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMPolicyDefinitionDestroy, + Steps: []resource.TestStep{ + { + Config: testAzureRMPolicyDefinition_basic(ri), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMPolicyDefinitionExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func 
TestAccAzureRMPolicyDefinition_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + resourceName := "azurerm_policy_definition.test" ri := tf.AccRandTimeInt() @@ -26,6 +55,29 @@ func TestAccAzureRMPolicyDefinition_basic(t *testing.T) { testCheckAzureRMPolicyDefinitionExists(resourceName), ), }, + { + Config: testAzureRMPolicyDefinition_requiresImport(ri), + ExpectError: testRequiresImportError("azurerm_policy_definition"), + }, + }, + }) +} + +func TestAccAzureRMPolicyDefinition_computedMetadata(t *testing.T) { + resourceName := "azurerm_policy_definition.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMPolicyDefinitionDestroy, + Steps: []resource.TestStep{ + { + Config: testAzureRMPolicyDefinition_computedMetadata(ri), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMPolicyDefinitionExists(resourceName), + ), + }, { ResourceName: resourceName, ImportState: true, @@ -175,6 +227,56 @@ PARAMETERS `, ri, ri) } +func testAzureRMPolicyDefinition_requiresImport(ri int) string { + template := testAzureRMPolicyDefinition_basic(ri) + return fmt.Sprintf(` +%s + +resource "azurerm_policy_definition" "import" { + name = "${azurerm_policy_definition.test.name}" + policy_type = "${azurerm_policy_definition.test.policy_type}" + mode = "${azurerm_policy_definition.test.mode}" + display_name = "${azurerm_policy_definition.test.display_name}" + policy_rule = "${azurerm_policy_definition.test.policy_rule}" + parameters = "${azurerm_policy_definition.test.parameters}" +} +`, template) +} + +func testAzureRMPolicyDefinition_computedMetadata(rInt int) string { + return fmt.Sprintf(` +resource "azurerm_policy_definition" "test" { + name = "acctest-%d" + policy_type = "Custom" + mode = "Indexed" + display_name = "DefaultTags" + + policy_rule = < 0 
{ portsRaw := applicationPortsRaw[0].(map[string]interface{}) @@ -950,6 +994,14 @@ func flattenServiceFabricClusterNodeTypes(input *[]servicefabric.NodeTypeDescrip output["name"] = *name } + if placementProperties := v.PlacementProperties; placementProperties != nil { + output["placement_properties"] = placementProperties + } + + if capacities := v.Capacities; capacities != nil { + output["capacities"] = capacities + } + if count := v.VMInstanceCount; count != nil { output["instance_count"] = int(*count) } @@ -985,7 +1037,7 @@ func flattenServiceFabricClusterNodeTypes(input *[]servicefabric.NodeTypeDescrip } output["application_ports"] = applicationPorts - ephermeralPorts := make([]interface{}, 0) + ephemeralPorts := make([]interface{}, 0) if ports := v.EphemeralPorts; ports != nil { r := make(map[string]interface{}) if start := ports.StartPort; start != nil { @@ -994,9 +1046,9 @@ func flattenServiceFabricClusterNodeTypes(input *[]servicefabric.NodeTypeDescrip if end := ports.EndPort; end != nil { r["end_port"] = int(*end) } - ephermeralPorts = append(ephermeralPorts, r) + ephemeralPorts = append(ephemeralPorts, r) } - output["ephemeral_ports"] = ephermeralPorts + output["ephemeral_ports"] = ephemeralPorts results = append(results, output) } diff --git a/azurerm/resource_arm_service_fabric_cluster_test.go b/azurerm/resource_arm_service_fabric_cluster_test.go index 2ca19bc9df14..02fd1f7a01a4 100644 --- a/azurerm/resource_arm_service_fabric_cluster_test.go +++ b/azurerm/resource_arm_service_fabric_cluster_test.go @@ -14,8 +14,6 @@ import ( func TestAccAzureRMServiceFabricCluster_basic(t *testing.T) { resourceName := "azurerm_service_fabric_cluster.test" ri := tf.AccRandTimeInt() - location := testLocation() - config := testAccAzureRMServiceFabricCluster_basic(ri, location, 3) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -23,7 +21,7 @@ func TestAccAzureRMServiceFabricCluster_basic(t *testing.T) { CheckDestroy: 
testCheckAzureRMServiceFabricClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMServiceFabricCluster_basic(ri, testLocation(), 3), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceFabricClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "management_endpoint", "http://example:80"), @@ -47,10 +45,47 @@ func TestAccAzureRMServiceFabricCluster_basic(t *testing.T) { }) } +func TestAccAzureRMServiceFabricCluster_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_service_fabric_cluster.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMServiceFabricClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMServiceFabricCluster_basic(ri, testLocation(), 3), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMServiceFabricClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "management_endpoint", "http://example:80"), + resource.TestCheckResourceAttr(resourceName, "add_on_features.#", "0"), + resource.TestCheckResourceAttr(resourceName, "certificate.#", "0"), + resource.TestCheckResourceAttr(resourceName, "reverse_proxy_certificate.#", "0"), + resource.TestCheckResourceAttr(resourceName, "client_certificate_thumbprint.#", "0"), + resource.TestCheckResourceAttr(resourceName, "azure_active_directory.#", "0"), + resource.TestCheckResourceAttr(resourceName, "diagnostics_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "node_type.#", "1"), + resource.TestCheckResourceAttr(resourceName, "node_type.0.instance_count", "3"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + Config: testAccAzureRMServiceFabricCluster_requiresImport(ri, testLocation(), 3), + 
ExpectError: testRequiresImportError("azurerm_service_fabric_cluster"), + }, + }, + }) +} + func TestAccAzureRMServiceFabricCluster_manualClusterCodeVersion(t *testing.T) { resourceName := "azurerm_service_fabric_cluster.test" ri := tf.AccRandTimeInt() - location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -58,7 +93,7 @@ func TestAccAzureRMServiceFabricCluster_manualClusterCodeVersion(t *testing.T) { CheckDestroy: testCheckAzureRMServiceFabricClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMServiceFabricCluster_manualClusterCodeVersion(ri, location, "6.3.162.9494"), + Config: testAccAzureRMServiceFabricCluster_manualClusterCodeVersion(ri, testLocation(), "6.3.162.9494"), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceFabricClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "upgrade_mode", "Manual"), @@ -66,7 +101,7 @@ func TestAccAzureRMServiceFabricCluster_manualClusterCodeVersion(t *testing.T) { ), }, { - Config: testAccAzureRMServiceFabricCluster_manualClusterCodeVersion(ri, location, "6.3.176.9494"), + Config: testAccAzureRMServiceFabricCluster_manualClusterCodeVersion(ri, testLocation(), "6.3.176.9494"), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceFabricClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "upgrade_mode", "Manual"), @@ -85,7 +120,6 @@ func TestAccAzureRMServiceFabricCluster_manualClusterCodeVersion(t *testing.T) { func TestAccAzureRMServiceFabricCluster_manualLatest(t *testing.T) { resourceName := "azurerm_service_fabric_cluster.test" ri := tf.AccRandTimeInt() - location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -93,7 +127,7 @@ func TestAccAzureRMServiceFabricCluster_manualLatest(t *testing.T) { CheckDestroy: testCheckAzureRMServiceFabricClusterDestroy, Steps: []resource.TestStep{ { - Config: 
testAccAzureRMServiceFabricCluster_manualClusterCodeVersion(ri, location, ""), + Config: testAccAzureRMServiceFabricCluster_manualClusterCodeVersion(ri, testLocation(), ""), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceFabricClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "upgrade_mode", "Manual"), @@ -112,8 +146,7 @@ func TestAccAzureRMServiceFabricCluster_manualLatest(t *testing.T) { func TestAccAzureRMServiceFabricCluster_addOnFeatures(t *testing.T) { resourceName := "azurerm_service_fabric_cluster.test" ri := tf.AccRandTimeInt() - location := testLocation() - config := testAccAzureRMServiceFabricCluster_addOnFeatures(ri, location) + config := testAccAzureRMServiceFabricCluster_addOnFeatures(ri, testLocation()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -139,7 +172,6 @@ func TestAccAzureRMServiceFabricCluster_addOnFeatures(t *testing.T) { func TestAccAzureRMServiceFabricCluster_certificate(t *testing.T) { resourceName := "azurerm_service_fabric_cluster.test" ri := tf.AccRandTimeInt() - location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -147,7 +179,7 @@ func TestAccAzureRMServiceFabricCluster_certificate(t *testing.T) { CheckDestroy: testCheckAzureRMServiceFabricClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMServiceFabricCluster_certificates(ri, location), + Config: testAccAzureRMServiceFabricCluster_certificates(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceFabricClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "certificate.#", "1"), @@ -170,7 +202,6 @@ func TestAccAzureRMServiceFabricCluster_certificate(t *testing.T) { func TestAccAzureRMServiceFabricCluster_reverseProxyCertificate(t *testing.T) { resourceName := "azurerm_service_fabric_cluster.test" ri := tf.AccRandTimeInt() - location := testLocation() resource.ParallelTest(t, 
resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -178,7 +209,7 @@ func TestAccAzureRMServiceFabricCluster_reverseProxyCertificate(t *testing.T) { CheckDestroy: testCheckAzureRMServiceFabricClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMServiceFabricCluster_reverseProxyCertificates(ri, location), + Config: testAccAzureRMServiceFabricCluster_reverseProxyCertificates(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceFabricClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "certificate.#", "1"), @@ -190,6 +221,7 @@ func TestAccAzureRMServiceFabricCluster_reverseProxyCertificate(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "fabric_settings.0.name", "Security"), resource.TestCheckResourceAttr(resourceName, "fabric_settings.0.parameters.ClusterProtectionLevel", "EncryptAndSign"), resource.TestCheckResourceAttr(resourceName, "management_endpoint", "https://example:80"), + resource.TestCheckResourceAttr(resourceName, "node_type.0.reverse_proxy_endpoint_port", "19081"), ), }, { @@ -201,10 +233,11 @@ func TestAccAzureRMServiceFabricCluster_reverseProxyCertificate(t *testing.T) { }) } -func TestAccAzureRMServiceFabricCluster_clientCertificateThumbprint(t *testing.T) { +func TestAccAzureRMServiceFabricCluster_reverseProxyNotSet(t *testing.T) { resourceName := "azurerm_service_fabric_cluster.test" ri := tf.AccRandTimeInt() location := testLocation() + config := testAccAzureRMServiceFabricCluster_basic(ri, location, 3) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -212,7 +245,107 @@ func TestAccAzureRMServiceFabricCluster_clientCertificateThumbprint(t *testing.T CheckDestroy: testCheckAzureRMServiceFabricClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMServiceFabricCluster_clientCertificateThumbprint(ri, location), + Config: config, + Check: resource.ComposeTestCheckFunc( + 
testCheckAzureRMServiceFabricClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "management_endpoint", "http://example:80"), + resource.TestCheckResourceAttr(resourceName, "add_on_features.#", "0"), + resource.TestCheckResourceAttr(resourceName, "certificate.#", "0"), + resource.TestCheckResourceAttr(resourceName, "reverse_proxy_certificate.#", "0"), + resource.TestCheckResourceAttr(resourceName, "client_certificate_thumbprint.#", "0"), + resource.TestCheckResourceAttr(resourceName, "azure_active_directory.#", "0"), + resource.TestCheckResourceAttr(resourceName, "diagnostics_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "node_type.#", "1"), + resource.TestCheckResourceAttr(resourceName, "node_type.0.instance_count", "3"), + resource.TestCheckResourceAttr(resourceName, "node_type.0.reverse_proxy_endpoint_port", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMServiceFabricCluster_reverseProxyUpdate(t *testing.T) { + resourceName := "azurerm_service_fabric_cluster.test" + ri := tf.AccRandTimeInt() + location := testLocation() + configBasic := testAccAzureRMServiceFabricCluster_basic(ri, location, 3) + configProxy := testAccAzureRMServiceFabricCluster_reverseProxyCertificates(ri, location) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMServiceFabricClusterDestroy, + Steps: []resource.TestStep{ + { + Config: configBasic, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMServiceFabricClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "management_endpoint", "http://example:80"), + resource.TestCheckResourceAttr(resourceName, "add_on_features.#", "0"), + resource.TestCheckResourceAttr(resourceName, "certificate.#", "0"), + 
resource.TestCheckResourceAttr(resourceName, "reverse_proxy_certificate.#", "0"), + resource.TestCheckResourceAttr(resourceName, "client_certificate_thumbprint.#", "0"), + resource.TestCheckResourceAttr(resourceName, "azure_active_directory.#", "0"), + resource.TestCheckResourceAttr(resourceName, "diagnostics_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "node_type.#", "1"), + resource.TestCheckResourceAttr(resourceName, "node_type.0.instance_count", "3"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + Config: configProxy, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMServiceFabricClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "certificate.#", "1"), + resource.TestCheckResourceAttr(resourceName, "certificate.0.thumbprint", "33:41:DB:6C:F2:AF:72:C6:11:DF:3B:E3:72:1A:65:3A:F1:D4:3E:CD:50:F5:84:F8:28:79:3D:BE:91:03:C3:EE"), + resource.TestCheckResourceAttr(resourceName, "certificate.0.x509_store_name", "My"), + resource.TestCheckResourceAttr(resourceName, "reverse_proxy_certificate.#", "1"), + resource.TestCheckResourceAttr(resourceName, "reverse_proxy_certificate.0.thumbprint", "33:41:DB:6C:F2:AF:72:C6:11:DF:3B:E3:72:1A:65:3A:F1:D4:3E:CD:50:F5:84:F8:28:79:3D:BE:91:03:C3:EE"), + resource.TestCheckResourceAttr(resourceName, "reverse_proxy_certificate.0.x509_store_name", "My"), + resource.TestCheckResourceAttr(resourceName, "fabric_settings.0.name", "Security"), + resource.TestCheckResourceAttr(resourceName, "fabric_settings.0.parameters.ClusterProtectionLevel", "EncryptAndSign"), + resource.TestCheckResourceAttr(resourceName, "management_endpoint", "https://example:80"), + resource.TestCheckResourceAttr(resourceName, "node_type.0.reverse_proxy_endpoint_port", "19081"), + ), + }, + { + Config: configBasic, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMServiceFabricClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "management_endpoint", 
"http://example:80"), + resource.TestCheckResourceAttr(resourceName, "add_on_features.#", "0"), + resource.TestCheckResourceAttr(resourceName, "certificate.#", "0"), + resource.TestCheckResourceAttr(resourceName, "reverse_proxy_certificate.#", "0"), + resource.TestCheckResourceAttr(resourceName, "client_certificate_thumbprint.#", "0"), + resource.TestCheckResourceAttr(resourceName, "azure_active_directory.#", "0"), + resource.TestCheckResourceAttr(resourceName, "diagnostics_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "node_type.#", "1"), + resource.TestCheckResourceAttr(resourceName, "node_type.0.instance_count", "3"), + resource.TestCheckResourceAttr(resourceName, "node_type.0.reverse_proxy_endpoint_port", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + }, + }) +} + +func TestAccAzureRMServiceFabricCluster_clientCertificateThumbprint(t *testing.T) { + resourceName := "azurerm_service_fabric_cluster.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMServiceFabricClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMServiceFabricCluster_clientCertificateThumbprint(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceFabricClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "certificate.#", "1"), @@ -238,7 +371,6 @@ func TestAccAzureRMServiceFabricCluster_clientCertificateThumbprint(t *testing.T func TestAccAzureRMServiceFabricCluster_readerAdminClientCertificateThumbprint(t *testing.T) { resourceName := "azurerm_service_fabric_cluster.test" ri := tf.AccRandTimeInt() - location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -246,7 +378,7 @@ func TestAccAzureRMServiceFabricCluster_readerAdminClientCertificateThumbprint(t CheckDestroy: 
testCheckAzureRMServiceFabricClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMServiceFabricCluster_readerAdminClientCertificateThumbprint(ri, location), + Config: testAccAzureRMServiceFabricCluster_readerAdminClientCertificateThumbprint(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceFabricClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "certificate.#", "1"), @@ -275,15 +407,13 @@ func TestAccAzureRMServiceFabricCluster_azureActiveDirectory(t *testing.T) { resourceName := "azurerm_service_fabric_cluster.test" ri := tf.AccRandTimeInt() - location := testLocation() - resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMServiceFabricClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMServiceFabricCluster_azureActiveDirectory(ri, location), + Config: testAccAzureRMServiceFabricCluster_azureActiveDirectory(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceFabricClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "certificate.#", "1"), @@ -311,8 +441,6 @@ func TestAccAzureRMServiceFabricCluster_diagnosticsConfig(t *testing.T) { resourceName := "azurerm_service_fabric_cluster.test" ri := tf.AccRandTimeInt() rs := acctest.RandString(4) - location := testLocation() - config := testAccAzureRMServiceFabricCluster_diagnosticsConfig(ri, rs, location) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -320,7 +448,7 @@ func TestAccAzureRMServiceFabricCluster_diagnosticsConfig(t *testing.T) { CheckDestroy: testCheckAzureRMServiceFabricClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMServiceFabricCluster_diagnosticsConfig(ri, rs, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceFabricClusterExists(resourceName), 
resource.TestCheckResourceAttr(resourceName, "diagnostics_config.#", "1"), @@ -343,8 +471,6 @@ func TestAccAzureRMServiceFabricCluster_diagnosticsConfig(t *testing.T) { func TestAccAzureRMServiceFabricCluster_fabricSettings(t *testing.T) { resourceName := "azurerm_service_fabric_cluster.test" ri := tf.AccRandTimeInt() - location := testLocation() - config := testAccAzureRMServiceFabricCluster_fabricSettings(ri, location) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -352,7 +478,7 @@ func TestAccAzureRMServiceFabricCluster_fabricSettings(t *testing.T) { CheckDestroy: testCheckAzureRMServiceFabricClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMServiceFabricCluster_fabricSettings(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceFabricClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "fabric_settings.#", "1"), @@ -373,7 +499,6 @@ func TestAccAzureRMServiceFabricCluster_fabricSettings(t *testing.T) { func TestAccAzureRMServiceFabricCluster_fabricSettingsRemove(t *testing.T) { resourceName := "azurerm_service_fabric_cluster.test" ri := tf.AccRandTimeInt() - location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -381,14 +506,14 @@ func TestAccAzureRMServiceFabricCluster_fabricSettingsRemove(t *testing.T) { CheckDestroy: testCheckAzureRMServiceFabricClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMServiceFabricCluster_fabricSettings(ri, location), + Config: testAccAzureRMServiceFabricCluster_fabricSettings(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceFabricClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "fabric_settings.#", "1"), ), }, { - Config: testAccAzureRMServiceFabricCluster_basic(ri, location, 3), + Config: testAccAzureRMServiceFabricCluster_basic(ri, testLocation(), 3), Check: 
resource.ComposeTestCheckFunc( testCheckAzureRMServiceFabricClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "fabric_settings.#", "0"), @@ -401,8 +526,6 @@ func TestAccAzureRMServiceFabricCluster_fabricSettingsRemove(t *testing.T) { func TestAccAzureRMServiceFabricCluster_nodeTypeCustomPorts(t *testing.T) { resourceName := "azurerm_service_fabric_cluster.test" ri := tf.AccRandTimeInt() - location := testLocation() - config := testAccAzureRMServiceFabricCluster_nodeTypeCustomPorts(ri, location) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -410,7 +533,7 @@ func TestAccAzureRMServiceFabricCluster_nodeTypeCustomPorts(t *testing.T) { CheckDestroy: testCheckAzureRMServiceFabricClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMServiceFabricCluster_nodeTypeCustomPorts(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceFabricClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "node_type.#", "1"), @@ -434,8 +557,6 @@ func TestAccAzureRMServiceFabricCluster_nodeTypeCustomPorts(t *testing.T) { func TestAccAzureRMServiceFabricCluster_nodeTypesMultiple(t *testing.T) { resourceName := "azurerm_service_fabric_cluster.test" ri := tf.AccRandTimeInt() - location := testLocation() - config := testAccAzureRMServiceFabricCluster_nodeTypeMultiple(ri, location) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -443,7 +564,7 @@ func TestAccAzureRMServiceFabricCluster_nodeTypesMultiple(t *testing.T) { CheckDestroy: testCheckAzureRMServiceFabricClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMServiceFabricCluster_nodeTypeMultiple(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceFabricClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "node_type.#", "2"), @@ -467,7 +588,6 @@ func 
TestAccAzureRMServiceFabricCluster_nodeTypesMultiple(t *testing.T) { func TestAccAzureRMServiceFabricCluster_nodeTypesUpdate(t *testing.T) { resourceName := "azurerm_service_fabric_cluster.test" ri := tf.AccRandTimeInt() - location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -475,14 +595,14 @@ func TestAccAzureRMServiceFabricCluster_nodeTypesUpdate(t *testing.T) { CheckDestroy: testCheckAzureRMServiceFabricClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMServiceFabricCluster_basic(ri, location, 3), + Config: testAccAzureRMServiceFabricCluster_basic(ri, testLocation(), 3), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceFabricClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "node_type.0.instance_count", "3"), ), }, { - Config: testAccAzureRMServiceFabricCluster_basic(ri, location, 4), + Config: testAccAzureRMServiceFabricCluster_basic(ri, testLocation(), 4), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceFabricClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "node_type.0.instance_count", "4"), @@ -492,11 +612,38 @@ func TestAccAzureRMServiceFabricCluster_nodeTypesUpdate(t *testing.T) { }) } +func TestAccAzureRMServiceFabricCluster_nodeTypeProperties(t *testing.T) { + resourceName := "azurerm_service_fabric_cluster.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMServiceFabricClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMServiceFabricCluster_nodeTypeProperties(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMServiceFabricClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "node_type.0.placement_properties.%", "1"), + resource.TestCheckResourceAttr(resourceName, 
"node_type.0.placement_properties.HasSSD", "true"), + resource.TestCheckResourceAttr(resourceName, "node_type.0.capacities.%", "2"), + resource.TestCheckResourceAttr(resourceName, "node_type.0.capacities.ClientConnections", "20000"), + resource.TestCheckResourceAttr(resourceName, "node_type.0.capacities.MemoryGB", "8"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccAzureRMServiceFabricCluster_tags(t *testing.T) { resourceName := "azurerm_service_fabric_cluster.test" ri := tf.AccRandTimeInt() - location := testLocation() - config := testAccAzureRMServiceFabricCluster_tags(ri, location) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -504,7 +651,7 @@ func TestAccAzureRMServiceFabricCluster_tags(t *testing.T) { CheckDestroy: testCheckAzureRMServiceFabricClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMServiceFabricCluster_tags(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceFabricClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), @@ -590,8 +737,8 @@ resource "azurerm_service_fabric_cluster" "test" { reliability_level = "Bronze" upgrade_mode = "Automatic" vm_image = "Windows" - management_endpoint = "http://example:80" - + management_endpoint = "http://example:80" + node_type { name = "first" instance_count = %d @@ -603,6 +750,30 @@ resource "azurerm_service_fabric_cluster" "test" { `, rInt, location, rInt, count) } +func testAccAzureRMServiceFabricCluster_requiresImport(rInt int, location string, count int) string { + return fmt.Sprintf(` +%s + +resource "azurerm_service_fabric_cluster" "import" { + name = "${azurerm_service_fabric_cluster.test.name}" + resource_group_name = "${azurerm_service_fabric_cluster.test.resource_group_name}" + location = "${azurerm_service_fabric_cluster.test.location}" + reliability_level = 
"${azurerm_service_fabric_cluster.test.reliability_level}" + upgrade_mode = "${azurerm_service_fabric_cluster.test.upgrade_mode}" + vm_image = "${azurerm_service_fabric_cluster.test.vm_image}" + management_endpoint = "${azurerm_service_fabric_cluster.test.management_endpoint}" + + node_type { + name = "first" + instance_count = %d + is_primary = true + client_endpoint_port = 2020 + http_endpoint_port = 80 + } +} +`, testAccAzureRMServiceFabricCluster_basic(rInt, location, count), count) +} + func testAccAzureRMServiceFabricCluster_manualClusterCodeVersion(rInt int, location, clusterCodeVersion string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { @@ -618,8 +789,8 @@ resource "azurerm_service_fabric_cluster" "test" { upgrade_mode = "Manual" cluster_code_version = "%[3]s" vm_image = "Windows" - management_endpoint = "http://example:80" - + management_endpoint = "http://example:80" + node_type { name = "first" instance_count = 3 @@ -646,8 +817,8 @@ resource "azurerm_service_fabric_cluster" "test" { upgrade_mode = "Automatic" vm_image = "Windows" management_endpoint = "http://example:80" - add_on_features = ["DnsService", "RepairManager"] - + add_on_features = ["DnsService", "RepairManager"] + node_type { name = "first" instance_count = 3 @@ -673,21 +844,21 @@ resource "azurerm_service_fabric_cluster" "test" { reliability_level = "Bronze" upgrade_mode = "Automatic" vm_image = "Windows" - management_endpoint = "https://example:80" - + management_endpoint = "https://example:80" + certificate { thumbprint = "33:41:DB:6C:F2:AF:72:C6:11:DF:3B:E3:72:1A:65:3A:F1:D4:3E:CD:50:F5:84:F8:28:79:3D:BE:91:03:C3:EE" x509_store_name = "My" - } - + } + fabric_settings { - name = "Security" - - parameters { + name = "Security" + + parameters = { "ClusterProtectionLevel" = "EncryptAndSign" } - } - + } + node_type { name = "first" instance_count = 3 @@ -713,26 +884,26 @@ resource "azurerm_service_fabric_cluster" "test" { reliability_level = "Bronze" upgrade_mode 
= "Automatic" vm_image = "Windows" - management_endpoint = "https://example:80" - + management_endpoint = "https://example:80" + certificate { thumbprint = "33:41:DB:6C:F2:AF:72:C6:11:DF:3B:E3:72:1A:65:3A:F1:D4:3E:CD:50:F5:84:F8:28:79:3D:BE:91:03:C3:EE" x509_store_name = "My" - } - + } + reverse_proxy_certificate { thumbprint = "33:41:DB:6C:F2:AF:72:C6:11:DF:3B:E3:72:1A:65:3A:F1:D4:3E:CD:50:F5:84:F8:28:79:3D:BE:91:03:C3:EE" x509_store_name = "My" - } - + } + fabric_settings { - name = "Security" - - parameters { + name = "Security" + + parameters = { "ClusterProtectionLevel" = "EncryptAndSign" } - } - + } + node_type { name = "first" instance_count = 3 @@ -759,26 +930,26 @@ resource "azurerm_service_fabric_cluster" "test" { reliability_level = "Bronze" upgrade_mode = "Automatic" vm_image = "Windows" - management_endpoint = "https://example:80" - + management_endpoint = "https://example:80" + certificate { thumbprint = "33:41:DB:6C:F2:AF:72:C6:11:DF:3B:E3:72:1A:65:3A:F1:D4:3E:CD:50:F5:84:F8:28:79:3D:BE:91:03:C3:EE" x509_store_name = "My" - } - + } + client_certificate_thumbprint { thumbprint = "33:41:DB:6C:F2:AF:72:C6:11:DF:3B:E3:72:1A:65:3A:F1:D4:3E:CD:50:F5:84:F8:28:79:3D:BE:91:03:C3:EE" is_admin = true - } - + } + fabric_settings { - name = "Security" - - parameters { + name = "Security" + + parameters = { "ClusterProtectionLevel" = "EncryptAndSign" } - } - + } + node_type { name = "first" instance_count = 3 @@ -804,31 +975,31 @@ resource "azurerm_service_fabric_cluster" "test" { reliability_level = "Bronze" upgrade_mode = "Automatic" vm_image = "Windows" - management_endpoint = "https://example:80" - + management_endpoint = "https://example:80" + certificate { thumbprint = "33:41:DB:6C:F2:AF:72:C6:11:DF:3B:E3:72:1A:65:3A:F1:D4:3E:CD:50:F5:84:F8:28:79:3D:BE:91:03:C3:EE" x509_store_name = "My" - } - + } + client_certificate_thumbprint { thumbprint = "33:41:DB:6C:F2:AF:72:C6:11:DF:3B:E3:72:1A:65:3A:F1:D4:3E:CD:50:F5:84:F8:28:79:3D:BE:91:03:C3:EE" is_admin = true - 
} - + } + client_certificate_thumbprint { thumbprint = "33:41:DB:6C:F2:AF:72:C6:11:DF:3B:E3:72:1A:65:3A:F1:D4:3E:CD:50:F5:84:F8:28:79:3D:BE:91:03:C3:EE" is_admin = false - } - + } + fabric_settings { - name = "Security" - - parameters { + name = "Security" + + parameters = { "ClusterProtectionLevel" = "EncryptAndSign" } - } - + } + node_type { name = "first" instance_count = 3 @@ -865,27 +1036,27 @@ resource "azurerm_service_fabric_cluster" "test" { reliability_level = "Bronze" upgrade_mode = "Automatic" vm_image = "Windows" - management_endpoint = "https://example:80" - + management_endpoint = "https://example:80" + certificate { thumbprint = "33:41:DB:6C:F2:AF:72:C6:11:DF:3B:E3:72:1A:65:3A:F1:D4:3E:CD:50:F5:84:F8:28:79:3D:BE:91:03:C3:EE" x509_store_name = "My" - } - + } + azure_active_directory { tenant_id = "${data.azurerm_client_config.current.tenant_id}" cluster_application_id = "${azurerm_azuread_application.test.application_id}" client_application_id = "00000000-0000-0000-0000-000000000000" - } - + } + fabric_settings { - name = "Security" - - parameters { + name = "Security" + + parameters = { "ClusterProtectionLevel" = "EncryptAndSign" } - } - + } + node_type { name = "first" instance_count = 3 @@ -919,16 +1090,16 @@ resource "azurerm_service_fabric_cluster" "test" { reliability_level = "Bronze" upgrade_mode = "Automatic" vm_image = "Windows" - management_endpoint = "http://example:80" - + management_endpoint = "http://example:80" + diagnostics_config { storage_account_name = "${azurerm_storage_account.test.name}" protected_account_key_name = "StorageAccountKey1" blob_endpoint = "${azurerm_storage_account.test.primary_blob_endpoint}" queue_endpoint = "${azurerm_storage_account.test.primary_queue_endpoint}" table_endpoint = "${azurerm_storage_account.test.primary_table_endpoint}" - } - + } + node_type { name = "first" instance_count = 3 @@ -954,16 +1125,16 @@ resource "azurerm_service_fabric_cluster" "test" { reliability_level = "Bronze" upgrade_mode = 
"Automatic" vm_image = "Windows" - management_endpoint = "http://example:80" - + management_endpoint = "http://example:80" + fabric_settings { - name = "Security" - - parameters { + name = "Security" + + parameters = { "ClusterProtectionLevel" = "None" } - } - + } + node_type { name = "first" instance_count = 3 @@ -989,20 +1160,20 @@ resource "azurerm_service_fabric_cluster" "test" { reliability_level = "Bronze" upgrade_mode = "Automatic" vm_image = "Windows" - management_endpoint = "http://example:80" - + management_endpoint = "http://example:80" + node_type { name = "first" instance_count = 3 is_primary = true client_endpoint_port = 2020 - http_endpoint_port = 80 - + http_endpoint_port = 80 + application_ports { start_port = 20000 end_port = 29999 - } - + } + ephemeral_ports { start_port = 30000 end_port = 39999 @@ -1026,16 +1197,16 @@ resource "azurerm_service_fabric_cluster" "test" { reliability_level = "Bronze" upgrade_mode = "Automatic" vm_image = "Windows" - management_endpoint = "http://example:80" - + management_endpoint = "http://example:80" + node_type { name = "first" instance_count = 3 is_primary = true client_endpoint_port = 2020 http_endpoint_port = 80 - } - + } + node_type { name = "second" instance_count = 4 @@ -1047,13 +1218,12 @@ resource "azurerm_service_fabric_cluster" "test" { `, rInt, location, rInt) } -func testAccAzureRMServiceFabricCluster_tags(rInt int, location string) string { +func testAccAzureRMServiceFabricCluster_nodeTypeProperties(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { name = "acctestRG-%d" location = "%s" } - resource "azurerm_service_fabric_cluster" "test" { name = "acctest-%d" resource_group_name = "${azurerm_resource_group.test.name}" @@ -1062,16 +1232,52 @@ resource "azurerm_service_fabric_cluster" "test" { upgrade_mode = "Automatic" vm_image = "Windows" management_endpoint = "http://example:80" + node_type { + name = "first" + placement_properties = { + "HasSSD" = 
"true" + } + capacities = { + "ClientConnections" = "20000" + "MemoryGB" = "8" + } + instance_count = 3 + is_primary = true + client_endpoint_port = 2020 + http_endpoint_port = 80 + } + tags = { + "Hello" = "World" + } +} +`, rInt, location, rInt) +} +func testAccAzureRMServiceFabricCluster_tags(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_service_fabric_cluster" "test" { + name = "acctest-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + reliability_level = "Bronze" + upgrade_mode = "Automatic" + vm_image = "Windows" + management_endpoint = "http://example:80" + node_type { name = "first" instance_count = 3 is_primary = true client_endpoint_port = 2020 http_endpoint_port = 80 - } - - tags { + } + + tags = { "Hello" = "World" } } diff --git a/azurerm/resource_arm_servicebus_namespace.go b/azurerm/resource_arm_servicebus_namespace.go index 82929dd9cbc4..127c93ed0787 100644 --- a/azurerm/resource_arm_servicebus_namespace.go +++ b/azurerm/resource_arm_servicebus_namespace.go @@ -6,6 +6,8 @@ import ( "regexp" "strings" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" @@ -98,7 +100,7 @@ func resourceArmServiceBusNamespace() *schema.Resource { //so lets only allow it to be set if the SKU is premium if _, ok := d.GetOk("capacity"); ok { sku := d.Get("sku").(string) - if strings.EqualFold(sku, string(servicebus.Premium)) { + if !strings.EqualFold(sku, string(servicebus.Premium)) { return fmt.Errorf("`capacity` can only be set for a Premium SKU") } } @@ -120,6 +122,19 @@ func resourceArmServiceBusNamespaceCreateUpdate(d *schema.ResourceData, meta int sku := 
d.Get("sku").(string) tags := d.Get("tags").(map[string]interface{}) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing ServiceBus Namespace %q (resource group %q) ID", name, resourceGroup) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_servicebus_namespace", *existing.ID) + } + } + parameters := servicebus.SBNamespace{ Location: &location, Sku: &servicebus.SBSku{ diff --git a/azurerm/resource_arm_servicebus_namespace_authorization_rule.go b/azurerm/resource_arm_servicebus_namespace_authorization_rule.go index bd1ed46b5806..5e581b4916d1 100644 --- a/azurerm/resource_arm_servicebus_namespace_authorization_rule.go +++ b/azurerm/resource_arm_servicebus_namespace_authorization_rule.go @@ -4,6 +4,8 @@ import ( "fmt" "log" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus" "github.com/hashicorp/terraform/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" @@ -54,6 +56,19 @@ func resourceArmServiceBusNamespaceAuthorizationRuleCreateUpdate(d *schema.Resou resourceGroup := d.Get("resource_group_name").(string) namespaceName := d.Get("namespace_name").(string) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.GetAuthorizationRule(ctx, resourceGroup, namespaceName, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing ServiceBus Namespace Authorization Rule %q (Resource Group %q): %+v", name, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_servicebus_namespace_authorization_rule", 
*existing.ID) + } + } + parameters := servicebus.SBAuthorizationRule{ Name: &name, SBAuthorizationRuleProperties: &servicebus.SBAuthorizationRuleProperties{ diff --git a/azurerm/resource_arm_servicebus_namespace_authorization_rule_test.go b/azurerm/resource_arm_servicebus_namespace_authorization_rule_test.go index 5c2d187e8557..b6b0324430a0 100644 --- a/azurerm/resource_arm_servicebus_namespace_authorization_rule_test.go +++ b/azurerm/resource_arm_servicebus_namespace_authorization_rule_test.go @@ -62,6 +62,7 @@ func testAccAzureRMServiceBusNamespaceAuthorizationRule(t *testing.T, listen, se func TestAccAzureRMServiceBusNamespaceAuthorizationRule_rightsUpdate(t *testing.T) { resourceName := "azurerm_servicebus_namespace_authorization_rule.test" + ri := tf.AccRandTimeInt() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -69,7 +70,7 @@ func TestAccAzureRMServiceBusNamespaceAuthorizationRule_rightsUpdate(t *testing. CheckDestroy: testCheckAzureRMServiceBusNamespaceAuthorizationRuleDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMServiceBusNamespaceAuthorizationRule_base(tf.AccRandTimeInt(), testLocation(), true, false, false), + Config: testAccAzureRMServiceBusNamespaceAuthorizationRule_base(ri, testLocation(), true, false, false), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceBusNamespaceAuthorizationRuleExists(resourceName), resource.TestCheckResourceAttr(resourceName, "listen", "true"), @@ -78,7 +79,7 @@ func TestAccAzureRMServiceBusNamespaceAuthorizationRule_rightsUpdate(t *testing. 
), }, { - Config: testAccAzureRMServiceBusNamespaceAuthorizationRule_base(tf.AccRandTimeInt(), testLocation(), true, true, true), + Config: testAccAzureRMServiceBusNamespaceAuthorizationRule_base(ri, testLocation(), true, true, true), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceBusNamespaceAuthorizationRuleExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "name"), @@ -100,6 +101,35 @@ func TestAccAzureRMServiceBusNamespaceAuthorizationRule_rightsUpdate(t *testing. }, }) } +func TestAccAzureRMServiceBusNamespaceAuthorizationRule_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + resourceName := "azurerm_servicebus_namespace_authorization_rule.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMServiceBusNamespaceAuthorizationRuleDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMServiceBusNamespaceAuthorizationRule_base(ri, testLocation(), true, false, false), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMServiceBusNamespaceAuthorizationRuleExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "listen", "true"), + resource.TestCheckResourceAttr(resourceName, "send", "false"), + resource.TestCheckResourceAttr(resourceName, "manage", "false"), + ), + }, + { + Config: testAccAzureRMServiceBusNamespaceAuthorizationRule_requiresImport(ri, testLocation(), true, false, false), + ExpectError: testRequiresImportError("azurerm_servicebus_namespace_authorization_rule"), + }, + }, + }) +} func testCheckAzureRMServiceBusNamespaceAuthorizationRuleDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*ArmClient).serviceBusNamespacesClient @@ -180,3 +210,19 @@ resource "azurerm_servicebus_namespace_authorization_rule" "test" { } `, rInt, location, listen, 
send, manage) } + +func testAccAzureRMServiceBusNamespaceAuthorizationRule_requiresImport(rInt int, location string, listen, send, manage bool) string { + return fmt.Sprintf(` +%s + +resource "azurerm_servicebus_namespace_authorization_rule" "import" { + name = "${azurerm_servicebus_namespace_authorization_rule.test.name}" + namespace_name = "${azurerm_servicebus_namespace_authorization_rule.test.namespace_name}" + resource_group_name = "${azurerm_servicebus_namespace_authorization_rule.test.resource_group_name}" + + listen = "${azurerm_servicebus_namespace_authorization_rule.test.listen}" + send = "${azurerm_servicebus_namespace_authorization_rule.test.send}" + manage = "${azurerm_servicebus_namespace_authorization_rule.test.manage}" +} +`, testAccAzureRMServiceBusNamespaceAuthorizationRule_base(rInt, location, listen, send, manage)) +} diff --git a/azurerm/resource_arm_servicebus_namespace_test.go b/azurerm/resource_arm_servicebus_namespace_test.go index cacbd771d9ef..b205a12f390e 100644 --- a/azurerm/resource_arm_servicebus_namespace_test.go +++ b/azurerm/resource_arm_servicebus_namespace_test.go @@ -35,6 +35,32 @@ func TestAccAzureRMServiceBusNamespace_basic(t *testing.T) { }, }) } +func TestAccAzureRMServiceBusNamespace_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + resourceName := "azurerm_servicebus_namespace.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMServiceBusNamespaceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMServiceBusNamespace_basic(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMServiceBusNamespaceExists(resourceName), + ), + }, + { + Config: testAccAzureRMServiceBusNamespace_requiresImport(ri, testLocation()), + ExpectError: 
testRequiresImportError("azurerm_servicebus_namespace"), + }, + }, + }) +} func TestAccAzureRMServiceBusNamespace_readDefaultKeys(t *testing.T) { resourceName := "azurerm_servicebus_namespace.test" @@ -192,6 +218,19 @@ resource "azurerm_servicebus_namespace" "test" { `, rInt, location, rInt) } +func testAccAzureRMServiceBusNamespace_requiresImport(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_servicebus_namespace" "import" { + name = "${azurerm_servicebus_namespace.test.name}" + location = "${azurerm_servicebus_namespace.test.location}" + resource_group_name = "${azurerm_servicebus_namespace.test.resource_group_name}" + sku = "${azurerm_servicebus_namespace.test.sku}" +} +`, testAccAzureRMServiceBusNamespace_basic(rInt, location)) +} + func testAccAzureRMServiceBusNamespaceNonStandardCasing(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { diff --git a/azurerm/resource_arm_servicebus_queue.go b/azurerm/resource_arm_servicebus_queue.go index e9f7e0e5ff62..30f9e23a358e 100644 --- a/azurerm/resource_arm_servicebus_queue.go +++ b/azurerm/resource_arm_servicebus_queue.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -106,13 +107,14 @@ func resourceArmServiceBusQueue() *schema.Resource { Optional: true, }, - // TODO: remove these in the next major release + // TODO: remove this in 2.0 "enable_batched_operations": { Type: schema.TypeBool, Optional: true, Deprecated: "This field has been removed by Azure.", }, + // TODO: remove this in 2.0 "support_ordering": { Type: schema.TypeBool, Optional: true, @@ -146,6 +148,19 @@ func resourceArmServiceBusQueueCreateUpdate(d 
*schema.ResourceData, meta interfa requiresSession := d.Get("requires_session").(bool) deadLetteringOnMessageExpiration := d.Get("dead_lettering_on_message_expiration").(bool) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, namespaceName, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing ServiceBus Queue %q (Resource Group %q, Namespace %q): %+v", name, resourceGroup, namespaceName, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_servicebus_queue", *existing.ID) + } + } + parameters := servicebus.SBQueue{ Name: &name, SBQueueProperties: &servicebus.SBQueueProperties{ diff --git a/azurerm/resource_arm_servicebus_queue_authorization_rule.go b/azurerm/resource_arm_servicebus_queue_authorization_rule.go index fd4b93e9aaba..b154f16ea2ae 100644 --- a/azurerm/resource_arm_servicebus_queue_authorization_rule.go +++ b/azurerm/resource_arm_servicebus_queue_authorization_rule.go @@ -7,6 +7,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus" "github.com/hashicorp/terraform/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -61,6 +62,19 @@ func resourceArmServiceBusQueueAuthorizationRuleCreateUpdate(d *schema.ResourceD namespaceName := d.Get("namespace_name").(string) queueName := d.Get("queue_name").(string) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.GetAuthorizationRule(ctx, resourceGroup, namespaceName, queueName, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing ServiceBus Queue Authorization Rule %q (Resource 
Group %q): %+v", name, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_servicebus_queue_authorization_rule", *existing.ID) + } + } + parameters := servicebus.SBAuthorizationRule{ Name: &name, SBAuthorizationRuleProperties: &servicebus.SBAuthorizationRuleProperties{ diff --git a/azurerm/resource_arm_servicebus_queue_authorization_rule_test.go b/azurerm/resource_arm_servicebus_queue_authorization_rule_test.go index f32d0182faa7..bfe9b028994b 100644 --- a/azurerm/resource_arm_servicebus_queue_authorization_rule_test.go +++ b/azurerm/resource_arm_servicebus_queue_authorization_rule_test.go @@ -62,6 +62,7 @@ func testAccAzureRMServiceBusQueueAuthorizationRule(t *testing.T, listen, send, func TestAccAzureRMServiceBusQueueAuthorizationRule_rightsUpdate(t *testing.T) { resourceName := "azurerm_servicebus_queue_authorization_rule.test" + ri := tf.AccRandTimeInt() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -69,7 +70,7 @@ func TestAccAzureRMServiceBusQueueAuthorizationRule_rightsUpdate(t *testing.T) { CheckDestroy: testCheckAzureRMServiceBusQueueAuthorizationRuleDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMServiceBusQueueAuthorizationRule_base(tf.AccRandTimeInt(), testLocation(), true, false, false), + Config: testAccAzureRMServiceBusQueueAuthorizationRule_base(ri, testLocation(), true, false, false), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceBusQueueAuthorizationRuleExists(resourceName), resource.TestCheckResourceAttr(resourceName, "listen", "true"), @@ -78,7 +79,7 @@ func TestAccAzureRMServiceBusQueueAuthorizationRule_rightsUpdate(t *testing.T) { ), }, { - Config: testAccAzureRMServiceBusQueueAuthorizationRule_base(tf.AccRandTimeInt(), testLocation(), true, true, true), + Config: testAccAzureRMServiceBusQueueAuthorizationRule_base(ri, testLocation(), true, true, true), Check: resource.ComposeTestCheckFunc( 
testCheckAzureRMServiceBusQueueAuthorizationRuleExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "name"), @@ -100,6 +101,35 @@ func TestAccAzureRMServiceBusQueueAuthorizationRule_rightsUpdate(t *testing.T) { }, }) } +func TestAccAzureRMServiceBusQueueAuthorizationRule_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + resourceName := "azurerm_servicebus_queue_authorization_rule.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMServiceBusQueueAuthorizationRuleDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMServiceBusQueueAuthorizationRule_base(ri, testLocation(), true, false, false), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMServiceBusQueueAuthorizationRuleExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "listen", "true"), + resource.TestCheckResourceAttr(resourceName, "send", "false"), + resource.TestCheckResourceAttr(resourceName, "manage", "false"), + ), + }, + { + Config: testAccAzureRMServiceBusQueueAuthorizationRule_requiresImport(ri, testLocation(), true, false, false), + ExpectError: testRequiresImportError("azurerm_servicebus_queue_authorization_rule"), + }, + }, + }) +} func testCheckAzureRMServiceBusQueueAuthorizationRuleDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*ArmClient).serviceBusQueuesClient @@ -192,3 +222,20 @@ resource "azurerm_servicebus_queue_authorization_rule" "test" { } `, rInt, location, listen, send, manage) } + +func testAccAzureRMServiceBusQueueAuthorizationRule_requiresImport(rInt int, location string, listen, send, manage bool) string { + return fmt.Sprintf(` +%s + +resource "azurerm_servicebus_queue_authorization_rule" "import" { + name = "${azurerm_servicebus_queue_authorization_rule.test.name}" + 
namespace_name = "${azurerm_servicebus_queue_authorization_rule.test.namespace_name}" + queue_name = "${azurerm_servicebus_queue_authorization_rule.test.queue_name}" + resource_group_name = "${azurerm_servicebus_queue_authorization_rule.test.resource_group_name}" + + listen = "${azurerm_servicebus_queue_authorization_rule.test.listen}" + send = "${azurerm_servicebus_queue_authorization_rule.test.send}" + manage = "${azurerm_servicebus_queue_authorization_rule.test.manage}" +} +`, testAccAzureRMServiceBusQueueAuthorizationRule_base(rInt, location, listen, send, manage)) +} diff --git a/azurerm/resource_arm_servicebus_queue_test.go b/azurerm/resource_arm_servicebus_queue_test.go index e803bce7a91e..4f4e17aba940 100644 --- a/azurerm/resource_arm_servicebus_queue_test.go +++ b/azurerm/resource_arm_servicebus_queue_test.go @@ -14,7 +14,6 @@ import ( func TestAccAzureRMServiceBusQueue_basic(t *testing.T) { resourceName := "azurerm_servicebus_queue.test" ri := tf.AccRandTimeInt() - config := testAccAzureRMServiceBusQueue_basic(ri, testLocation()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -22,7 +21,7 @@ func TestAccAzureRMServiceBusQueue_basic(t *testing.T) { CheckDestroy: testCheckAzureRMServiceBusQueueDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMServiceBusQueue_basic(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceBusQueueExists(resourceName), resource.TestCheckResourceAttr(resourceName, "enable_express", "false"), @@ -37,6 +36,34 @@ func TestAccAzureRMServiceBusQueue_basic(t *testing.T) { }, }) } +func TestAccAzureRMServiceBusQueue_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + resourceName := "azurerm_servicebus_queue.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: 
testAccProviders, + CheckDestroy: testCheckAzureRMServiceBusQueueDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMServiceBusQueue_basic(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMServiceBusQueueExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "enable_express", "false"), + resource.TestCheckResourceAttr(resourceName, "enable_partitioning", "false"), + ), + }, + { + Config: testAccAzureRMServiceBusQueue_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_servicebus_queue"), + }, + }, + }) +} func TestAccAzureRMServiceBusQueue_update(t *testing.T) { resourceName := "azurerm_servicebus_queue.test" @@ -408,6 +435,18 @@ resource "azurerm_servicebus_queue" "test" { `, rInt, location, rInt, rInt) } +func testAccAzureRMServiceBusQueue_requiresImport(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_servicebus_queue" "import" { + name = "${azurerm_servicebus_queue.test.name}" + resource_group_name = "${azurerm_servicebus_queue.test.resource_group_name}" + namespace_name = "${azurerm_servicebus_queue.test.namespace_name}" +} +`, testAccAzureRMServiceBusQueue_basic(rInt, location)) +} + func testAccAzureRMServiceBusQueue_Premium(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { diff --git a/azurerm/resource_arm_servicebus_subscription.go b/azurerm/resource_arm_servicebus_subscription.go index db6ecf737456..71c99e0e6b7a 100644 --- a/azurerm/resource_arm_servicebus_subscription.go +++ b/azurerm/resource_arm_servicebus_subscription.go @@ -7,6 +7,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus" "github.com/hashicorp/terraform/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -116,6 +117,19 @@ func resourceArmServiceBusSubscriptionCreateUpdate(d *schema.ResourceData, meta maxDeliveryCount := int32(d.Get("max_delivery_count").(int)) requiresSession := d.Get("requires_session").(bool) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, namespaceName, topicName, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing ServiceBus Subscription %q (resource group %q, namespace %q, topic %q): %v", name, resourceGroup, namespaceName, topicName, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_servicebus_subscription", *existing.ID) + } + } + parameters := servicebus.SBSubscription{ SBSubscriptionProperties: &servicebus.SBSubscriptionProperties{ DeadLetteringOnMessageExpiration: &deadLetteringExpiration, @@ -142,12 +156,12 @@ func resourceArmServiceBusSubscriptionCreateUpdate(d *schema.ResourceData, meta } if _, err := client.CreateOrUpdate(ctx, resourceGroup, namespaceName, topicName, name, parameters); err != nil { - return err + return fmt.Errorf("Error issuing create/update request for ServiceBus Subscription %q (resource group %q, namespace %q, topic %q): %v", name, resourceGroup, namespaceName, topicName, err) } read, err := client.Get(ctx, resourceGroup, namespaceName, topicName, name) if err != nil { - return err + return fmt.Errorf("Error issuing get request for ServiceBus Subscription %q (resource group %q, namespace %q, topic %q): %v", name, resourceGroup, namespaceName, topicName, err) } if read.ID == nil { return fmt.Errorf("Cannot read ServiceBus Subscription %s (resource group %s) ID", name, resourceGroup) diff --git a/azurerm/resource_arm_servicebus_subscription_rule.go b/azurerm/resource_arm_servicebus_subscription_rule.go index fc62d26ac1b4..e1a3c83b317c 100644 
--- a/azurerm/resource_arm_servicebus_subscription_rule.go +++ b/azurerm/resource_arm_servicebus_subscription_rule.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/terraform/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/response" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -130,6 +131,20 @@ func resourceArmServiceBusSubscriptionRuleCreateUpdate(d *schema.ResourceData, m namespaceName := d.Get("namespace_name").(string) resourceGroup := d.Get("resource_group_name").(string) filterType := d.Get("filter_type").(string) + + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, namespaceName, topicName, subscriptionName, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Service Bus Subscription %q (Resource Group %q, namespace %q): %+v", name, resourceGroup, namespaceName, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_servicebus_subscription_rule", *existing.ID) + } + } + rule := servicebus.Rule{ Ruleproperties: &servicebus.Ruleproperties{ FilterType: servicebus.FilterType(filterType), @@ -159,12 +174,12 @@ func resourceArmServiceBusSubscriptionRuleCreateUpdate(d *schema.ResourceData, m } if _, err := client.CreateOrUpdate(ctx, resourceGroup, namespaceName, topicName, subscriptionName, name, rule); err != nil { - return err + return fmt.Errorf("Error issuing create/update request for Service Bus Subscription %q (Resource Group %q, namespace %q): %+v", name, resourceGroup, namespaceName, err) } read, err := client.Get(ctx, resourceGroup, namespaceName, topicName, subscriptionName, name) if err != nil { - return err + return 
fmt.Errorf("Error issuing get request for Service Bus Subscription %q (Resource Group %q, namespace %q): %+v", name, resourceGroup, namespaceName, err) } if read.ID == nil { return fmt.Errorf("Cannot read Service Bus Subscription Rule %s (resource group %s) ID", name, resourceGroup) diff --git a/azurerm/resource_arm_servicebus_subscription_rule_test.go b/azurerm/resource_arm_servicebus_subscription_rule_test.go index 7a881ae73266..6ef35206df5c 100644 --- a/azurerm/resource_arm_servicebus_subscription_rule_test.go +++ b/azurerm/resource_arm_servicebus_subscription_rule_test.go @@ -13,7 +13,6 @@ import ( func TestAccAzureRMServiceBusSubscriptionRule_basicSqlFilter(t *testing.T) { resourceName := "azurerm_servicebus_subscription_rule.test" ri := tf.AccRandTimeInt() - config := testAccAzureRMServiceBusSubscriptionRule_basicSqlFilter(ri, testLocation()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -21,7 +20,7 @@ func TestAccAzureRMServiceBusSubscriptionRule_basicSqlFilter(t *testing.T) { CheckDestroy: testCheckAzureRMServiceBusTopicDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMServiceBusSubscriptionRule_basicSqlFilter(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceBusSubscriptionRuleExists(resourceName), ), @@ -29,11 +28,36 @@ func TestAccAzureRMServiceBusSubscriptionRule_basicSqlFilter(t *testing.T) { }, }) } +func TestAccAzureRMServiceBusSubscriptionRule_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + resourceName := "azurerm_servicebus_subscription_rule.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMServiceBusTopicDestroy, + Steps: []resource.TestStep{ + { + Config: 
testAccAzureRMServiceBusSubscriptionRule_basicSqlFilter(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMServiceBusSubscriptionRuleExists(resourceName), + ), + }, + { + Config: testAccAzureRMServiceBusSubscriptionRule_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_servicebus_subscription_rule"), + }, + }, + }) +} func TestAccAzureRMServiceBusSubscriptionRule_basicCorrelationFilter(t *testing.T) { resourceName := "azurerm_servicebus_subscription_rule.test" ri := tf.AccRandTimeInt() - config := testAccAzureRMServiceBusSubscriptionRule_basicCorrelationFilter(ri, testLocation()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -41,7 +65,7 @@ func TestAccAzureRMServiceBusSubscriptionRule_basicCorrelationFilter(t *testing. CheckDestroy: testCheckAzureRMServiceBusTopicDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMServiceBusSubscriptionRule_basicCorrelationFilter(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceBusSubscriptionRuleExists(resourceName), ), @@ -53,7 +77,6 @@ func TestAccAzureRMServiceBusSubscriptionRule_basicCorrelationFilter(t *testing. 
func TestAccAzureRMServiceBusSubscriptionRule_sqlFilterWithAction(t *testing.T) { resourceName := "azurerm_servicebus_subscription_rule.test" ri := tf.AccRandTimeInt() - config := testAccAzureRMServiceBusSubscriptionRule_sqlFilterWithAction(ri, testLocation()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -61,7 +84,7 @@ func TestAccAzureRMServiceBusSubscriptionRule_sqlFilterWithAction(t *testing.T) CheckDestroy: testCheckAzureRMServiceBusTopicDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMServiceBusSubscriptionRule_sqlFilterWithAction(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceBusSubscriptionRuleExists(resourceName), ), @@ -73,7 +96,6 @@ func TestAccAzureRMServiceBusSubscriptionRule_sqlFilterWithAction(t *testing.T) func TestAccAzureRMServiceBusSubscriptionRule_correlationFilterWithAction(t *testing.T) { resourceName := "azurerm_servicebus_subscription_rule.test" ri := tf.AccRandTimeInt() - config := testAccAzureRMServiceBusSubscriptionRule_correlationFilterWithAction(ri, testLocation()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -81,7 +103,7 @@ func TestAccAzureRMServiceBusSubscriptionRule_correlationFilterWithAction(t *tes CheckDestroy: testCheckAzureRMServiceBusTopicDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMServiceBusSubscriptionRule_correlationFilterWithAction(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceBusSubscriptionRuleExists(resourceName), ), @@ -229,6 +251,22 @@ resource "azurerm_servicebus_subscription_rule" "test" { `, template, rInt) } +func testAccAzureRMServiceBusSubscriptionRule_requiresImport(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_servicebus_subscription_rule" "import" { + name = "${azurerm_servicebus_subscription_rule.test.name}" + namespace_name = 
"${azurerm_servicebus_subscription_rule.test.namespace_name}" + topic_name = "${azurerm_servicebus_subscription_rule.test.topic_name}" + subscription_name = "${azurerm_servicebus_subscription_rule.test.subscription_name}" + resource_group_name = "${azurerm_servicebus_subscription_rule.test.resource_group_name}" + filter_type = "${azurerm_servicebus_subscription_rule.test.filter_type}" + sql_filter = "${azurerm_servicebus_subscription_rule.test.sql_filter}" +} +`, testAccAzureRMServiceBusSubscriptionRule_basicSqlFilter(rInt, location)) +} + func testAccAzureRMServiceBusSubscriptionRule_basicSqlFilterUpdated(rInt int, location string) string { template := testAccAzureRMServiceBusSubscriptionRule_template(rInt, location) return fmt.Sprintf(` @@ -277,7 +315,7 @@ resource "azurerm_servicebus_subscription_rule" "test" { resource_group_name = "${azurerm_resource_group.test.name}" filter_type = "CorrelationFilter" - correlation_filter = { + correlation_filter { correlation_id = "test_correlation_id" message_id = "test_message_id" to = "test_to" @@ -304,7 +342,7 @@ resource "azurerm_servicebus_subscription_rule" "test" { resource_group_name = "${azurerm_resource_group.test.name}" filter_type = "CorrelationFilter" - correlation_filter = { + correlation_filter { correlation_id = "test_correlation_id" message_id = "test_message_id" } @@ -325,7 +363,7 @@ resource "azurerm_servicebus_subscription_rule" "test" { resource_group_name = "${azurerm_resource_group.test.name}" filter_type = "CorrelationFilter" - correlation_filter = { + correlation_filter { correlation_id = "test_correlation_id" message_id = "test_message_id_updated" reply_to = "test_reply_to_added" @@ -348,7 +386,7 @@ resource "azurerm_servicebus_subscription_rule" "test" { action = "SET Test='true'" filter_type = "CorrelationFilter" - correlation_filter = { + correlation_filter { correlation_id = "test_correlation_id" message_id = "test_message_id" } diff --git a/azurerm/resource_arm_servicebus_subscription_test.go 
b/azurerm/resource_arm_servicebus_subscription_test.go index a63acb27e4e6..414b4ea7ce2e 100644 --- a/azurerm/resource_arm_servicebus_subscription_test.go +++ b/azurerm/resource_arm_servicebus_subscription_test.go @@ -14,7 +14,6 @@ import ( func TestAccAzureRMServiceBusSubscription_basic(t *testing.T) { resourceName := "azurerm_servicebus_subscription.test" ri := tf.AccRandTimeInt() - config := testAccAzureRMServiceBusSubscription_basic(ri, testLocation()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -22,7 +21,7 @@ func TestAccAzureRMServiceBusSubscription_basic(t *testing.T) { CheckDestroy: testCheckAzureRMServiceBusSubscriptionDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMServiceBusSubscription_basic(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceBusSubscriptionExists(resourceName), ), @@ -36,6 +35,33 @@ func TestAccAzureRMServiceBusSubscription_basic(t *testing.T) { }) } +func TestAccAzureRMServiceBusSubscription_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + resourceName := "azurerm_servicebus_subscription.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMServiceBusSubscriptionDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMServiceBusSubscription_basic(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMServiceBusSubscriptionExists(resourceName), + ), + }, + { + Config: testAccAzureRMServiceBusSubscription_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_servicebus_subscription"), + }, + }, + }) +} + func TestAccAzureRMServiceBusSubscription_defaultTtl(t *testing.T) { resourceName := "azurerm_servicebus_subscription.test" ri := 
tf.AccRandTimeInt() @@ -258,6 +284,20 @@ func testAccAzureRMServiceBusSubscription_basic(rInt int, location string) strin return fmt.Sprintf(testAccAzureRMServiceBusSubscription_tfTemplate, rInt, location, rInt, rInt, rInt, "") } +func testAccAzureRMServiceBusSubscription_requiresImport(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_servicebus_subscription" "import" { + name = "${azurerm_servicebus_subscription.test.name}" + namespace_name = "${azurerm_servicebus_subscription.test.namespace_name}" + topic_name = "${azurerm_servicebus_subscription.test.topic_name}" + resource_group_name = "${azurerm_servicebus_subscription.test.resource_group_name}" + max_delivery_count = "${azurerm_servicebus_subscription.test.max_delivery_count}" +} +`, testAccAzureRMServiceBusSubscription_basic(rInt, location)) +} + func testAccAzureRMServiceBusSubscription_withDefaultTtl(rInt int, location string) string { return fmt.Sprintf(testAccAzureRMServiceBusSubscription_tfTemplate, rInt, location, rInt, rInt, rInt, "default_message_ttl = \"PT1H\"\n") diff --git a/azurerm/resource_arm_servicebus_topic.go b/azurerm/resource_arm_servicebus_topic.go index a24d57e4905f..e3e8ddde1f60 100644 --- a/azurerm/resource_arm_servicebus_topic.go +++ b/azurerm/resource_arm_servicebus_topic.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -133,6 +134,19 @@ func resourceArmServiceBusTopicCreateUpdate(d *schema.ResourceData, meta interfa requiresDuplicateDetection := d.Get("requires_duplicate_detection").(bool) supportOrdering := d.Get("support_ordering").(bool) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, 
resourceGroup, namespaceName, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing ServiceBus Topic %q (resource group %q, namespace %q): %v", name, resourceGroup, namespaceName, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_servicebus_topic", *existing.ID) + } + } + parameters := servicebus.SBTopic{ Name: &name, SBTopicProperties: &servicebus.SBTopicProperties{ @@ -159,12 +173,12 @@ func resourceArmServiceBusTopicCreateUpdate(d *schema.ResourceData, meta interfa } if _, err := client.CreateOrUpdate(ctx, resourceGroup, namespaceName, name, parameters); err != nil { - return err + return fmt.Errorf("Error issuing create/update request for ServiceBus Topic %q (resource group %q, namespace %q): %v", name, resourceGroup, namespaceName, err) } read, err := client.Get(ctx, resourceGroup, namespaceName, name) if err != nil { - return err + return fmt.Errorf("Error issuing get request for ServiceBus Topic %q (resource group %q, namespace %q): %v", name, resourceGroup, namespaceName, err) } if read.ID == nil { return fmt.Errorf("Cannot read ServiceBus Topic %s (resource group %s) ID", name, resourceGroup) diff --git a/azurerm/resource_arm_servicebus_topic_authorization_rule.go b/azurerm/resource_arm_servicebus_topic_authorization_rule.go index e20dae5c325a..6e42f9ba6cbe 100644 --- a/azurerm/resource_arm_servicebus_topic_authorization_rule.go +++ b/azurerm/resource_arm_servicebus_topic_authorization_rule.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/terraform/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -61,6 +62,19 @@ func resourceArmServiceBusTopicAuthorizationRuleCreateUpdate(d *schema.ResourceD topicName := 
d.Get("topic_name").(string) resourceGroup := d.Get("resource_group_name").(string) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.GetAuthorizationRule(ctx, resourceGroup, namespaceName, topicName, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing ServiceBus Topic Authorization Rule %q (Resource Group %q): %+v", name, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_servicebus_topic_authorization_rule", *existing.ID) + } + } + parameters := servicebus.SBAuthorizationRule{ Name: &name, SBAuthorizationRuleProperties: &servicebus.SBAuthorizationRuleProperties{ @@ -74,7 +88,7 @@ func resourceArmServiceBusTopicAuthorizationRuleCreateUpdate(d *schema.ResourceD read, err := client.GetAuthorizationRule(ctx, resourceGroup, namespaceName, topicName, name) if err != nil { - return err + return fmt.Errorf("Error getting ServiceBus Topic Authorization Rule %q (Resource Group %q): %+v", name, resourceGroup, err) } if read.ID == nil { diff --git a/azurerm/resource_arm_servicebus_topic_authorization_rule_test.go b/azurerm/resource_arm_servicebus_topic_authorization_rule_test.go index 3d0eaab2de3d..15b73ec7f860 100644 --- a/azurerm/resource_arm_servicebus_topic_authorization_rule_test.go +++ b/azurerm/resource_arm_servicebus_topic_authorization_rule_test.go @@ -59,8 +59,39 @@ func testAccAzureRMServiceBusTopicAuthorizationRule(t *testing.T, listen, send, }) } +func TestAccAzureRMServiceBusTopicAuthorizationRule_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + resourceName := "azurerm_servicebus_topic_authorization_rule.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: 
testCheckAzureRMServiceBusTopicAuthorizationRuleDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMServiceBusTopicAuthorizationRule_base(ri, testLocation(), true, false, false), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMServiceBusTopicAuthorizationRuleExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "listen", "true"), + resource.TestCheckResourceAttr(resourceName, "send", "false"), + resource.TestCheckResourceAttr(resourceName, "manage", "false"), + ), + }, + { + Config: testAccAzureRMServiceBusTopicAuthorizationRule_requiresImport(ri, testLocation(), true, false, false), + ExpectError: testRequiresImportError("azurerm_servicebus_topic_authorization_rule"), + }, + }, + }) +} + func TestAccAzureRMServiceBusTopicAuthorizationRule_rightsUpdate(t *testing.T) { resourceName := "azurerm_servicebus_topic_authorization_rule.test" + ri := tf.AccRandTimeInt() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -68,7 +99,7 @@ func TestAccAzureRMServiceBusTopicAuthorizationRule_rightsUpdate(t *testing.T) { CheckDestroy: testCheckAzureRMServiceBusTopicAuthorizationRuleDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMServiceBusTopicAuthorizationRule_base(tf.AccRandTimeInt(), testLocation(), true, false, false), + Config: testAccAzureRMServiceBusTopicAuthorizationRule_base(ri, testLocation(), true, false, false), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceBusTopicAuthorizationRuleExists(resourceName), resource.TestCheckResourceAttr(resourceName, "listen", "true"), @@ -77,7 +108,7 @@ func TestAccAzureRMServiceBusTopicAuthorizationRule_rightsUpdate(t *testing.T) { ), }, { - Config: testAccAzureRMServiceBusTopicAuthorizationRule_base(tf.AccRandTimeInt(), testLocation(), true, true, true), + Config: testAccAzureRMServiceBusTopicAuthorizationRule_base(ri, testLocation(), true, true, true), Check: resource.ComposeTestCheckFunc( 
testCheckAzureRMServiceBusTopicAuthorizationRuleExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "name"), @@ -188,3 +219,20 @@ resource "azurerm_servicebus_topic_authorization_rule" "test" { } `, rInt, location, listen, send, manage) } + +func testAccAzureRMServiceBusTopicAuthorizationRule_requiresImport(rInt int, location string, listen, send, manage bool) string { + return fmt.Sprintf(` +%s + +resource "azurerm_servicebus_topic_authorization_rule" "import" { + name = "${azurerm_servicebus_topic_authorization_rule.test.name}" + namespace_name = "${azurerm_servicebus_topic_authorization_rule.test.namespace_name}" + resource_group_name = "${azurerm_servicebus_topic_authorization_rule.test.resource_group_name}" + topic_name = "${azurerm_servicebus_topic_authorization_rule.test.topic_name}" + + listen = "${azurerm_servicebus_topic_authorization_rule.test.listen}" + send = "${azurerm_servicebus_topic_authorization_rule.test.send}" + manage = "${azurerm_servicebus_topic_authorization_rule.test.manage}" +} +`, testAccAzureRMServiceBusTopicAuthorizationRule_base(rInt, location, listen, send, manage)) +} diff --git a/azurerm/resource_arm_servicebus_topic_test.go b/azurerm/resource_arm_servicebus_topic_test.go index 574e436a8b35..3d57e3dfc2e3 100644 --- a/azurerm/resource_arm_servicebus_topic_test.go +++ b/azurerm/resource_arm_servicebus_topic_test.go @@ -14,7 +14,6 @@ import ( func TestAccAzureRMServiceBusTopic_basic(t *testing.T) { resourceName := "azurerm_servicebus_topic.test" ri := tf.AccRandTimeInt() - config := testAccAzureRMServiceBusTopic_basic(ri, testLocation()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -22,7 +21,7 @@ func TestAccAzureRMServiceBusTopic_basic(t *testing.T) { CheckDestroy: testCheckAzureRMServiceBusTopicDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMServiceBusTopic_basic(ri, testLocation()), Check: resource.ComposeTestCheckFunc( 
testCheckAzureRMServiceBusTopicExists(resourceName), ), @@ -35,11 +34,36 @@ func TestAccAzureRMServiceBusTopic_basic(t *testing.T) { }, }) } +func TestAccAzureRMServiceBusTopic_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + resourceName := "azurerm_servicebus_topic.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMServiceBusTopicDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMServiceBusTopic_basic(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMServiceBusTopicExists(resourceName), + ), + }, + { + Config: testAccAzureRMServiceBusTopic_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_servicebus_topic"), + }, + }, + }) +} func TestAccAzureRMServiceBusTopic_basicDisabled(t *testing.T) { resourceName := "azurerm_servicebus_topic.test" ri := tf.AccRandTimeInt() - config := testAccAzureRMServiceBusTopic_basicDisabled(ri, testLocation()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -47,7 +71,7 @@ func TestAccAzureRMServiceBusTopic_basicDisabled(t *testing.T) { CheckDestroy: testCheckAzureRMServiceBusTopicDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMServiceBusTopic_basicDisabled(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMServiceBusTopicExists(resourceName), ), @@ -337,6 +361,18 @@ resource "azurerm_servicebus_topic" "test" { `, rInt, location, rInt, rInt) } +func testAccAzureRMServiceBusTopic_requiresImport(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_servicebus_topic" "import" { + name = "${azurerm_servicebus_topic.test.name}" + namespace_name = "${azurerm_servicebus_topic.test.namespace_name}" + 
resource_group_name = "${azurerm_servicebus_topic.test.resource_group_name}" +} +`, testAccAzureRMServiceBusTopic_basic(rInt, location)) +} + func testAccAzureRMServiceBusTopic_basicDisabled(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { diff --git a/azurerm/resource_arm_shared_image.go b/azurerm/resource_arm_shared_image.go index 5bfd4a67cfb8..460fdedf811b 100644 --- a/azurerm/resource_arm_shared_image.go +++ b/azurerm/resource_arm_shared_image.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/response" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -103,7 +104,6 @@ func resourceArmSharedImageCreateUpdate(d *schema.ResourceData, meta interface{} log.Printf("[INFO] preparing arguments for Shared Image creation.") - // TODO: support for Timeouts/Requiring Import name := d.Get("name").(string) galleryName := d.Get("gallery_name").(string) resourceGroup := d.Get("resource_group_name").(string) @@ -117,6 +117,19 @@ func resourceArmSharedImageCreateUpdate(d *schema.ResourceData, meta interface{} osType := d.Get("os_type").(string) tags := d.Get("tags").(map[string]interface{}) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, galleryName, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Shared Image %q (Gallery %q / Resource Group %q): %+v", name, galleryName, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_shared_image", *existing.ID) + } + } + identifier := 
expandGalleryImageIdentifier(d) image := compute.GalleryImage{ diff --git a/azurerm/resource_arm_shared_image_gallery.go b/azurerm/resource_arm_shared_image_gallery.go index 25737a40f625..a0c18f622134 100644 --- a/azurerm/resource_arm_shared_image_gallery.go +++ b/azurerm/resource_arm_shared_image_gallery.go @@ -7,6 +7,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-06-01/compute" "github.com/hashicorp/terraform/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/response" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -61,6 +62,19 @@ func resourceArmSharedImageGalleryCreateUpdate(d *schema.ResourceData, meta inte description := d.Get("description").(string) tags := d.Get("tags").(map[string]interface{}) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Shared Image Gallery %q (Resource Group %q): %+v", name, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_shared_image_gallery", *existing.ID) + } + } + gallery := compute.Gallery{ Location: utils.String(location), GalleryProperties: &compute.GalleryProperties{ diff --git a/azurerm/resource_arm_shared_image_gallery_test.go b/azurerm/resource_arm_shared_image_gallery_test.go index 8698cd8da8cf..b0b07d2c067d 100644 --- a/azurerm/resource_arm_shared_image_gallery_test.go +++ b/azurerm/resource_arm_shared_image_gallery_test.go @@ -36,11 +36,37 @@ func TestAccAzureRMSharedImageGallery_basic(t *testing.T) { }, }) } +func TestAccAzureRMSharedImageGallery_requiresImport(t *testing.T) { + if 
!requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + resourceName := "azurerm_shared_image_gallery.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMSharedImageGalleryDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMSharedImageGallery_basic(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMSharedImageGalleryExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "description", ""), + ), + }, + { + Config: testAccAzureRMSharedImageGallery_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_shared_image_gallery"), + }, + }, + }) +} func TestAccAzureRMSharedImageGallery_complete(t *testing.T) { resourceName := "azurerm_shared_image_gallery.test" ri := tf.AccRandTimeInt() - location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -48,7 +74,7 @@ func TestAccAzureRMSharedImageGallery_complete(t *testing.T) { CheckDestroy: testCheckAzureRMSharedImageGalleryDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMSharedImageGallery_complete(ri, location), + Config: testAccAzureRMSharedImageGallery_complete(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMSharedImageGalleryExists(resourceName), resource.TestCheckResourceAttr(resourceName, "description", "Shared images and things."), @@ -139,6 +165,18 @@ resource "azurerm_shared_image_gallery" "test" { `, rInt, location, rInt) } +func testAccAzureRMSharedImageGallery_requiresImport(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_shared_image_gallery" "import" { + name = "${azurerm_shared_image_gallery.test.name}" + resource_group_name = "${azurerm_shared_image_gallery.test.resource_group_name}" + location = 
"${azurerm_shared_image_gallery.test.location}" +} +`, testAccAzureRMSharedImageGallery_basic(rInt, location)) +} + func testAccAzureRMSharedImageGallery_complete(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { @@ -152,7 +190,7 @@ resource "azurerm_shared_image_gallery" "test" { location = "${azurerm_resource_group.test.location}" description = "Shared images and things." - tags { + tags = { Hello = "There" World = "Example" } diff --git a/azurerm/resource_arm_shared_image_test.go b/azurerm/resource_arm_shared_image_test.go index 6a27980cfc99..50e44af12ce9 100644 --- a/azurerm/resource_arm_shared_image_test.go +++ b/azurerm/resource_arm_shared_image_test.go @@ -14,7 +14,6 @@ import ( func TestAccAzureRMSharedImage_basic(t *testing.T) { resourceName := "azurerm_shared_image.test" ri := tf.AccRandTimeInt() - location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -22,7 +21,7 @@ func TestAccAzureRMSharedImage_basic(t *testing.T) { CheckDestroy: testCheckAzureRMSharedImageDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMSharedImage_basic(ri, location), + Config: testAccAzureRMSharedImage_basic(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMSharedImageExists(resourceName), resource.TestCheckResourceAttr(resourceName, "description", ""), @@ -36,8 +35,12 @@ func TestAccAzureRMSharedImage_basic(t *testing.T) { }, }) } +func TestAccAzureRMSharedImage_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } -func TestAccAzureRMSharedImage_complete(t *testing.T) { resourceName := "azurerm_shared_image.test" ri := tf.AccRandTimeInt() location := testLocation() @@ -48,7 +51,31 @@ func TestAccAzureRMSharedImage_complete(t *testing.T) { CheckDestroy: testCheckAzureRMSharedImageDestroy, Steps: []resource.TestStep{ { - Config: 
testAccAzureRMSharedImage_complete(ri, location), + Config: testAccAzureRMSharedImage_basic(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMSharedImageExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "description", ""), + ), + }, + { + Config: testAccAzureRMSharedImage_requiresImport(ri, location), + ExpectError: testRequiresImportError("azurerm_shared_image"), + }, + }, + }) +} + +func TestAccAzureRMSharedImage_complete(t *testing.T) { + resourceName := "azurerm_shared_image.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMSharedImageDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMSharedImage_complete(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMSharedImageExists(resourceName), resource.TestCheckResourceAttr(resourceName, "os_type", "Linux"), @@ -156,6 +183,26 @@ resource "azurerm_shared_image" "test" { `, rInt, location, rInt, rInt, rInt, rInt, rInt) } +func testAccAzureRMSharedImage_requiresImport(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_shared_image" "import" { + name = "${azurerm_shared_image.test.name}" + gallery_name = "${azurerm_shared_image.test.gallery_name}" + resource_group_name = "${azurerm_shared_image.test.resource_group_name}" + location = "${azurerm_shared_image.test.location}" + os_type = "${azurerm_shared_image.test.os_type}" + + identifier { + publisher = "AccTesPublisher%d" + offer = "AccTesOffer%d" + sku = "AccTesSku%d" + } +} +`, testAccAzureRMSharedImage_basic(rInt, location), rInt, rInt, rInt) +} + func testAccAzureRMSharedImage_complete(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { diff --git a/azurerm/resource_arm_shared_image_version.go b/azurerm/resource_arm_shared_image_version.go index 
31735cc60b15..416330107678 100644 --- a/azurerm/resource_arm_shared_image_version.go +++ b/azurerm/resource_arm_shared_image_version.go @@ -7,6 +7,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-06-01/compute" "github.com/hashicorp/terraform/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/response" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -96,6 +97,19 @@ func resourceArmSharedImageVersionCreateUpdate(d *schema.ResourceData, meta inte managedImageId := d.Get("managed_image_id").(string) excludeFromLatest := d.Get("exclude_from_latest").(bool) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, galleryName, imageName, imageVersion, "") + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Shared Image Version %q (Image %q / Gallery %q / Resource Group %q): %+v", imageVersion, imageName, galleryName, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_shared_image_version", *existing.ID) + } + } + targetRegions := expandSharedImageVersionTargetRegions(d) tags := d.Get("tags").(map[string]interface{}) diff --git a/azurerm/resource_arm_shared_image_version_test.go b/azurerm/resource_arm_shared_image_version_test.go index b5321a8b803e..6d577cfef795 100644 --- a/azurerm/resource_arm_shared_image_version_test.go +++ b/azurerm/resource_arm_shared_image_version_test.go @@ -20,8 +20,6 @@ func TestAccAzureRMSharedImageVersion_basic(t *testing.T) { password := "Password1234!" 
hostName := fmt.Sprintf("tftestcustomimagesrc%d", ri) sshPort := "22" - location := testLocation() - altLocation := testAltLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -30,15 +28,15 @@ func TestAccAzureRMSharedImageVersion_basic(t *testing.T) { Steps: []resource.TestStep{ { // need to create a vm and then reference it in the image creation - Config: testAccAzureRMSharedImageVersion_setup(ri, location, userName, password, hostName), + Config: testAccAzureRMSharedImageVersion_setup(ri, testLocation(), userName, password, hostName), Destroy: false, Check: resource.ComposeTestCheckFunc( testCheckAzureVMExists("azurerm_virtual_machine.testsource", true), - testGeneralizeVMImage(resourceGroup, "testsource", userName, password, hostName, sshPort, location), + testGeneralizeVMImage(resourceGroup, "testsource", userName, password, hostName, sshPort, testLocation()), ), }, { - Config: testAccAzureRMSharedImageVersion_imageVersion(ri, location, userName, password, hostName), + Config: testAccAzureRMSharedImageVersion_imageVersion(ri, testLocation(), userName, password, hostName), Check: resource.ComposeTestCheckFunc( testCheckAzureRMSharedImageVersionExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "managed_image_id"), @@ -46,7 +44,7 @@ func TestAccAzureRMSharedImageVersion_basic(t *testing.T) { ), }, { - Config: testAccAzureRMSharedImageVersion_imageVersionUpdated(ri, location, altLocation, userName, password, hostName), + Config: testAccAzureRMSharedImageVersion_imageVersionUpdated(ri, testLocation(), testAltLocation(), userName, password, hostName), Check: resource.ComposeTestCheckFunc( testCheckAzureRMSharedImageVersionExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "managed_image_id"), @@ -62,6 +60,49 @@ func TestAccAzureRMSharedImageVersion_basic(t *testing.T) { }, }) } +func TestAccAzureRMSharedImageVersion_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { 
+ t.Skip("Skipping since resources aren't required to be imported") + return + } + resourceName := "azurerm_shared_image_version.test" + + ri := tf.AccRandTimeInt() + resourceGroup := fmt.Sprintf("acctestRG-%d", ri) + userName := "testadmin" + password := "Password1234!" + hostName := fmt.Sprintf("tftestcustomimagesrc%d", ri) + sshPort := "22" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMSharedImageVersionDestroy, + Steps: []resource.TestStep{ + { + // need to create a vm and then reference it in the image creation + Config: testAccAzureRMSharedImageVersion_setup(ri, testLocation(), userName, password, hostName), + Destroy: false, + Check: resource.ComposeTestCheckFunc( + testCheckAzureVMExists("azurerm_virtual_machine.testsource", true), + testGeneralizeVMImage(resourceGroup, "testsource", userName, password, hostName, sshPort, testLocation()), + ), + }, + { + Config: testAccAzureRMSharedImageVersion_imageVersion(ri, testLocation(), userName, password, hostName), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMSharedImageVersionExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "managed_image_id"), + resource.TestCheckResourceAttr(resourceName, "target_region.#", "1"), + ), + }, + { + Config: testAccAzureRMSharedImageVersion_requiresImport(ri, testLocation(), userName, password, hostName), + ExpectError: testRequiresImportError("azurerm_shared_image_version"), + }, + }, + }) +} func testCheckAzureRMSharedImageVersionDestroy(s *terraform.State) error { client := testAccProvider.Meta().(*ArmClient).galleryImageVersionsClient @@ -175,6 +216,25 @@ resource "azurerm_shared_image_version" "test" { } `, template) } +func testAccAzureRMSharedImageVersion_requiresImport(rInt int, location, username, password, hostname string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_shared_image_version" "import" { + name = 
"${azurerm_shared_image_version.test.name}" + gallery_name = "${azurerm_shared_image_version.test.gallery_name}" + image_name = "${azurerm_shared_image_version.test.image_name}" + resource_group_name = "${azurerm_shared_image_version.test.resource_group_name}" + location = "${azurerm_shared_image_version.test.location}" + managed_image_id = "${azurerm_shared_image_version.test.managed_image_id}" + + target_region { + name = "${azurerm_resource_group.test.location}" + regional_replica_count = 1 + } +} +`, testAccAzureRMSharedImageVersion_imageVersion(rInt, location, username, password, hostname)) +} func testAccAzureRMSharedImageVersion_imageVersionUpdated(rInt int, location, altLocation, username, password, hostname string) string { template := testAccAzureRMSharedImageVersion_provision(rInt, location, username, password, hostname) diff --git a/azurerm/resource_arm_signalr_service.go b/azurerm/resource_arm_signalr_service.go index f3af9a04dbc8..d5276f4e7f44 100644 --- a/azurerm/resource_arm_signalr_service.go +++ b/azurerm/resource_arm_signalr_service.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/response" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -120,6 +121,19 @@ func resourceArmSignalRServiceCreateUpdate(d *schema.ResourceData, meta interfac tags := d.Get("tags").(map[string]interface{}) expandedTags := expandTags(tags) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing SignalR %q (Resource Group %q): %+v", name, 
resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_signalr_service", *existing.ID) + } + } + parameters := &signalr.CreateParameters{ Location: utils.String(location), Sku: expandSignalRServiceSku(sku), diff --git a/azurerm/resource_arm_signalr_service_test.go b/azurerm/resource_arm_signalr_service_test.go index c33c203d1872..70d28700d354 100644 --- a/azurerm/resource_arm_signalr_service_test.go +++ b/azurerm/resource_arm_signalr_service_test.go @@ -13,7 +13,6 @@ import ( func TestAccAzureRMSignalRService_basic(t *testing.T) { resourceName := "azurerm_signalr_service.test" ri := tf.AccRandTimeInt() - config := testAccAzureRMSignalRService_basic(ri, testLocation()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -21,7 +20,7 @@ func TestAccAzureRMSignalRService_basic(t *testing.T) { CheckDestroy: testCheckAzureRMSignalRServiceDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMSignalRService_basic(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMSignalRServiceExists(resourceName), resource.TestCheckResourceAttr(resourceName, "sku.0.name", "Free_F1"), @@ -44,11 +43,46 @@ func TestAccAzureRMSignalRService_basic(t *testing.T) { }, }) } +func TestAccAzureRMSignalRService_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + resourceName := "azurerm_signalr_service.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMSignalRServiceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMSignalRService_basic(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMSignalRServiceExists(resourceName), + resource.TestCheckResourceAttr(resourceName, 
"sku.0.name", "Free_F1"), + resource.TestCheckResourceAttr(resourceName, "sku.0.capacity", "1"), + resource.TestCheckResourceAttrSet(resourceName, "hostname"), + resource.TestCheckResourceAttrSet(resourceName, "ip_address"), + resource.TestCheckResourceAttrSet(resourceName, "public_port"), + resource.TestCheckResourceAttrSet(resourceName, "server_port"), + resource.TestCheckResourceAttrSet(resourceName, "primary_access_key"), + resource.TestCheckResourceAttrSet(resourceName, "primary_connection_string"), + resource.TestCheckResourceAttrSet(resourceName, "secondary_access_key"), + resource.TestCheckResourceAttrSet(resourceName, "secondary_connection_string"), + ), + }, + { + Config: testAccAzureRMSignalRService_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_signalr_service"), + }, + }, + }) +} func TestAccAzureRMSignalRService_standard(t *testing.T) { resourceName := "azurerm_signalr_service.test" ri := tf.AccRandTimeInt() - config := testAccAzureRMSignalRService_standardWithCapacity(ri, testLocation(), 1) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -56,7 +90,7 @@ func TestAccAzureRMSignalRService_standard(t *testing.T) { CheckDestroy: testCheckAzureRMSignalRServiceDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMSignalRService_standardWithCapacity(ri, testLocation(), 1), Check: resource.ComposeTestCheckFunc( testCheckAzureRMSignalRServiceExists(resourceName), resource.TestCheckResourceAttr(resourceName, "sku.0.name", "Standard_S1"), @@ -83,7 +117,6 @@ func TestAccAzureRMSignalRService_standard(t *testing.T) { func TestAccAzureRMSignalRService_standardWithCap2(t *testing.T) { resourceName := "azurerm_signalr_service.test" ri := tf.AccRandTimeInt() - config := testAccAzureRMSignalRService_standardWithCapacity(ri, testLocation(), 2) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -91,7 +124,7 @@ func 
TestAccAzureRMSignalRService_standardWithCap2(t *testing.T) { CheckDestroy: testCheckAzureRMSignalRServiceDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMSignalRService_standardWithCapacity(ri, testLocation(), 2), Check: resource.ComposeTestCheckFunc( testCheckAzureRMSignalRServiceExists(resourceName), resource.TestCheckResourceAttr(resourceName, "sku.0.name", "Standard_S1"), @@ -327,6 +360,23 @@ resource "azurerm_signalr_service" "test" { `, rInt, location, rInt) } +func testAccAzureRMSignalRService_requiresImport(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_signalr_service" "import" { + name = "${azurerm_signalr_service.test.name}" + location = "${azurerm_signalr_service.test.location}" + resource_group_name = "${azurerm_signalr_service.test.resource_group_name}" + + sku { + name = "Free_F1" + capacity = 1 + } +} +`, testAccAzureRMSignalRService_basic(rInt, location)) +} + func testAccAzureRMSignalRService_standardWithCapacity(rInt int, location string, capacity int) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { diff --git a/azurerm/resource_arm_snapshot.go b/azurerm/resource_arm_snapshot.go index 65dddf4c2d56..99eb68e0e59b 100644 --- a/azurerm/resource_arm_snapshot.go +++ b/azurerm/resource_arm_snapshot.go @@ -8,6 +8,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-06-01/compute" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -84,6 +85,19 @@ func resourceArmSnapshotCreateUpdate(d *schema.ResourceData, meta interface{}) e createOption := d.Get("create_option").(string) tags := d.Get("tags").(map[string]interface{}) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, name) + if 
err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Snapshot %q (Resource Group %q): %+v", name, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_snapshot", *existing.ID) + } + } + properties := compute.Snapshot{ Location: utils.String(location), SnapshotProperties: &compute.SnapshotProperties{ @@ -119,16 +133,16 @@ func resourceArmSnapshotCreateUpdate(d *schema.ResourceData, meta interface{}) e future, err := client.CreateOrUpdate(ctx, resourceGroup, name, properties) if err != nil { - return err + return fmt.Errorf("Error issuing create/update request for Snapshot %q (Resource Group %q): %+v", name, resourceGroup, err) } if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return err + return fmt.Errorf("Error waiting on create/update future for Snapshot %q (Resource Group %q): %+v", name, resourceGroup, err) } resp, err := client.Get(ctx, resourceGroup, name) if err != nil { - return err + return fmt.Errorf("Error issuing get request for Snapshot %q (Resource Group %q): %+v", name, resourceGroup, err) } d.SetId(*resp.ID) diff --git a/azurerm/resource_arm_snapshot_test.go b/azurerm/resource_arm_snapshot_test.go index c342207d737e..0ede0ed26da3 100644 --- a/azurerm/resource_arm_snapshot_test.go +++ b/azurerm/resource_arm_snapshot_test.go @@ -59,11 +59,9 @@ func TestSnapshotName_validation(t *testing.T) { } } } - func TestAccAzureRMSnapshot_fromManagedDisk(t *testing.T) { resourceName := "azurerm_snapshot.test" ri := tf.AccRandTimeInt() - config := testAccAzureRMSnapshot_fromManagedDisk(ri, testLocation()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -71,7 +69,7 @@ func TestAccAzureRMSnapshot_fromManagedDisk(t *testing.T) { CheckDestroy: testCheckAzureRMSnapshotDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: 
testAccAzureRMSnapshot_fromManagedDisk(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMSnapshotExists(resourceName), ), @@ -85,6 +83,32 @@ func TestAccAzureRMSnapshot_fromManagedDisk(t *testing.T) { }, }) } +func TestAccAzureRMSnapshot_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + resourceName := "azurerm_snapshot.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMSnapshotDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMSnapshot_fromManagedDisk(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMSnapshotExists(resourceName), + ), + }, + { + Config: testAccAzureRMSnapshot_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_snapshot"), + }, + }, + }) +} func TestAccAzureRMSnapshot_encryption(t *testing.T) { resourceName := "azurerm_snapshot.test" @@ -282,6 +306,20 @@ resource "azurerm_snapshot" "test" { `, rInt, location, rInt, rInt) } +func testAccAzureRMSnapshot_requiresImport(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_snapshot" "import" { + name = "${azurerm_snapshot.test.name}" + location = "${azurerm_snapshot.test.location}" + resource_group_name = "${azurerm_snapshot.test.resource_group_name}" + create_option = "${azurerm_snapshot.test.create_option}" + source_uri = "${azurerm_snapshot.test.source_uri}" +} +`, testAccAzureRMSnapshot_fromManagedDisk(rInt, location)) +} + func testAccAzureRMSnapshot_fromManagedDiskUpdated(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { @@ -305,7 +343,7 @@ resource "azurerm_snapshot" "test" { create_option = "Copy" source_uri = "${azurerm_managed_disk.test.id}" - tags { + tags = { "Hello" = "World" } 
} @@ -508,7 +546,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -552,7 +590,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "staging" } } diff --git a/azurerm/resource_arm_sql_administrator.go b/azurerm/resource_arm_sql_administrator.go index f2e2d8125c07..08172e75e328 100644 --- a/azurerm/resource_arm_sql_administrator.go +++ b/azurerm/resource_arm_sql_administrator.go @@ -6,7 +6,8 @@ import ( "github.com/Azure/azure-sdk-for-go/services/preview/sql/mgmt/2015-05-01-preview/sql" "github.com/hashicorp/terraform/helper/schema" - "github.com/satori/go.uuid" + uuid "github.com/satori/go.uuid" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -58,6 +59,20 @@ func resourceArmSqlActiveDirectoryAdministratorCreateUpdate(d *schema.ResourceDa login := d.Get("login").(string) objectId := uuid.FromStringOrNil(d.Get("object_id").(string)) tenantId := uuid.FromStringOrNil(d.Get("tenant_id").(string)) + + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resGroup, serverName) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing SQL Administrator (Resource Group %q, Server %q): %+v", resGroup, serverName, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_sql_active_directory_administrator", *existing.ID) + } + } + parameters := sql.ServerAzureADAdministrator{ ServerAdministratorProperties: &sql.ServerAdministratorProperties{ AdministratorType: utils.String("ActiveDirectory"), @@ -69,16 +84,16 @@ func resourceArmSqlActiveDirectoryAdministratorCreateUpdate(d *schema.ResourceDa future, err := client.CreateOrUpdate(ctx, 
resGroup, serverName, parameters) if err != nil { - return err + return fmt.Errorf("Error issuing create/update request for SQL Administrator (Resource Group %q, Server %q): %+v", resGroup, serverName, err) } if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return err + return fmt.Errorf("Error waiting on create/update future for SQL Administrator (Resource Group %q, Server %q): %+v", resGroup, serverName, err) } resp, err := client.Get(ctx, resGroup, serverName) if err != nil { - return err + return fmt.Errorf("Error issuing get request for SQL Administrator (Resource Group %q, Server %q): %+v", resGroup, serverName, err) } d.SetId(*resp.ID) diff --git a/azurerm/resource_arm_sql_administrator_test.go b/azurerm/resource_arm_sql_administrator_test.go index c47f0bd3d99c..9ee58735a84f 100644 --- a/azurerm/resource_arm_sql_administrator_test.go +++ b/azurerm/resource_arm_sql_administrator_test.go @@ -13,8 +13,6 @@ import ( func TestAccAzureRMSqlAdministrator_basic(t *testing.T) { resourceName := "azurerm_sql_active_directory_administrator.test" ri := tf.AccRandTimeInt() - preConfig := testAccAzureRMSqlAdministrator_basic(ri, testLocation()) - postConfig := testAccAzureRMSqlAdministrator_withUpdates(ri, testLocation()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -22,7 +20,7 @@ func TestAccAzureRMSqlAdministrator_basic(t *testing.T) { CheckDestroy: testCheckAzureRMSqlAdministratorDestroy, Steps: []resource.TestStep{ { - Config: preConfig, + Config: testAccAzureRMSqlAdministrator_basic(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMSqlAdministratorExists(resourceName), resource.TestCheckResourceAttr(resourceName, "login", "sqladmin"), @@ -34,7 +32,7 @@ func TestAccAzureRMSqlAdministrator_basic(t *testing.T) { ImportStateVerify: true, }, { - Config: postConfig, + Config: testAccAzureRMSqlAdministrator_withUpdates(ri, testLocation()), Check: resource.ComposeTestCheckFunc( 
testCheckAzureRMSqlAdministratorExists(resourceName), resource.TestCheckResourceAttr(resourceName, "login", "sqladmin2"), @@ -43,6 +41,33 @@ func TestAccAzureRMSqlAdministrator_basic(t *testing.T) { }, }) } +func TestAccAzureRMSqlAdministrator_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + resourceName := "azurerm_sql_active_directory_administrator.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMSqlAdministratorDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMSqlAdministrator_basic(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMSqlAdministratorExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "login", "sqladmin"), + ), + }, + { + Config: testAccAzureRMSqlAdministrator_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_sql_active_directory_administrator"), + }, + }, + }) +} func TestAccAzureRMSqlAdministrator_disappears(t *testing.T) { resourceName := "azurerm_sql_active_directory_administrator.test" @@ -160,6 +185,20 @@ resource "azurerm_sql_active_directory_administrator" "test" { `, rInt, location, rInt) } +func testAccAzureRMSqlAdministrator_requiresImport(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_sql_active_directory_administrator" "import" { + server_name = "${azurerm_sql_active_directory_administrator.test.server_name}" + resource_group_name = "${azurerm_sql_active_directory_administrator.test.resource_group_name}" + login = "${azurerm_sql_active_directory_administrator.test.login}" + tenant_id = "${azurerm_sql_active_directory_administrator.test.tenant_id}" + object_id = "${azurerm_sql_active_directory_administrator.test.object_id}" +} +`, 
testAccAzureRMSqlAdministrator_basic(rInt, location)) +} + func testAccAzureRMSqlAdministrator_withUpdates(rInt int, location string) string { return fmt.Sprintf(` data "azurerm_client_config" "current" {} diff --git a/azurerm/resource_arm_sql_database.go b/azurerm/resource_arm_sql_database.go index 1a0d60374801..55dd2c73df02 100644 --- a/azurerm/resource_arm_sql_database.go +++ b/azurerm/resource_arm_sql_database.go @@ -6,13 +6,14 @@ import ( "strings" "time" + "github.com/satori/go.uuid" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/Azure/azure-sdk-for-go/services/preview/sql/mgmt/2015-05-01-preview/sql" "github.com/Azure/go-autorest/autorest/date" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" - "github.com/satori/go.uuid" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/suppress" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" @@ -316,15 +317,28 @@ func resourceArmSqlDatabase() *schema.Resource { func resourceArmSqlDatabaseCreateUpdate(d *schema.ResourceData, meta interface{}) error { client := meta.(*ArmClient).sqlDatabasesClient + ctx := meta.(*ArmClient).StopContext name := d.Get("name").(string) serverName := d.Get("server_name").(string) resourceGroup := d.Get("resource_group_name").(string) - location := azureRMNormalizeLocation(d.Get("location").(string)) createMode := d.Get("create_mode").(string) tags := d.Get("tags").(map[string]interface{}) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, serverName, name, "") + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing SQL Database %q (Resource Group %q, Server 
%q): %+v", name, resourceGroup, serverName, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_sql_database", *existing.ID) + } + } + threatDetection, err := expandArmSqlServerThreatDetectionPolicy(d, location) if err != nil { return fmt.Errorf("Error parsing the database threat detection policy: %+v", err) @@ -406,14 +420,13 @@ func resourceArmSqlDatabaseCreateUpdate(d *schema.ResourceData, meta interface{} properties.DatabaseProperties.RequestedServiceObjectiveID = nil } - ctx := meta.(*ArmClient).StopContext future, err := client.CreateOrUpdate(ctx, resourceGroup, serverName, name, properties) if err != nil { - return err + return fmt.Errorf("Error issuing create/update request for SQL Database %q (Resource Group %q, Server %q): %+v", name, resourceGroup, serverName, err) } if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return err + return fmt.Errorf("Error waiting on create/update future for SQL Database %q (Resource Group %q, Server %q): %+v", name, resourceGroup, serverName, err) } if _, ok := d.GetOk("import"); ok { @@ -438,7 +451,7 @@ func resourceArmSqlDatabaseCreateUpdate(d *schema.ResourceData, meta interface{} resp, err := client.Get(ctx, resourceGroup, serverName, name, "") if err != nil { - return err + return fmt.Errorf("Error issuing get request for SQL Database %q (Resource Group %q, Server %q): %+v", name, resourceGroup, serverName, err) } d.SetId(*resp.ID) diff --git a/azurerm/resource_arm_sql_database_test.go b/azurerm/resource_arm_sql_database_test.go index 52d6e3e0129b..6a3522da6e7c 100644 --- a/azurerm/resource_arm_sql_database_test.go +++ b/azurerm/resource_arm_sql_database_test.go @@ -35,6 +35,32 @@ func TestAccAzureRMSqlDatabase_basic(t *testing.T) { }, }) } +func TestAccAzureRMSqlDatabase_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + resourceName := 
"azurerm_sql_database.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMSqlDatabaseDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMSqlDatabase_basic(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMSqlDatabaseExists(resourceName), + ), + }, + { + Config: testAccAzureRMSqlDatabase_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_sql_database"), + }, + }, + }) +} func TestAccAzureRMSqlDatabase_disappears(t *testing.T) { resourceName := "azurerm_sql_database.test" @@ -144,7 +170,7 @@ func TestAccAzureRMSqlDatabase_restorePointInTime(t *testing.T) { location := testLocation() preConfig := testAccAzureRMSqlDatabase_basic(ri, location) timeToRestore := time.Now().Add(15 * time.Minute) - formattedTime := string(timeToRestore.UTC().Format(time.RFC3339)) + formattedTime := timeToRestore.UTC().Format(time.RFC3339) postCongif := testAccAzureRMSqlDatabase_restorePointInTime(ri, formattedTime, testLocation()) resource.ParallelTest(t, resource.TestCase{ @@ -399,6 +425,23 @@ resource "azurerm_sql_database" "test" { `, rInt, location, rInt, rInt) } +func testAccAzureRMSqlDatabase_requiresImport(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_sql_database" "import" { + name = "${azurerm_sql_database.test.name}" + resource_group_name = "${azurerm_sql_database.test.resource_group_name}" + server_name = "${azurerm_sql_database.test.server_name}" + location = "${azurerm_sql_database.test.location}" + edition = "${azurerm_sql_database.test.edition}" + collation = "${azurerm_sql_database.test.collation}" + max_size_bytes = "${azurerm_sql_database.test.max_size_bytes}" + requested_service_objective_name = "${azurerm_sql_database.test.requested_service_objective_name}" +} +`, testAccAzureRMSqlDatabase_basic(rInt, location)) +} + func 
testAccAzureRMSqlDatabase_withTags(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { @@ -425,7 +468,7 @@ resource "azurerm_sql_database" "test" { max_size_bytes = "1073741824" requested_service_objective_name = "S0" - tags { + tags = { environment = "staging" database = "test" } @@ -459,7 +502,7 @@ resource "azurerm_sql_database" "test" { max_size_bytes = "1073741824" requested_service_objective_name = "S0" - tags { + tags = { environment = "production" } } diff --git a/azurerm/resource_arm_sql_elasticpool.go b/azurerm/resource_arm_sql_elasticpool.go index 41c5a4360b67..6aa3ff035e5b 100644 --- a/azurerm/resource_arm_sql_elasticpool.go +++ b/azurerm/resource_arm_sql_elasticpool.go @@ -6,6 +6,7 @@ import ( "time" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/Azure/azure-sdk-for-go/services/preview/sql/mgmt/2015-05-01-preview/sql" "github.com/hashicorp/terraform/helper/schema" @@ -94,6 +95,19 @@ func resourceArmSqlElasticPoolCreateUpdate(d *schema.ResourceData, meta interfac resGroup := d.Get("resource_group_name").(string) tags := d.Get("tags").(map[string]interface{}) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resGroup, serverName, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing SQL ElasticPool %q (resource group %q, server %q) ID", name, serverName, resGroup) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_sql_elasticpool", *existing.ID) + } + } + elasticPool := sql.ElasticPool{ Name: &name, Location: &location, @@ -115,7 +129,7 @@ func resourceArmSqlElasticPoolCreateUpdate(d *schema.ResourceData, meta interfac return err } if read.ID == nil { - return fmt.Errorf("Cannot read SQL ElasticPool %q 
(resource group %q) ID", name, resGroup) + return fmt.Errorf("Cannot read SQL ElasticPool %q (resource group %q, server %q) ID", name, serverName, resGroup) } d.SetId(*read.ID) @@ -138,7 +152,7 @@ func resourceArmSqlElasticPoolRead(d *schema.ResourceData, meta interface{}) err d.SetId("") return nil } - return fmt.Errorf("Error making Read request on Sql Elastic Pool %s: %s", name, err) + return fmt.Errorf("Error making Read request on SQL ElasticPool %q (resource group %q, server %q) ID", name, serverName, resGroup) } d.Set("name", resp.Name) diff --git a/azurerm/resource_arm_sql_elasticpool_test.go b/azurerm/resource_arm_sql_elasticpool_test.go index 06eb80b4d1a8..a6d3bfb8a6d3 100644 --- a/azurerm/resource_arm_sql_elasticpool_test.go +++ b/azurerm/resource_arm_sql_elasticpool_test.go @@ -13,7 +13,6 @@ import ( func TestAccAzureRMSqlElasticPool_basic(t *testing.T) { resourceName := "azurerm_sql_elasticpool.test" ri := tf.AccRandTimeInt() - config := testAccAzureRMSqlElasticPool_basic(ri, testLocation()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -21,7 +20,7 @@ func TestAccAzureRMSqlElasticPool_basic(t *testing.T) { CheckDestroy: testCheckAzureRMSqlElasticPoolDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMSqlElasticPool_basic(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMSqlElasticPoolExists(resourceName), ), @@ -34,11 +33,36 @@ func TestAccAzureRMSqlElasticPool_basic(t *testing.T) { }, }) } +func TestAccAzureRMSqlElasticPool_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + resourceName := "azurerm_sql_elasticpool.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMSqlElasticPoolDestroy, + Steps: []resource.TestStep{ + { + 
Config: testAccAzureRMSqlElasticPool_basic(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMSqlElasticPoolExists(resourceName), + ), + }, + { + Config: testAccAzureRMSqlElasticPool_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_sql_elasticpool"), + }, + }, + }) +} func TestAccAzureRMSqlElasticPool_disappears(t *testing.T) { resourceName := "azurerm_sql_elasticpool.test" ri := tf.AccRandTimeInt() - config := testAccAzureRMSqlElasticPool_basic(ri, testLocation()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -46,7 +70,7 @@ func TestAccAzureRMSqlElasticPool_disappears(t *testing.T) { CheckDestroy: testCheckAzureRMSqlElasticPoolDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMSqlElasticPool_basic(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMSqlElasticPoolExists(resourceName), testCheckAzureRMSqlElasticPoolDisappears(resourceName), @@ -194,6 +218,22 @@ resource "azurerm_sql_elasticpool" "test" { `, rInt, location) } +func testAccAzureRMSqlElasticPool_requiresImport(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_sql_elasticpool" "import" { + name = "${azurerm_sql_elasticpool.test.name}" + resource_group_name = "${azurerm_sql_elasticpool.test.resource_group_name}" + location = "${azurerm_sql_elasticpool.test.location}" + server_name = "${azurerm_sql_elasticpool.test.server_name}" + edition = "${azurerm_sql_elasticpool.test.edition}" + dtu = "${azurerm_sql_elasticpool.test.dtu}" + pool_size = "${azurerm_sql_elasticpool.test.pool_size}" +} +`, testAccAzureRMSqlElasticPool_basic(rInt, location)) +} + func testAccAzureRMSqlElasticPool_resizedDtu(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { diff --git a/azurerm/resource_arm_sql_firewall_rule.go b/azurerm/resource_arm_sql_firewall_rule.go index 
05a420989d00..941378f4ad5e 100644 --- a/azurerm/resource_arm_sql_firewall_rule.go +++ b/azurerm/resource_arm_sql_firewall_rule.go @@ -5,6 +5,7 @@ import ( "log" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/Azure/azure-sdk-for-go/services/preview/sql/mgmt/2015-05-01-preview/sql" "github.com/hashicorp/terraform/helper/schema" @@ -63,6 +64,19 @@ func resourceArmSqlFirewallRuleCreateUpdate(d *schema.ResourceData, meta interfa startIPAddress := d.Get("start_ip_address").(string) endIPAddress := d.Get("end_ip_address").(string) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, serverName, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing SQL Firewall Rule %s (Resource Group %s, Server %s): %+v", name, resourceGroup, serverName, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_sql_firewall_rule", *existing.ID) + } + } + parameters := sql.FirewallRule{ FirewallRuleProperties: &sql.FirewallRuleProperties{ StartIPAddress: utils.String(startIPAddress), @@ -71,12 +85,12 @@ func resourceArmSqlFirewallRuleCreateUpdate(d *schema.ResourceData, meta interfa } if _, err := client.CreateOrUpdate(ctx, resourceGroup, serverName, name, parameters); err != nil { - return fmt.Errorf("Error creating SQL Firewall Rule: %+v", err) + return fmt.Errorf("Error creating SQL Firewall Rule %s (Resource Group %s, Server %s): %+v", name, resourceGroup, serverName, err) } resp, err := client.Get(ctx, resourceGroup, serverName, name) if err != nil { - return fmt.Errorf("Error retrieving SQL Firewall Rule: %+v", err) + return fmt.Errorf("Error retrieving SQL Firewall Rule %s (Resource Group %s, Server %s): %+v", name, resourceGroup, serverName, err) } d.SetId(*resp.ID) @@ -105,7 
+119,7 @@ func resourceArmSqlFirewallRuleRead(d *schema.ResourceData, meta interface{}) er return nil } - return fmt.Errorf("Error reading SQL Firewall Rule: %+v", err) + return fmt.Errorf("Error reading SQL Firewall Rule %s (Resource Group %s, Server %s): %+v", name, resourceGroup, serverName, err) } d.Set("name", resp.Name) diff --git a/azurerm/resource_arm_sql_firewall_rule_test.go b/azurerm/resource_arm_sql_firewall_rule_test.go index 674a335c87d4..3e5370be9206 100644 --- a/azurerm/resource_arm_sql_firewall_rule_test.go +++ b/azurerm/resource_arm_sql_firewall_rule_test.go @@ -13,8 +13,6 @@ import ( func TestAccAzureRMSqlFirewallRule_basic(t *testing.T) { resourceName := "azurerm_sql_firewall_rule.test" ri := tf.AccRandTimeInt() - preConfig := testAccAzureRMSqlFirewallRule_basic(ri, testLocation()) - postConfig := testAccAzureRMSqlFirewallRule_withUpdates(ri, testLocation()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -22,7 +20,7 @@ func TestAccAzureRMSqlFirewallRule_basic(t *testing.T) { CheckDestroy: testCheckAzureRMSqlFirewallRuleDestroy, Steps: []resource.TestStep{ { - Config: preConfig, + Config: testAccAzureRMSqlFirewallRule_basic(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMSqlFirewallRuleExists(resourceName), resource.TestCheckResourceAttr(resourceName, "start_ip_address", "0.0.0.0"), @@ -35,7 +33,7 @@ func TestAccAzureRMSqlFirewallRule_basic(t *testing.T) { ImportStateVerify: true, }, { - Config: postConfig, + Config: testAccAzureRMSqlFirewallRule_withUpdates(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMSqlFirewallRuleExists(resourceName), resource.TestCheckResourceAttr(resourceName, "start_ip_address", "10.0.17.62"), @@ -45,11 +43,38 @@ func TestAccAzureRMSqlFirewallRule_basic(t *testing.T) { }, }) } +func TestAccAzureRMSqlFirewallRule_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't 
required to be imported") + return + } + resourceName := "azurerm_sql_firewall_rule.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMSqlFirewallRuleDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMSqlFirewallRule_basic(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMSqlFirewallRuleExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "start_ip_address", "0.0.0.0"), + resource.TestCheckResourceAttr(resourceName, "end_ip_address", "255.255.255.255"), + ), + }, + { + Config: testAccAzureRMSqlFirewallRule_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_sql_firewall_rule"), + }, + }, + }) +} func TestAccAzureRMSqlFirewallRule_disappears(t *testing.T) { resourceName := "azurerm_sql_firewall_rule.test" ri := tf.AccRandTimeInt() - config := testAccAzureRMSqlFirewallRule_basic(ri, testLocation()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -57,7 +82,7 @@ func TestAccAzureRMSqlFirewallRule_disappears(t *testing.T) { CheckDestroy: testCheckAzureRMSqlFirewallRuleDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMSqlFirewallRule_basic(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMSqlFirewallRuleExists(resourceName), testCheckAzureRMSqlFirewallRuleDisappears(resourceName), @@ -177,6 +202,20 @@ resource "azurerm_sql_firewall_rule" "test" { `, rInt, location, rInt, rInt) } +func testAccAzureRMSqlFirewallRule_requiresImport(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_sql_firewall_rule" "import" { + name = "${azurerm_sql_firewall_rule.test.name}" + resource_group_name = "${azurerm_sql_firewall_rule.test.resource_group_name}" + server_name = "${azurerm_sql_firewall_rule.test.server_name}" + start_ip_address = 
"${azurerm_sql_firewall_rule.test.start_ip_address}" + end_ip_address = "${azurerm_sql_firewall_rule.test.end_ip_address}" +} +`, testAccAzureRMSqlFirewallRule_basic(rInt, location)) +} + func testAccAzureRMSqlFirewallRule_withUpdates(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { diff --git a/azurerm/resource_arm_sql_server.go b/azurerm/resource_arm_sql_server.go index 7f365b354d7d..177ce78897b7 100644 --- a/azurerm/resource_arm_sql_server.go +++ b/azurerm/resource_arm_sql_server.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/terraform/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/response" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -80,6 +81,19 @@ func resourceArmSqlServerCreateUpdate(d *schema.ResourceData, meta interface{}) tags := d.Get("tags").(map[string]interface{}) metadata := expandTags(tags) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resGroup, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing SQL Server %q (Resource Group %q): %+v", name, resGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_sql_server", *existing.ID) + } + } + parameters := sql.Server{ Location: utils.String(location), Tags: metadata, @@ -96,7 +110,7 @@ func resourceArmSqlServerCreateUpdate(d *schema.ResourceData, meta interface{}) future, err := client.CreateOrUpdate(ctx, resGroup, name, parameters) if err != nil { - return err + return fmt.Errorf("Error issuing create/update request for SQL Server %q (Resource Group %q): %+v", name, resGroup, err) } if err = future.WaitForCompletionRef(ctx, 
client.Client); err != nil { @@ -105,12 +119,12 @@ func resourceArmSqlServerCreateUpdate(d *schema.ResourceData, meta interface{}) return fmt.Errorf("SQL Server names need to be globally unique and %q is already in use.", name) } - return err + return fmt.Errorf("Error waiting on create/update future for SQL Server %q (Resource Group %q): %+v", name, resGroup, err) } resp, err := client.Get(ctx, resGroup, name) if err != nil { - return err + return fmt.Errorf("Error issuing get request for SQL Server %q (Resource Group %q): %+v", name, resGroup, err) } d.SetId(*resp.ID) diff --git a/azurerm/resource_arm_sql_server_test.go b/azurerm/resource_arm_sql_server_test.go index c12744706322..6de4e4c90113 100644 --- a/azurerm/resource_arm_sql_server_test.go +++ b/azurerm/resource_arm_sql_server_test.go @@ -13,7 +13,6 @@ import ( func TestAccAzureRMSqlServer_basic(t *testing.T) { resourceName := "azurerm_sql_server.test" ri := tf.AccRandTimeInt() - config := testAccAzureRMSqlServer_basic(ri, testLocation()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -21,7 +20,7 @@ func TestAccAzureRMSqlServer_basic(t *testing.T) { CheckDestroy: testCheckAzureRMSqlServerDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMSqlServer_basic(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMSqlServerExists(resourceName), ), @@ -35,6 +34,32 @@ func TestAccAzureRMSqlServer_basic(t *testing.T) { }, }) } +func TestAccAzureRMSqlServer_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + resourceName := "azurerm_sql_server.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMSqlServerDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMSqlServer_basic(ri, 
testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMSqlServerExists(resourceName), + ), + }, + { + Config: testAccAzureRMSqlServer_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_sql_server"), + }, + }, + }) +} func TestAccAzureRMSqlServer_disappears(t *testing.T) { resourceName := "azurerm_sql_server.test" @@ -192,6 +217,21 @@ resource "azurerm_sql_server" "test" { `, rInt, location, rInt) } +func testAccAzureRMSqlServer_requiresImport(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_sql_server" "import" { + name = "${azurerm_sql_server.test.name}" + resource_group_name = "${azurerm_sql_server.test.resource_group_name}" + location = "${azurerm_sql_server.test.location}" + version = "${azurerm_sql_server.test.version}" + administrator_login = "${azurerm_sql_server.test.administrator_login}" + administrator_login_password = "${azurerm_sql_server.test.administrator_login_password}" +} +`, testAccAzureRMSqlServer_basic(rInt, location)) +} + func testAccAzureRMSqlServer_withTags(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { @@ -207,7 +247,7 @@ resource "azurerm_sql_server" "test" { administrator_login = "mradministrator" administrator_login_password = "thisIsDog11" - tags { + tags = { environment = "staging" database = "test" } @@ -230,7 +270,7 @@ resource "azurerm_sql_server" "test" { administrator_login = "mradministrator" administrator_login_password = "thisIsDog11" - tags { + tags = { environment = "production" } } diff --git a/azurerm/resource_arm_sql_virtual_network_rule.go b/azurerm/resource_arm_sql_virtual_network_rule.go index 30be91a2ae2c..d975bd072935 100644 --- a/azurerm/resource_arm_sql_virtual_network_rule.go +++ b/azurerm/resource_arm_sql_virtual_network_rule.go @@ -8,6 +8,7 @@ import ( "time" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/Azure/azure-sdk-for-go/services/preview/sql/mgmt/2015-05-01-preview/sql" "github.com/hashicorp/terraform/helper/resource" @@ -68,6 +69,19 @@ func resourceArmSqlVirtualNetworkRuleCreateUpdate(d *schema.ResourceData, meta i virtualNetworkSubnetId := d.Get("subnet_id").(string) ignoreMissingVnetServiceEndpoint := d.Get("ignore_missing_vnet_service_endpoint").(bool) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, serverName, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing SQL Virtual Network Rule %q (SQL Server: %q, Resource Group: %q): %+v", name, serverName, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_sql_virtual_network_rule", *existing.ID) + } + } + parameters := sql.VirtualNetworkRule{ VirtualNetworkRuleProperties: &sql.VirtualNetworkRuleProperties{ VirtualNetworkSubnetID: utils.String(virtualNetworkSubnetId), diff --git a/azurerm/resource_arm_sql_virtual_network_rule_test.go b/azurerm/resource_arm_sql_virtual_network_rule_test.go index 7de0d3bd0c0c..444604b334e1 100644 --- a/azurerm/resource_arm_sql_virtual_network_rule_test.go +++ b/azurerm/resource_arm_sql_virtual_network_rule_test.go @@ -21,8 +21,6 @@ import ( func TestAccAzureRMSqlVirtualNetworkRule_basic(t *testing.T) { resourceName := "azurerm_sql_virtual_network_rule.test" ri := tf.AccRandTimeInt() - preConfig := testAccAzureRMSqlVirtualNetworkRule_basic(ri, testLocation()) - postConfig := testAccAzureRMSqlVirtualNetworkRule_withUpdates(ri, testLocation()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -30,7 +28,7 @@ func TestAccAzureRMSqlVirtualNetworkRule_basic(t *testing.T) { CheckDestroy: testCheckAzureRMSqlVirtualNetworkRuleDestroy, Steps: 
[]resource.TestStep{ { - Config: preConfig, + Config: testAccAzureRMSqlVirtualNetworkRule_basic(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMSqlVirtualNetworkRuleExists(resourceName), resource.TestCheckResourceAttr(resourceName, "ignore_missing_vnet_service_endpoint", "false"), @@ -42,7 +40,7 @@ func TestAccAzureRMSqlVirtualNetworkRule_basic(t *testing.T) { ImportStateVerify: true, }, { - Config: postConfig, + Config: testAccAzureRMSqlVirtualNetworkRule_withUpdates(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMSqlVirtualNetworkRuleExists(resourceName), resource.TestCheckResourceAttr(resourceName, "ignore_missing_vnet_service_endpoint", "true"), @@ -52,6 +50,34 @@ func TestAccAzureRMSqlVirtualNetworkRule_basic(t *testing.T) { }) } +func TestAccAzureRMSqlVirtualNetworkRule_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + resourceName := "azurerm_sql_virtual_network_rule.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMSqlVirtualNetworkRuleDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMSqlVirtualNetworkRule_basic(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMSqlVirtualNetworkRuleExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "ignore_missing_vnet_service_endpoint", "false"), + ), + }, + { + Config: testAccAzureRMSqlVirtualNetworkRule_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_sql_virtual_network_rule"), + }, + }, + }) +} + /* ---Testing for Success--- Test an update to the SQL Virtual Network Rule to connect to a different subnet, and @@ -465,6 +491,19 @@ resource "azurerm_sql_virtual_network_rule" "test" { } `, rInt, location, rInt, rInt, rInt, rInt, rInt) } 
+func testAccAzureRMSqlVirtualNetworkRule_requiresImport(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_sql_virtual_network_rule" "import" { + name = "${azurerm_sql_virtual_network_rule.test.name}" + resource_group_name = "${azurerm_sql_virtual_network_rule.test.resource_group_name}" + server_name = "${azurerm_sql_virtual_network_rule.test.server_name}" + subnet_id = "${azurerm_sql_virtual_network_rule.test.subnet_id}" + ignore_missing_vnet_service_endpoint = "${azurerm_sql_virtual_network_rule.test.ignore_missing_vnet_service_endpoint}" +} +`, testAccAzureRMSqlVirtualNetworkRule_basic(rInt, location)) +} /* (This test configuration is intended to succeed.) diff --git a/azurerm/resource_arm_storage_account.go b/azurerm/resource_arm_storage_account.go index 0272d60030b4..bb6922ebe638 100644 --- a/azurerm/resource_arm_storage_account.go +++ b/azurerm/resource_arm_storage_account.go @@ -7,9 +7,11 @@ import ( "strings" "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage" + "github.com/hashicorp/go-getter/helper/url" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/response" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -195,37 +197,72 @@ func resourceArmStorageAccount() *schema.Resource { Computed: true, }, + "primary_blob_host": { + Type: schema.TypeString, + Computed: true, + }, + "secondary_blob_endpoint": { Type: schema.TypeString, Computed: true, }, + "secondary_blob_host": { + Type: schema.TypeString, + Computed: true, + }, + "primary_queue_endpoint": { Type: schema.TypeString, Computed: true, }, + "primary_queue_host": { + Type: schema.TypeString, + Computed: true, + }, + "secondary_queue_endpoint": { Type: schema.TypeString, Computed: true, }, + 
"secondary_queue_host": { + Type: schema.TypeString, + Computed: true, + }, + "primary_table_endpoint": { Type: schema.TypeString, Computed: true, }, + "primary_table_host": { + Type: schema.TypeString, + Computed: true, + }, + "secondary_table_endpoint": { Type: schema.TypeString, Computed: true, }, + "secondary_table_host": { + Type: schema.TypeString, + Computed: true, + }, + // NOTE: The API does not appear to expose a secondary file endpoint "primary_file_endpoint": { Type: schema.TypeString, Computed: true, }, + "primary_file_host": { + Type: schema.TypeString, + Computed: true, + }, + "primary_access_key": { Type: schema.TypeString, Sensitive: true, @@ -323,12 +360,26 @@ func validateAzureRMStorageAccountTags(v interface{}, _ string) (warnings []stri } func resourceArmStorageAccountCreate(d *schema.ResourceData, meta interface{}) error { + ctx := meta.(*ArmClient).StopContext client := meta.(*ArmClient).storageServiceClient - resourceGroupName := d.Get("resource_group_name").(string) storageAccountName := d.Get("name").(string) - accountKind := d.Get("account_kind").(string) + resourceGroupName := d.Get("resource_group_name").(string) + + if requireResourcesToBeImported { + existing, err := client.GetProperties(ctx, resourceGroupName, storageAccountName) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Storage Account %q (Resource Group %q): %s", storageAccountName, resourceGroupName, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_storage_account", *existing.ID) + } + } + accountKind := d.Get("account_kind").(string) location := azureRMNormalizeLocation(d.Get("location").(string)) tags := d.Get("tags").(map[string]interface{}) enableBlobEncryption := d.Get("enable_blob_encryption").(bool) @@ -393,7 +444,6 @@ func resourceArmStorageAccountCreate(d *schema.ResourceData, meta interface{}) e } // Create - ctx := 
meta.(*ArmClient).StopContext future, err := client.Create(ctx, resourceGroupName, storageAccountName, parameters) if err != nil { return fmt.Errorf("Error creating Azure Storage Account %q: %+v", storageAccountName, err) @@ -662,40 +712,25 @@ func resourceArmStorageAccountRead(d *schema.ResourceData, meta interface{}) err d.Set("secondary_connection_string", scs) } - if endpoints := props.PrimaryEndpoints; endpoints != nil { - d.Set("primary_blob_endpoint", endpoints.Blob) - d.Set("primary_queue_endpoint", endpoints.Queue) - d.Set("primary_table_endpoint", endpoints.Table) - d.Set("primary_file_endpoint", endpoints.File) - - pscs := fmt.Sprintf("DefaultEndpointsProtocol=https;BlobEndpoint=%s;AccountName=%s;AccountKey=%s", - *endpoints.Blob, *resp.Name, *accessKeys[0].Value) - d.Set("primary_blob_connection_string", pscs) + if err := flattenAndSetAzureRmStorageAccountPrimaryEndpoints(d, props.PrimaryEndpoints); err != nil { + return fmt.Errorf("error setting primary endpoints and hosts for blob, queue, table and file: %+v", err) } - if endpoints := props.SecondaryEndpoints; endpoints != nil { - if blob := endpoints.Blob; blob != nil { - d.Set("secondary_blob_endpoint", blob) - sscs := fmt.Sprintf("DefaultEndpointsProtocol=https;BlobEndpoint=%s;AccountName=%s;AccountKey=%s", - *blob, *resp.Name, *accessKeys[1].Value) - d.Set("secondary_blob_connection_string", sscs) - } else { - d.Set("secondary_blob_endpoint", "") - d.Set("secondary_blob_connection_string", "") - } + var primaryBlobConnectStr string + if v := props.PrimaryEndpoints; v != nil { + primaryBlobConnectStr = getBlobConnectionString(v.Blob, resp.Name, accessKeys[0].Value) + } + d.Set("primary_blob_connection_string", primaryBlobConnectStr) - if endpoints.Queue != nil { - d.Set("secondary_queue_endpoint", endpoints.Queue) - } else { - d.Set("secondary_queue_endpoint", "") - } + if err := flattenAndSetAzureRmStorageAccountSecondaryEndpoints(d, props.SecondaryEndpoints); err != nil { + return 
fmt.Errorf("error setting secondary endpoints and hosts for blob, queue, table: %+v", err) + } - if endpoints.Table != nil { - d.Set("secondary_table_endpoint", endpoints.Table) - } else { - d.Set("secondary_table_endpoint", "") - } + var secondaryBlobConnectStr string + if v := props.SecondaryEndpoints; v != nil { + secondaryBlobConnectStr = getBlobConnectionString(v.Blob, resp.Name, accessKeys[1].Value) } + d.Set("secondary_blob_connection_string", secondaryBlobConnectStr) networkRules := props.NetworkRuleSet if networkRules != nil { @@ -785,8 +820,8 @@ func expandStorageAccountCustomDomain(d *schema.ResourceData) *storage.CustomDom name := domain["name"].(string) useSubDomain := domain["use_subdomain"].(bool) return &storage.CustomDomain{ - Name: utils.String(name), - UseSubDomain: utils.Bool(useSubDomain), + Name: utils.String(name), + UseSubDomainName: utils.Bool(useSubDomain), } } @@ -963,3 +998,139 @@ func flattenAzureRmStorageAccountIdentity(identity *storage.Identity) []interfac return []interface{}{result} } + +func getBlobConnectionString(blobEndpoint *string, acctName *string, acctKey *string) string { + var endpoint string + if blobEndpoint != nil { + endpoint = *blobEndpoint + } + + var name string + if acctName != nil { + name = *acctName + } + + var key string + if acctKey != nil { + key = *acctKey + } + + return fmt.Sprintf("DefaultEndpointsProtocol=https;BlobEndpoint=%s;AccountName=%s;AccountKey=%s", endpoint, name, key) +} + +func flattenAndSetAzureRmStorageAccountPrimaryEndpoints(d *schema.ResourceData, primary *storage.Endpoints) error { + var blobEndpoint, blobHost string + if primary != nil { + if v := primary.Blob; v != nil { + blobEndpoint = *v + + u, err := url.Parse(*v) + if err != nil { + return fmt.Errorf("invalid blob endpoint for parsing: %q", *v) + } + blobHost = u.Host + } + } + d.Set("primary_blob_endpoint", blobEndpoint) + d.Set("primary_blob_host", blobHost) + + var queueEndpoint, queueHost string + if primary != nil { + if v := 
primary.Queue; v != nil { + queueEndpoint = *v + + u, err := url.Parse(*v) + if err != nil { + return fmt.Errorf("invalid queue endpoint for parsing: %q", *v) + } + queueHost = u.Host + } + } + d.Set("primary_queue_endpoint", queueEndpoint) + d.Set("primary_queue_host", queueHost) + + var tableEndpoint, tableHost string + if primary != nil { + if v := primary.Table; v != nil { + tableEndpoint = *v + + u, err := url.Parse(*v) + if err != nil { + return fmt.Errorf("invalid table endpoint for parsing: %q", *v) + } + tableHost = u.Host + } + } + d.Set("primary_table_endpoint", tableEndpoint) + d.Set("primary_table_host", tableHost) + + var fileEndpoint, fileHost string + if primary != nil { + if v := primary.File; v != nil { + fileEndpoint = *v + + u, err := url.Parse(*v) + if err != nil { + return fmt.Errorf("invalid file endpoint for parsing: %q", *v) + } + fileHost = u.Host + } + } + d.Set("primary_file_endpoint", fileEndpoint) + d.Set("primary_file_host", fileHost) + + if primary == nil { + return fmt.Errorf("primary endpoints should not be empty") + } + + return nil +} + +func flattenAndSetAzureRmStorageAccountSecondaryEndpoints(d *schema.ResourceData, secondary *storage.Endpoints) error { + var blobEndpoint, blobHost string + if secondary != nil { + if v := secondary.Blob; v != nil { + blobEndpoint = *v + + if u, err := url.Parse(*v); err == nil { + blobHost = u.Host + } else { + return fmt.Errorf("invalid blob endpoint for parsing: %q", *v) + } + } + } + d.Set("secondary_blob_endpoint", blobEndpoint) + d.Set("secondary_blob_host", blobHost) + + var queueEndpoint, queueHost string + if secondary != nil { + if v := secondary.Queue; v != nil { + queueEndpoint = *v + + u, err := url.Parse(*v) + if err != nil { + return fmt.Errorf("invalid queue endpoint for parsing: %q", *v) + } + queueHost = u.Host + } + } + d.Set("secondary_queue_endpoint", queueEndpoint) + d.Set("secondary_queue_host", queueHost) + + var tableEndpoint, tableHost string + if secondary != nil { + 
if v := secondary.Table; v != nil { + tableEndpoint = *v + + u, err := url.Parse(*v) + if err != nil { + return fmt.Errorf("invalid table endpoint for parsing: %q", *v) + } + tableHost = u.Host + } + } + d.Set("secondary_table_endpoint", tableEndpoint) + d.Set("secondary_table_host", tableHost) + + return nil +} diff --git a/azurerm/resource_arm_storage_account_test.go b/azurerm/resource_arm_storage_account_test.go index 5859f0c051f6..fc67acdc0875 100644 --- a/azurerm/resource_arm_storage_account_test.go +++ b/azurerm/resource_arm_storage_account_test.go @@ -97,6 +97,36 @@ func TestAccAzureRMStorageAccount_basic(t *testing.T) { }) } +func TestAccAzureRMStorageAccount_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_storage_account.testsa" + ri := tf.AccRandTimeInt() + rs := acctest.RandString(4) + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMStorageAccountDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMStorageAccount_basic(ri, rs, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMStorageAccountExists(resourceName), + ), + }, + { + Config: testAccAzureRMStorageAccount_requiresImport(ri, rs, location), + ExpectError: testRequiresImportError("azurerm_storage_account"), + }, + }, + }) +} + func TestAccAzureRMStorageAccount_premium(t *testing.T) { resourceName := "azurerm_storage_account.testsa" ri := tf.AccRandTimeInt() @@ -622,13 +652,28 @@ resource "azurerm_storage_account" "testsa" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "production" } } `, rInt, location, rString) } +func testAccAzureRMStorageAccount_requiresImport(rInt int, rString string, location string) string { + template := 
testAccAzureRMStorageAccount_basic(rInt, rString, location) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_account" "import" { + name = "${azurerm_storage_account.testsa.name}" + resource_group_name = "${azurerm_storage_account.testsa.resource_group_name}" + location = "${azurerm_storage_account.testsa.location}" + account_tier = "${azurerm_storage_account.testsa.account_tier}" + account_replication_type = "${azurerm_storage_account.testsa.account_replication_type}" +} +`, template) +} + func testAccAzureRMStorageAccount_premium(rInt int, rString string, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "testrg" { @@ -644,7 +689,7 @@ resource "azurerm_storage_account" "testsa" { account_tier = "Premium" account_replication_type = "LRS" - tags { + tags = { environment = "production" } } @@ -666,7 +711,7 @@ resource "azurerm_storage_account" "testsa" { account_tier = "Standard" account_replication_type = "GRS" - tags { + tags = { environment = "staging" } } @@ -689,7 +734,7 @@ resource "azurerm_storage_account" "testsa" { account_replication_type = "LRS" enable_blob_encryption = true - tags { + tags = { environment = "production" } } @@ -712,7 +757,7 @@ resource "azurerm_storage_account" "testsa" { account_replication_type = "LRS" enable_blob_encryption = false - tags { + tags = { environment = "production" } } @@ -735,7 +780,7 @@ resource "azurerm_storage_account" "testsa" { account_replication_type = "LRS" enable_file_encryption = true - tags { + tags = { environment = "production" } } @@ -758,7 +803,7 @@ resource "azurerm_storage_account" "testsa" { account_replication_type = "LRS" enable_file_encryption = false - tags { + tags = { environment = "production" } } @@ -781,7 +826,7 @@ resource "azurerm_storage_account" "testsa" { account_replication_type = "LRS" enable_https_traffic_only = true - tags { + tags = { environment = "production" } } @@ -804,7 +849,7 @@ resource "azurerm_storage_account" "testsa" { 
account_replication_type = "LRS" enable_https_traffic_only = false - tags { + tags = { environment = "production" } } @@ -827,7 +872,7 @@ resource "azurerm_storage_account" "testsa" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "production" } } @@ -851,7 +896,7 @@ resource "azurerm_storage_account" "testsa" { account_replication_type = "LRS" access_tier = "Cool" - tags { + tags = { environment = "production" } } @@ -874,7 +919,7 @@ resource "azurerm_storage_account" "testsa" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "production" } } @@ -898,7 +943,7 @@ resource "azurerm_storage_account" "testsa" { account_replication_type = "LRS" access_tier = "Cool" - tags { + tags = { environment = "production" } } @@ -919,7 +964,7 @@ resource "azurerm_storage_account" "testsa" { account_tier = "standard" account_replication_type = "lrs" - tags { + tags = { environment = "production" } } @@ -945,7 +990,7 @@ resource "azurerm_storage_account" "testsa" { type = "SystemAssigned" } - tags { + tags = { environment = "production" } } @@ -986,7 +1031,7 @@ resource "azurerm_storage_account" "testsa" { virtual_network_subnet_ids = ["${azurerm_subnet.test.id}"] } - tags { + tags = { environment = "production" } } @@ -1027,7 +1072,7 @@ resource "azurerm_storage_account" "testsa" { bypass = ["Logging", "Metrics"] } - tags { + tags = { environment = "production" } } diff --git a/azurerm/resource_arm_storage_blob.go b/azurerm/resource_arm_storage_blob.go index cadfd14afa0d..9dd53a057da1 100644 --- a/azurerm/resource_arm_storage_blob.go +++ b/azurerm/resource_arm_storage_blob.go @@ -13,6 +13,7 @@ import ( "strings" "sync" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/hashicorp/terraform/helper/validation" @@ -143,6 +144,19 @@ func resourceArmStorageBlobCreate(d 
*schema.ResourceData, meta interface{}) erro container := blobClient.GetContainerReference(containerName) blob := container.GetBlobReference(name) + // gives us https://example.blob.core.windows.net/container/file.vhd + id := fmt.Sprintf("https://%s.blob.%s/%s/%s", storageAccountName, env.StorageEndpointSuffix, containerName, name) + if requireResourcesToBeImported && d.IsNewResource() { + exists, err := blob.Exists() + if err != nil { + return fmt.Errorf("Error checking if Blob %q exists (Container %q / Account %q / Resource Group %q): %s", name, containerName, storageAccountName, resourceGroupName, err) + } + + if exists { + return tf.ImportAsExistsError("azurerm_storage_blob", id) + } + } + if sourceUri != "" { options := &storage.CopyOptions{} if err := blob.Copy(sourceUri, options); err != nil { @@ -187,8 +201,6 @@ func resourceArmStorageBlobCreate(d *schema.ResourceData, meta interface{}) erro } } - // gives us https://example.blob.core.windows.net/container/file.vhd - id := fmt.Sprintf("https://%s.blob.%s/%s/%s", storageAccountName, env.StorageEndpointSuffix, containerName, name) d.SetId(id) return resourceArmStorageBlobRead(d, meta) } diff --git a/azurerm/resource_arm_storage_blob_test.go b/azurerm/resource_arm_storage_blob_test.go index b865de8e2484..33035fee4eeb 100644 --- a/azurerm/resource_arm_storage_blob_test.go +++ b/azurerm/resource_arm_storage_blob_test.go @@ -20,7 +20,7 @@ func TestAccAzureRMStorageBlob_basic(t *testing.T) { resourceName := "azurerm_storage_blob.test" ri := tf.AccRandTimeInt() rs := strings.ToLower(acctest.RandString(11)) - config := testAccAzureRMStorageBlob_basic(ri, rs, testLocation()) + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -28,7 +28,7 @@ func TestAccAzureRMStorageBlob_basic(t *testing.T) { CheckDestroy: testCheckAzureRMStorageBlobDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMStorageBlob_basic(ri, rs, location), 
Check: resource.ComposeTestCheckFunc( testCheckAzureRMStorageBlobExists(resourceName), ), @@ -42,6 +42,35 @@ func TestAccAzureRMStorageBlob_basic(t *testing.T) { }, }) } +func TestAccAzureRMStorageBlob_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_storage_blob.test" + ri := tf.AccRandTimeInt() + rs := strings.ToLower(acctest.RandString(11)) + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMStorageBlobDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMStorageBlob_basic(ri, rs, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMStorageBlobExists(resourceName), + ), + }, + { + Config: testAccAzureRMStorageBlob_requiresImport(ri, rs, location), + ExpectError: testRequiresImportError("azurerm_storage_blob"), + }, + }, + }) +} func TestAccAzureRMStorageBlob_disappears(t *testing.T) { resourceName := "azurerm_storage_blob.test" @@ -441,7 +470,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -466,6 +495,22 @@ resource "azurerm_storage_blob" "test" { `, rInt, location, rString) } +func testAccAzureRMStorageBlob_requiresImport(rInt int, rString string, location string) string { + template := testAccAzureRMStorageBlob_basic(rInt, rString, location) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_blob" "import" { + name = "${azurerm_storage_blob.test.name}" + resource_group_name = "${azurerm_storage_blob.test.resource_group_name}" + storage_account_name = "${azurerm_storage_blob.test.storage_account_name}" + storage_container_name = "${azurerm_storage_blob.test.storage_container_name}" + type = "${azurerm_storage_blob.test.type}" + size = 
"${azurerm_storage_blob.test.size}" +} +`, template) +} + func testAccAzureRMStorageBlobBlock_source(rInt int, rString string, sourceBlobName string, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { @@ -480,7 +525,7 @@ resource "azurerm_storage_account" "source" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -521,7 +566,7 @@ resource "azurerm_storage_account" "source" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -562,7 +607,7 @@ resource "azurerm_storage_account" "source" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -612,7 +657,7 @@ resource "azurerm_storage_account" "source" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } diff --git a/azurerm/resource_arm_storage_container.go b/azurerm/resource_arm_storage_container.go index 64bd99b1585c..e3f271a5151a 100644 --- a/azurerm/resource_arm_storage_container.go +++ b/azurerm/resource_arm_storage_container.go @@ -12,6 +12,7 @@ import ( "github.com/Azure/go-autorest/autorest/azure" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" ) func resourceArmStorageContainer() *schema.Resource { @@ -110,9 +111,20 @@ func resourceArmStorageContainerCreateUpdate(d *schema.ResourceData, meta interf accessType = storage.ContainerAccessType(d.Get("container_access_type").(string)) } - log.Printf("[INFO] Creating container %q in storage account %q.", name, storageAccountName) reference := blobClient.GetContainerReference(name) + id := fmt.Sprintf("https://%s.blob.%s/%s", storageAccountName, armClient.environment.StorageEndpointSuffix, name) + if requireResourcesToBeImported && d.IsNewResource() { + exists, e := 
reference.Exists() + if e != nil { + return fmt.Errorf("Error checking if Storage Container %q exists (Account %q / Resource Group %q): %s", name, storageAccountName, resourceGroupName, e) + } + + if exists { + return tf.ImportAsExistsError("azurerm_storage_container", id) + } + } + log.Printf("[INFO] Creating container %q in storage account %q.", name, storageAccountName) err = resource.Retry(120*time.Second, checkContainerIsCreated(reference)) if err != nil { return fmt.Errorf("Error creating container %q in storage account %q: %s", name, storageAccountName, err) @@ -127,7 +139,6 @@ func resourceArmStorageContainerCreateUpdate(d *schema.ResourceData, meta interf return fmt.Errorf("Error setting permissions for container %s in storage account %s: %+v", name, storageAccountName, err) } - id := fmt.Sprintf("https://%s.blob.%s/%s", storageAccountName, armClient.environment.StorageEndpointSuffix, name) d.SetId(id) return resourceArmStorageContainerRead(d, meta) } @@ -163,20 +174,30 @@ func resourceArmStorageContainerRead(d *schema.ResourceData, meta interface{}) e return nil } - containers, err := blobClient.ListContainers(storage.ListContainersParameters{ + var container *storage.Container + listParams := storage.ListContainersParameters{ Prefix: id.containerName, Timeout: 90, - }) - if err != nil { - return fmt.Errorf("Failed to retrieve storage containers in account %q: %s", id.containerName, err) } - var container *storage.Container - for _, cont := range containers.Containers { - if cont.Name == id.containerName { - container = &cont + for { + resp, err := blobClient.ListContainers(listParams) + if err != nil { + return fmt.Errorf("Failed to retrieve storage resp in account %q: %s", id.containerName, err) + } + + for _, c := range resp.Containers { + if c.Name == id.containerName { + container = &c + break + } + } + + if resp.NextMarker == "" { break } + + listParams.Marker = resp.NextMarker } if container == nil { diff --git 
a/azurerm/resource_arm_storage_container_test.go b/azurerm/resource_arm_storage_container_test.go index c953dab0ddbe..3de0224b4e8a 100644 --- a/azurerm/resource_arm_storage_container_test.go +++ b/azurerm/resource_arm_storage_container_test.go @@ -41,6 +41,38 @@ func TestAccAzureRMStorageContainer_basic(t *testing.T) { }) } +func TestAccAzureRMStorageContainer_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_storage_container.test" + var c storage.Container + + ri := tf.AccRandTimeInt() + rs := strings.ToLower(acctest.RandString(11)) + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMStorageContainerDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMStorageContainer_basic(ri, rs, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMStorageContainerExists(resourceName, &c), + ), + }, + { + Config: testAccAzureRMStorageContainer_requiresImport(ri, rs, location), + ExpectError: testRequiresImportError("azurerm_storage_container"), + }, + }, + }) +} + func TestAccAzureRMStorageContainer_update(t *testing.T) { resourceName := "azurerm_storage_container.test" var c storage.Container @@ -310,7 +342,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -324,6 +356,20 @@ resource "azurerm_storage_container" "test" { `, rInt, location, rString) } +func testAccAzureRMStorageContainer_requiresImport(rInt int, rString string, location string) string { + template := testAccAzureRMStorageContainer_basic(rInt, rString, location) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_container" "import" { + name = "${azurerm_storage_container.test.name}" + resource_group_name = 
"${azurerm_storage_container.test.resource_group_name}" + storage_account_name = "${azurerm_storage_container.test.storage_account_name}" + container_access_type = "${azurerm_storage_container.test.container_access_type}" +} +`, template) +} + func testAccAzureRMStorageContainer_update(rInt int, rString string, location string, accessType string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { @@ -338,7 +384,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -366,7 +412,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } diff --git a/azurerm/resource_arm_storage_queue.go b/azurerm/resource_arm_storage_queue.go index 60c8b326f725..bb7125955452 100644 --- a/azurerm/resource_arm_storage_queue.go +++ b/azurerm/resource_arm_storage_queue.go @@ -9,6 +9,7 @@ import ( "github.com/Azure/azure-sdk-for-go/storage" "github.com/hashicorp/terraform/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" ) func resourceArmStorageQueue() *schema.Resource { @@ -85,15 +86,26 @@ func resourceArmStorageQueueCreate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Storage Account %q Not Found", storageAccountName) } - log.Printf("[INFO] Creating queue %q in storage account %q", name, storageAccountName) queueReference := queueClient.GetQueueReference(name) + id := fmt.Sprintf("https://%s.queue.%s/%s", storageAccountName, environment.StorageEndpointSuffix, name) + if requireResourcesToBeImported { + exists, e := queueReference.Exists() + if e != nil { + return fmt.Errorf("Error checking if Queue %q exists (Account %q / Resource Group %q): %s", name, storageAccountName, resourceGroupName, e) + } + + if exists { + return tf.ImportAsExistsError("azurerm_storage_queue", id) + } + } + + log.Printf("[INFO] 
Creating queue %q in storage account %q", name, storageAccountName) options := &storage.QueueServiceOptions{} err = queueReference.Create(options) if err != nil { return fmt.Errorf("Error creating storage queue on Azure: %s", err) } - id := fmt.Sprintf("https://%s.queue.%s/%s", storageAccountName, environment.StorageEndpointSuffix, name) d.SetId(id) return resourceArmStorageQueueRead(d, meta) } diff --git a/azurerm/resource_arm_storage_queue_test.go b/azurerm/resource_arm_storage_queue_test.go index b05bee24ecdc..66ef960de07c 100644 --- a/azurerm/resource_arm_storage_queue_test.go +++ b/azurerm/resource_arm_storage_queue_test.go @@ -77,6 +77,36 @@ func TestAccAzureRMStorageQueue_basic(t *testing.T) { }) } +func TestAccAzureRMStorageQueue_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_storage_queue.test" + ri := tf.AccRandTimeInt() + rs := strings.ToLower(acctest.RandString(11)) + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMStorageQueueDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMStorageQueue_basic(ri, rs, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMStorageQueueExists(resourceName), + ), + }, + { + Config: testAccAzureRMStorageQueue_requiresImport(ri, rs, location), + ExpectError: testRequiresImportError("azurerm_storage_queue"), + }, + }, + }) +} + func testCheckAzureRMStorageQueueExists(resourceName string) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -167,7 +197,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -179,3 +209,16 @@ resource "azurerm_storage_queue" "test" { } `, rInt, location, rString, rInt) } + +func 
testAccAzureRMStorageQueue_requiresImport(rInt int, rString string, location string) string { + template := testAccAzureRMStorageQueue_basic(rInt, rString, location) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_queue" "import" { + name = "${azurerm_storage_queue.test.name}" + resource_group_name = "${azurerm_storage_queue.test.resource_group_name}" + storage_account_name = "${azurerm_storage_queue.test.storage_account_name}" +} +`, template) +} diff --git a/azurerm/resource_arm_storage_share.go b/azurerm/resource_arm_storage_share.go index 663fe0be27ec..7a7aab8c08b5 100644 --- a/azurerm/resource_arm_storage_share.go +++ b/azurerm/resource_arm_storage_share.go @@ -7,6 +7,7 @@ import ( "strings" "github.com/hashicorp/terraform/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/Azure/azure-sdk-for-go/storage" "github.com/hashicorp/terraform/helper/schema" @@ -71,6 +72,19 @@ func resourceArmStorageShareCreate(d *schema.ResourceData, meta interface{}) err log.Printf("[INFO] Creating share %q in storage account %q", name, storageAccountName) reference := fileClient.GetShareReference(name) + + id := fmt.Sprintf("%s/%s/%s", name, resourceGroupName, storageAccountName) + if requireResourcesToBeImported { + exists, e := reference.Exists() + if e != nil { + return fmt.Errorf("Error checking if Share %q exists (Account %q / Resource Group %q): %s", name, storageAccountName, resourceGroupName, e) + } + + if exists { + return tf.ImportAsExistsError("azurerm_storage_share", id) + } + } + err = reference.Create(options) if err != nil { return fmt.Errorf("Error creating Storage Share %q reference (storage account: %q) : %+v", name, storageAccountName, err) @@ -90,7 +104,7 @@ func resourceArmStorageShareCreate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error setting properties on Storage Share %q: %+v", name, err) } - d.SetId(fmt.Sprintf("%s/%s/%s", name, resourceGroupName, 
storageAccountName)) + d.SetId(id) return resourceArmStorageShareRead(d, meta) } diff --git a/azurerm/resource_arm_storage_share_test.go b/azurerm/resource_arm_storage_share_test.go index 5c9a192c6d47..a5d89cc9a990 100644 --- a/azurerm/resource_arm_storage_share_test.go +++ b/azurerm/resource_arm_storage_share_test.go @@ -41,6 +41,38 @@ func TestAccAzureRMStorageShare_basic(t *testing.T) { }) } +func TestAccAzureRMStorageShare_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + var sS storage.Share + + ri := tf.AccRandTimeInt() + rs := strings.ToLower(acctest.RandString(11)) + location := testLocation() + resourceName := "azurerm_storage_share.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMStorageShareDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMStorageShare_basic(ri, rs, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMStorageShareExists(resourceName, &sS), + ), + }, + { + Config: testAccAzureRMStorageShare_requiresImport(ri, rs, location), + ExpectError: testRequiresImportError("azurerm_storage_share"), + }, + }, + }) +} + func TestAccAzureRMStorageShare_disappears(t *testing.T) { var sS storage.Share @@ -252,7 +284,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -265,6 +297,19 @@ resource "azurerm_storage_share" "test" { `, rInt, location, rString) } +func testAccAzureRMStorageShare_requiresImport(rInt int, rString string, location string) string { + template := testAccAzureRMStorageShare_basic(rInt, rString, location) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_share" "import" { + name = "${azurerm_storage_share.test.name}" + resource_group_name = 
"${azurerm_storage_share.test.resource_group_name}" + storage_account_name = "${azurerm_storage_share.test.storage_account_name}" +} +`, template) +} + func testAccAzureRMStorageShare_updateQuota(rInt int, rString string, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { @@ -279,7 +324,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } diff --git a/azurerm/resource_arm_storage_table.go b/azurerm/resource_arm_storage_table.go index c669f874025f..20d4e9fc557b 100644 --- a/azurerm/resource_arm_storage_table.go +++ b/azurerm/resource_arm_storage_table.go @@ -9,6 +9,7 @@ import ( "github.com/Azure/azure-sdk-for-go/storage" "github.com/hashicorp/terraform/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" ) func resourceArmStorageTable() *schema.Resource { @@ -73,9 +74,24 @@ func resourceArmStorageTableCreate(d *schema.ResourceData, meta interface{}) err } table := tableClient.GetTableReference(name) + id := fmt.Sprintf("https://%s.table.%s/%s", storageAccountName, environment.StorageEndpointSuffix, name) - log.Printf("[INFO] Creating table %q in storage account %q.", name, storageAccountName) + if requireResourcesToBeImported { + metaDataLevel := storage.MinimalMetadata + options := &storage.QueryTablesOptions{} + tables, e := tableClient.QueryTables(metaDataLevel, options) + if e != nil { + return fmt.Errorf("Error checking if Table %q exists (Account %q / Resource Group %q): %s", name, storageAccountName, resourceGroupName, e) + } + + for _, table := range tables.Tables { + if table.Name == name { + return tf.ImportAsExistsError("azurerm_storage_table", id) + } + } + } + log.Printf("[INFO] Creating table %q in storage account %q.", name, storageAccountName) timeout := uint(60) options := &storage.TableOptions{} err = table.Create(timeout, storage.NoMetadata, options) @@ -83,7 +99,6 
@@ func resourceArmStorageTableCreate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error creating table %q in storage account %q: %s", name, storageAccountName, err) } - id := fmt.Sprintf("https://%s.table.%s/%s", storageAccountName, environment.StorageEndpointSuffix, name) d.SetId(id) return resourceArmStorageTableRead(d, meta) } diff --git a/azurerm/resource_arm_storage_table_test.go b/azurerm/resource_arm_storage_table_test.go index c0ee0817b2c6..547242678a6b 100644 --- a/azurerm/resource_arm_storage_table_test.go +++ b/azurerm/resource_arm_storage_table_test.go @@ -41,6 +41,38 @@ func TestAccAzureRMStorageTable_basic(t *testing.T) { }) } +func TestAccAzureRMStorageTable_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_storage_table.test" + var table storage.Table + + ri := tf.AccRandTimeInt() + rs := strings.ToLower(acctest.RandString(11)) + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMStorageTableDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMStorageTable_basic(ri, rs, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMStorageTableExists(resourceName, &table), + ), + }, + { + Config: testAccAzureRMStorageTable_requiresImport(ri, rs, location), + ExpectError: testRequiresImportError("azurerm_storage_table"), + }, + }, + }) +} + func TestAccAzureRMStorageTable_disappears(t *testing.T) { var table storage.Table @@ -238,7 +270,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -250,3 +282,16 @@ resource "azurerm_storage_table" "test" { } `, rInt, location, rString, rInt) } + +func testAccAzureRMStorageTable_requiresImport(rInt int, rString 
string, location string) string { + template := testAccAzureRMStorageTable_basic(rInt, rString, location) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_table" "import" { + name = "${azurerm_storage_table.test.name}" + resource_group_name = "${azurerm_storage_table.test.resource_group_name}" + storage_account_name = "${azurerm_storage_table.test.storage_account_name}" +} +`, template) +} diff --git a/azurerm/resource_arm_subnet.go b/azurerm/resource_arm_subnet.go index c68c92b58e08..3b678242dd47 100644 --- a/azurerm/resource_arm_subnet.go +++ b/azurerm/resource_arm_subnet.go @@ -7,6 +7,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -127,6 +128,20 @@ func resourceArmSubnetCreateUpdate(d *schema.ResourceData, meta interface{}) err name := d.Get("name").(string) vnetName := d.Get("virtual_network_name").(string) resGroup := d.Get("resource_group_name").(string) + + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resGroup, vnetName, name, "") + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Subnet %q (Virtual Network %q / Resource Group %q): %s", name, vnetName, resGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_subnet", *existing.ID) + } + } + addressPrefix := d.Get("address_prefix").(string) azureRMLockByName(vnetName, virtualNetworkResourceName) @@ -183,11 +198,11 @@ func resourceArmSubnetCreateUpdate(d *schema.ResourceData, meta interface{}) err future, err := client.CreateOrUpdate(ctx, resGroup, vnetName, name, subnet) if err != nil { - return fmt.Errorf("Error 
Creating/Updating Subnet %q (VN %q / Resource Group %q): %+v", name, vnetName, resGroup, err) + return fmt.Errorf("Error Creating/Updating Subnet %q (Virtual Network %q / Resource Group %q): %+v", name, vnetName, resGroup, err) } if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for completion of Subnet %q (VN %q / Resource Group %q): %+v", name, vnetName, resGroup, err) + return fmt.Errorf("Error waiting for completion of Subnet %q (Virtual Network %q / Resource Group %q): %+v", name, vnetName, resGroup, err) } read, err := client.Get(ctx, resGroup, vnetName, name, "") @@ -195,7 +210,7 @@ func resourceArmSubnetCreateUpdate(d *schema.ResourceData, meta interface{}) err return err } if read.ID == nil { - return fmt.Errorf("Cannot read ID of Subnet %q (VN %q / Resource Group %q)", vnetName, name, resGroup) + return fmt.Errorf("Cannot read ID of Subnet %q (Virtual Network %q / Resource Group %q)", vnetName, name, resGroup) } d.SetId(*read.ID) @@ -305,11 +320,11 @@ func resourceArmSubnetDelete(d *schema.ResourceData, meta interface{}) error { future, err := client.Delete(ctx, resGroup, vnetName, name) if err != nil { - return fmt.Errorf("Error deleting Subnet %q (VN %q / Resource Group %q): %+v", name, vnetName, resGroup, err) + return fmt.Errorf("Error deleting Subnet %q (Virtual Network %q / Resource Group %q): %+v", name, vnetName, resGroup, err) } if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for completion for Subnet %q (VN %q / Resource Group %q): %+v", name, vnetName, resGroup, err) + return fmt.Errorf("Error waiting for completion for Subnet %q (Virtual Network %q / Resource Group %q): %+v", name, vnetName, resGroup, err) } return nil diff --git a/azurerm/resource_arm_subnet_network_security_group_association.go b/azurerm/resource_arm_subnet_network_security_group_association.go index 5c6fb52a7209..6d9e3c9ad346 100644 --- 
a/azurerm/resource_arm_subnet_network_security_group_association.go +++ b/azurerm/resource_arm_subnet_network_security_group_association.go @@ -7,6 +7,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network" "github.com/hashicorp/terraform/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -76,6 +77,15 @@ func resourceArmSubnetNetworkSecurityGroupAssociationCreate(d *schema.ResourceDa } if props := subnet.SubnetPropertiesFormat; props != nil { + if requireResourcesToBeImported { + if nsg := props.NetworkSecurityGroup; nsg != nil { + // we're intentionally not checking the ID - if there's a NSG, it needs to be imported + if nsg.ID != nil && subnet.ID != nil { + return tf.ImportAsExistsError("azurerm_subnet_network_security_group_association", *subnet.ID) + } + } + } + props.NetworkSecurityGroup = &network.SecurityGroup{ ID: utils.String(networkSecurityGroupId), } diff --git a/azurerm/resource_arm_subnet_network_security_group_association_test.go b/azurerm/resource_arm_subnet_network_security_group_association_test.go index c7cc691c0144..09e438338dc7 100644 --- a/azurerm/resource_arm_subnet_network_security_group_association_test.go +++ b/azurerm/resource_arm_subnet_network_security_group_association_test.go @@ -36,6 +36,36 @@ func TestAccAzureRMSubnetNetworkSecurityGroupAssociation_basic(t *testing.T) { }) } +func TestAccAzureRMSubnetNetworkSecurityGroupAssociation_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_subnet_network_security_group_association.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) 
}, + Providers: testAccProviders, + // intentional as this is a Virtual Resource + CheckDestroy: testCheckAzureRMSubnetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMSubnetNetworkSecurityGroupAssociation_basic(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMSubnetNetworkSecurityGroupAssociationExists(resourceName), + ), + }, + { + Config: testAccAzureRMSubnetNetworkSecurityGroupAssociation_requiresImport(ri, location), + ExpectError: testRequiresImportError("azurerm_subnet_network_security_group_association"), + }, + }, + }) +} + func TestAccAzureRMSubnetNetworkSecurityGroupAssociation_deleted(t *testing.T) { resourceName := "azurerm_subnet_network_security_group_association.test" ri := tf.AccRandTimeInt() @@ -231,3 +261,15 @@ resource "azurerm_subnet_network_security_group_association" "test" { } `, rInt, location, rInt, rInt, rInt) } + +func testAccAzureRMSubnetNetworkSecurityGroupAssociation_requiresImport(rInt int, location string) string { + template := testAccAzureRMSubnetNetworkSecurityGroupAssociation_basic(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_subnet_network_security_group_association" "import" { + subnet_id = "${azurerm_subnet_network_security_group_association.test.subnet_id}" + network_security_group_id = "${azurerm_subnet_network_security_group_association.test.network_security_group_id}" +} +`, template) +} diff --git a/azurerm/resource_arm_subnet_route_table_association.go b/azurerm/resource_arm_subnet_route_table_association.go index 85e675964db8..bd71159a0b82 100644 --- a/azurerm/resource_arm_subnet_route_table_association.go +++ b/azurerm/resource_arm_subnet_route_table_association.go @@ -7,6 +7,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network" "github.com/hashicorp/terraform/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -76,6 +77,15 @@ func resourceArmSubnetRouteTableAssociationCreate(d *schema.ResourceData, meta i } if props := subnet.SubnetPropertiesFormat; props != nil { + if 
requireResourcesToBeImported { + if rt := props.RouteTable; rt != nil { + // we're intentionally not checking the ID - if there's a RouteTable, it needs to be imported + if rt.ID != nil && subnet.ID != nil { + return tf.ImportAsExistsError("azurerm_subnet_route_table_association", *subnet.ID) + } + } + } + props.RouteTable = &network.RouteTable{ ID: utils.String(routeTableId), } diff --git a/azurerm/resource_arm_subnet_route_table_association_test.go b/azurerm/resource_arm_subnet_route_table_association_test.go index 6b3117ca350c..e34c46485303 100644 --- a/azurerm/resource_arm_subnet_route_table_association_test.go +++ b/azurerm/resource_arm_subnet_route_table_association_test.go @@ -35,6 +35,35 @@ func TestAccAzureRMSubnetRouteTableAssociation_basic(t *testing.T) { }, }) } +func TestAccAzureRMSubnetRouteTableAssociation_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_subnet_route_table_association.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + // intentional since this is a Virtual Resource + CheckDestroy: testCheckAzureRMSubnetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMSubnetRouteTableAssociation_basic(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMSubnetRouteTableAssociationExists(resourceName), + ), + }, + { + Config: testAccAzureRMSubnetRouteTableAssociation_requiresImport(ri, location), + ExpectError: testRequiresImportError("azurerm_subnet_route_table_association"), + }, + }, + }) +} func TestAccAzureRMSubnetRouteTableAssociation_deleted(t *testing.T) { resourceName := "azurerm_subnet_route_table_association.test" @@ -226,3 +255,15 @@ resource "azurerm_subnet_route_table_association" "test" { } `, rInt, location, rInt, rInt, rInt, rInt) } + +func 
testAccAzureRMSubnetRouteTableAssociation_requiresImport(rInt int, location string) string { + template := testAccAzureRMSubnetRouteTableAssociation_basic(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_subnet_route_table_association" "import" { + subnet_id = "${azurerm_subnet_route_table_association.test.subnet_id}" + route_table_id = "${azurerm_subnet_route_table_association.test.route_table_id}" +} +`, template) +} diff --git a/azurerm/resource_arm_subnet_test.go b/azurerm/resource_arm_subnet_test.go index 6848dff78978..ef4b9feea013 100644 --- a/azurerm/resource_arm_subnet_test.go +++ b/azurerm/resource_arm_subnet_test.go @@ -38,6 +38,35 @@ func TestAccAzureRMSubnet_basic(t *testing.T) { }) } +func TestAccAzureRMSubnet_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_subnet.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMSubnetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMSubnet_basic(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMSubnetExists(resourceName), + ), + }, + { + Config: testAccAzureRMSubnet_requiresImport(ri, location), + ExpectError: testRequiresImportError("azurerm_subnet"), + }, + }, + }) +} + func TestAccAzureRMSubnet_delegation(t *testing.T) { resourceName := "azurerm_subnet.test" ri := tf.AccRandTimeInt() @@ -221,6 +250,57 @@ func TestAccAzureRMSubnet_disappears(t *testing.T) { }) } +func TestAccAzureRMSubnet_serviceEndpoints(t *testing.T) { + resourceName := "azurerm_subnet.test" + ri := tf.AccRandTimeInt() + config := testAccAzureRMSubnet_serviceEndpoints(ri, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: 
testAccProviders, + CheckDestroy: testCheckAzureRMSubnetDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMSubnetExists("azurerm_subnet.test"), + resource.TestCheckResourceAttr(resourceName, "service_endpoints.#", "2"), + ), + }, + }, + }) +} + +func TestAccAzureRMSubnet_serviceEndpointsVNetUpdate(t *testing.T) { + resourceName := "azurerm_subnet.test" + ri := tf.AccRandTimeInt() + location := testLocation() + config := testAccAzureRMSubnet_serviceEndpoints(ri, location) + updatedConfig := testAccAzureRMSubnet_serviceEndpointsVNetUpdate(ri, location) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMSubnetDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMSubnetExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "service_endpoints.#", "2"), + ), + }, + { + Config: updatedConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMSubnetExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "service_endpoints.#", "2"), + ), + }, + }, + }) +} + func testCheckAzureRMSubnetExists(resourceName string) resource.TestCheckFunc { return func(s *terraform.State) error { // Ensure we have enough information in state to look up in API @@ -362,26 +442,6 @@ func testCheckAzureRMSubnetDestroy(s *terraform.State) error { return nil } -func TestAccAzureRMSubnet_serviceEndpoints(t *testing.T) { - - ri := tf.AccRandTimeInt() - config := testAccAzureRMSubnet_serviceEndpoints(ri, testLocation()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMSubnetDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMSubnetExists("azurerm_subnet.test"), - ), - 
}, - }, - }) -} - func testAccAzureRMSubnet_basic(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { @@ -405,6 +465,20 @@ resource "azurerm_subnet" "test" { `, rInt, location, rInt, rInt) } +func testAccAzureRMSubnet_requiresImport(rInt int, location string) string { + template := testAccAzureRMSubnet_basic(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_subnet" "import" { + name = "${azurerm_subnet.test.name}" + resource_group_name = "${azurerm_subnet.test.resource_group_name}" + virtual_network_name = "${azurerm_subnet.test.virtual_network_name}" + address_prefix = "${azurerm_subnet.test.address_prefix}" +} +`, template) +} + func testAccAzureRMSubnet_delegation(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { @@ -515,7 +589,7 @@ resource "azurerm_resource_group" "test" { name = "acctestRG-%d" location = "%s" - tags { + tags = { environment = "Testing" } } @@ -537,7 +611,7 @@ resource "azurerm_network_security_group" "test_secgroup" { destination_address_prefix = "*" } - tags { + tags = { environment = "Testing" } } @@ -548,7 +622,7 @@ resource "azurerm_virtual_network" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { environment = "Testing" } } @@ -573,7 +647,7 @@ resource "azurerm_route_table" "test" { next_hop_in_ip_address = "10.10.1.1" } - tags { + tags = { environment = "Testing" } } @@ -779,3 +853,31 @@ resource "azurerm_subnet" "test" { } `, rInt, location, rInt, rInt) } + +func testAccAzureRMSubnet_serviceEndpointsVNetUpdate(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_virtual_network" "test" { + name = "acctestvirtnet%d" + address_space = ["10.0.0.0/16"] + location = "${azurerm_resource_group.test.location}" + 
resource_group_name = "${azurerm_resource_group.test.name}" + + tags = { + Environment = "Staging" + } +} + +resource "azurerm_subnet" "test" { + name = "acctestsubnet%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.0.2.0/24" + service_endpoints = ["Microsoft.Sql", "Microsoft.Storage"] +} +`, rInt, location, rInt, rInt) +} diff --git a/azurerm/resource_arm_template_deployment.go b/azurerm/resource_arm_template_deployment.go index aeab59c1f123..fc2ebb6c24ff 100644 --- a/azurerm/resource_arm_template_deployment.go +++ b/azurerm/resource_arm_template_deployment.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -79,6 +80,19 @@ func resourceArmTemplateDeploymentCreateUpdate(d *schema.ResourceData, meta inte resourceGroup := d.Get("resource_group_name").(string) deploymentMode := d.Get("deployment_mode").(string) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := deployClient.Get(ctx, resourceGroup, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Template Deployment %s (resource group %s) %v", name, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_template_deployment", *existing.ID) + } + } + log.Printf("[INFO] preparing arguments for AzureRM Template Deployment creation.") properties := resources.DeploymentProperties{ Mode: resources.DeploymentMode(deploymentMode), @@ -127,7 +141,7 @@ func resourceArmTemplateDeploymentCreateUpdate(d *schema.ResourceData, meta inte } if err = 
future.WaitForCompletionRef(ctx, deployClient.Client); err != nil { - return fmt.Errorf("Error creating deployment: %+v", err) + return fmt.Errorf("Error waiting for deployment: %+v", err) } read, err := deployClient.Get(ctx, resourceGroup, name) diff --git a/azurerm/resource_arm_template_deployment_test.go b/azurerm/resource_arm_template_deployment_test.go index a4c81afc8975..b4f8f408bca6 100644 --- a/azurerm/resource_arm_template_deployment_test.go +++ b/azurerm/resource_arm_template_deployment_test.go @@ -13,32 +13,58 @@ import ( func TestAccAzureRMTemplateDeployment_basic(t *testing.T) { ri := tf.AccRandTimeInt() - config := testAccAzureRMTemplateDeployment_basicMultiple(ri, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMTemplateDeploymentDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMTemplateDeployment_basicMultiple(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMTemplateDeploymentExists("azurerm_template_deployment.test"), + ), + }, + }, + }) +} +func TestAccAzureRMTemplateDeployment_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + ri := tf.AccRandTimeInt() + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMTemplateDeploymentDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMTemplateDeployment_basicMultiple(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMTemplateDeploymentExists("azurerm_template_deployment.test"), ), }, + { + Config: testAccAzureRMTemplateDeployment_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_template_deployment"), + }, }, }) } func TestAccAzureRMTemplateDeployment_disappears(t 
*testing.T) { ri := tf.AccRandTimeInt() - config := testAccAzureRMTemplateDeployment_basicSingle(ri, testLocation()) + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMTemplateDeploymentDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMTemplateDeployment_basicSingle(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMTemplateDeploymentExists("azurerm_template_deployment.test"), testCheckAzureRMTemplateDeploymentDisappears("azurerm_template_deployment.test"), @@ -51,14 +77,14 @@ func TestAccAzureRMTemplateDeployment_disappears(t *testing.T) { func TestAccAzureRMTemplateDeployment_nestedTemplate(t *testing.T) { ri := tf.AccRandTimeInt() - config := testAccAzureRMTemplateDeployment_nestedTemplate(ri, testLocation()) + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMTemplateDeploymentDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMTemplateDeployment_nestedTemplate(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMTemplateDeploymentExists("azurerm_template_deployment.test"), ), @@ -69,14 +95,14 @@ func TestAccAzureRMTemplateDeployment_nestedTemplate(t *testing.T) { func TestAccAzureRMTemplateDeployment_withParams(t *testing.T) { ri := tf.AccRandTimeInt() - config := testAccAzureRMTemplateDeployment_withParams(ri, testLocation()) + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMTemplateDeploymentDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMTemplateDeployment_withParams(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMTemplateDeploymentExists("azurerm_template_deployment.test"), 
resource.TestCheckResourceAttr("azurerm_template_deployment.test", "outputs.testOutput", "Output Value"), @@ -88,14 +114,14 @@ func TestAccAzureRMTemplateDeployment_withParams(t *testing.T) { func TestAccAzureRMTemplateDeployment_withParamsBody(t *testing.T) { ri := tf.AccRandTimeInt() - config := testaccAzureRMTemplateDeployment_withParamsBody(ri, testLocation()) + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMTemplateDeploymentDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testaccAzureRMTemplateDeployment_withParamsBody(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMTemplateDeploymentExists("azurerm_template_deployment.test"), resource.TestCheckResourceAttr("azurerm_template_deployment.test", "outputs.testOutput", "Output Value"), @@ -108,14 +134,14 @@ func TestAccAzureRMTemplateDeployment_withParamsBody(t *testing.T) { func TestAccAzureRMTemplateDeployment_withOutputs(t *testing.T) { ri := tf.AccRandTimeInt() - config := testAccAzureRMTemplateDeployment_withOutputs(ri, testLocation()) + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMTemplateDeploymentDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMTemplateDeployment_withOutputs(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMTemplateDeploymentExists("azurerm_template_deployment.test"), resource.TestCheckOutput("tfIntOutput", "-123"), @@ -136,14 +162,14 @@ func TestAccAzureRMTemplateDeployment_withOutputs(t *testing.T) { func TestAccAzureRMTemplateDeployment_withError(t *testing.T) { ri := tf.AccRandTimeInt() - config := testAccAzureRMTemplateDeployment_withError(ri, testLocation()) + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: 
testCheckAzureRMTemplateDeploymentDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMTemplateDeployment_withError(ri, testLocation()), ExpectError: regexp.MustCompile("Code=\"DeploymentFailed\""), }, }, @@ -342,6 +368,20 @@ DEPLOY `, rInt, location, rInt) } +func testAccAzureRMTemplateDeployment_requiresImport(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_template_deployment" "import" { + name = "${azurerm_template_deployment.test.name}" + resource_group_name = "${azurerm_template_deployment.test.resource_group_name}" + + template_body = "${azurerm_template_deployment.test.template_body}" + deployment_mode = "${azurerm_template_deployment.test.deployment_mode}" +} +`, testAccAzureRMTemplateDeployment_basicMultiple(rInt, location)) +} + func testAccAzureRMTemplateDeployment_nestedTemplate(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { diff --git a/azurerm/resource_arm_traffic_manager_endpoint.go b/azurerm/resource_arm_traffic_manager_endpoint.go index 3aaf78642100..c1a8b934a56a 100644 --- a/azurerm/resource_arm_traffic_manager_endpoint.go +++ b/azurerm/resource_arm_traffic_manager_endpoint.go @@ -8,6 +8,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/trafficmanager/mgmt/2017-05-01/trafficmanager" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -114,6 +115,7 @@ func resourceArmTrafficManagerEndpoint() *schema.Resource { func resourceArmTrafficManagerEndpointCreateUpdate(d *schema.ResourceData, meta interface{}) error { client := meta.(*ArmClient).trafficManagerEndpointsClient + ctx := meta.(*ArmClient).StopContext log.Printf("[INFO] preparing arguments for TrafficManager Endpoint creation.") @@ -123,14 +125,25 @@ func 
resourceArmTrafficManagerEndpointCreateUpdate(d *schema.ResourceData, meta profileName := d.Get("profile_name").(string) resourceGroup := d.Get("resource_group_name").(string) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, profileName, endpointType, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Traffic Manager Endpoint %q (Resource Group %q): %v", name, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_traffic_manager_endpoint", *existing.ID) + } + } + params := trafficmanager.Endpoint{ Name: &name, Type: &fullEndpointType, EndpointProperties: getArmTrafficManagerEndpointProperties(d), } - ctx := meta.(*ArmClient).StopContext - if _, err := client.CreateOrUpdate(ctx, resourceGroup, profileName, endpointType, name, params); err != nil { return err } @@ -235,7 +248,7 @@ func getArmTrafficManagerEndpointProperties(d *schema.ResourceData) *trafficmana if resourceId := d.Get("target_resource_id").(string); resourceId != "" { endpointProps.TargetResourceID = utils.String(resourceId) - //TODO? Workaround for upstream behavior: if the target is blank instead of nil, the REST API will throw a 500 error. 
Remove if/when no longer necessary + //NOTE: Workaround for upstream behavior: if the target is blank instead of nil, the REST API will throw a 500 error if target == "" { endpointProps.Target = nil } diff --git a/azurerm/resource_arm_traffic_manager_endpoint_test.go b/azurerm/resource_arm_traffic_manager_endpoint_test.go index 1718865b95a9..0a5e57cb06f8 100644 --- a/azurerm/resource_arm_traffic_manager_endpoint_test.go +++ b/azurerm/resource_arm_traffic_manager_endpoint_test.go @@ -15,7 +15,6 @@ func TestAccAzureRMTrafficManagerEndpoint_basic(t *testing.T) { azureResourceName := "azurerm_traffic_manager_endpoint.testAzure" externalResourceName := "azurerm_traffic_manager_endpoint.testExternal" ri := tf.AccRandTimeInt() - config := testAccAzureRMTrafficManagerEndpoint_basic(ri, testLocation()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -23,7 +22,7 @@ func TestAccAzureRMTrafficManagerEndpoint_basic(t *testing.T) { CheckDestroy: testCheckAzureRMTrafficManagerEndpointDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMTrafficManagerEndpoint_basic(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMTrafficManagerEndpointExists(azureResourceName), testCheckAzureRMTrafficManagerEndpointExists(externalResourceName), @@ -39,12 +38,42 @@ func TestAccAzureRMTrafficManagerEndpoint_basic(t *testing.T) { }, }) } +func TestAccAzureRMTrafficManagerEndpoint_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + azureResourceName := "azurerm_traffic_manager_endpoint.testAzure" + externalResourceName := "azurerm_traffic_manager_endpoint.testExternal" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMTrafficManagerEndpointDestroy, + Steps: []resource.TestStep{ + 
{ + Config: testAccAzureRMTrafficManagerEndpoint_basic(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMTrafficManagerEndpointExists(azureResourceName), + testCheckAzureRMTrafficManagerEndpointExists(externalResourceName), + resource.TestCheckResourceAttr(azureResourceName, "endpoint_status", "Enabled"), + resource.TestCheckResourceAttr(externalResourceName, "endpoint_status", "Enabled"), + ), + }, + { + Config: testAccAzureRMTrafficManagerEndpoint_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_traffic_manager_endpoint"), + }, + }, + }) +} func TestAccAzureRMTrafficManagerEndpoint_disappears(t *testing.T) { azureResourceName := "azurerm_traffic_manager_endpoint.testAzure" externalResourceName := "azurerm_traffic_manager_endpoint.testExternal" ri := tf.AccRandTimeInt() - config := testAccAzureRMTrafficManagerEndpoint_basic(ri, testLocation()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -52,7 +81,7 @@ func TestAccAzureRMTrafficManagerEndpoint_disappears(t *testing.T) { CheckDestroy: testCheckAzureRMTrafficManagerEndpointDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMTrafficManagerEndpoint_basic(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMTrafficManagerEndpointExists(azureResourceName), testCheckAzureRMTrafficManagerEndpointExists(externalResourceName), @@ -394,6 +423,21 @@ resource "azurerm_traffic_manager_endpoint" "testExternal" { `, rInt, location, rInt, rInt, rInt, rInt, rInt, rInt) } +func testAccAzureRMTrafficManagerEndpoint_requiresImport(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_traffic_manager_endpoint" "import" { + name = "${azurerm_traffic_manager_endpoint.testAzure.name}" + type = "${azurerm_traffic_manager_endpoint.testAzure.type}" + target_resource_id = "${azurerm_traffic_manager_endpoint.testAzure.target_resource_id}" + weight = 
"${azurerm_traffic_manager_endpoint.testAzure.weight}" + profile_name = "${azurerm_traffic_manager_endpoint.testAzure.profile_name}" + resource_group_name = "${azurerm_traffic_manager_endpoint.testAzure.resource_group_name}" +} +`, testAccAzureRMTrafficManagerEndpoint_basic(rInt, location)) +} + func testAccAzureRMTrafficManagerEndpoint_basicDisableExternal(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { @@ -777,7 +821,7 @@ resource "azurerm_traffic_manager_profile" "test" { path = "/" } - tags { + tags = { environment = "Production" } } @@ -816,7 +860,7 @@ resource "azurerm_traffic_manager_profile" "test" { path = "/" } - tags { + tags = { environment = "Production" } } diff --git a/azurerm/resource_arm_traffic_manager_profile.go b/azurerm/resource_arm_traffic_manager_profile.go index 06277ccb998a..12148012823b 100644 --- a/azurerm/resource_arm_traffic_manager_profile.go +++ b/azurerm/resource_arm_traffic_manager_profile.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -116,6 +117,7 @@ func resourceArmTrafficManagerProfile() *schema.Resource { func resourceArmTrafficManagerProfileCreateUpdate(d *schema.ResourceData, meta interface{}) error { client := meta.(*ArmClient).trafficManagerProfilesClient + ctx := meta.(*ArmClient).StopContext log.Printf("[INFO] preparing arguments for Azure ARM virtual network creation.") @@ -125,6 +127,19 @@ func resourceArmTrafficManagerProfileCreateUpdate(d *schema.ResourceData, meta i resGroup := d.Get("resource_group_name").(string) tags := d.Get("tags").(map[string]interface{}) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resGroup, name) + if err != 
nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing TrafficManager profile %s (resource group %s) ID", name, resGroup) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_traffic_manager_profile", *existing.ID) + } + } + profile := trafficmanager.Profile{ Name: &name, Location: &location, @@ -132,7 +147,6 @@ func resourceArmTrafficManagerProfileCreateUpdate(d *schema.ResourceData, meta i Tags: expandTags(tags), } - ctx := meta.(*ArmClient).StopContext if _, err := client.CreateOrUpdate(ctx, resGroup, name, profile); err != nil { return err } diff --git a/azurerm/resource_arm_traffic_manager_profile_test.go b/azurerm/resource_arm_traffic_manager_profile_test.go index c2021d2f8c0c..aad2fd0bc4b5 100644 --- a/azurerm/resource_arm_traffic_manager_profile_test.go +++ b/azurerm/resource_arm_traffic_manager_profile_test.go @@ -23,7 +23,6 @@ func getTrafficManagerFQDN(hostname string) (string, error) { func TestAccAzureRMTrafficManagerProfile_geographic(t *testing.T) { resourceName := "azurerm_traffic_manager_profile.test" ri := tf.AccRandTimeInt() - config := testAccAzureRMTrafficManagerProfile_geographic(ri, testLocation()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -31,7 +30,7 @@ func TestAccAzureRMTrafficManagerProfile_geographic(t *testing.T) { CheckDestroy: testCheckAzureRMTrafficManagerProfileDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMTrafficManagerProfile_geographic(ri, testLocation()), Check: resource.ComposeTestCheckFunc( testCheckAzureRMTrafficManagerProfileExists(resourceName), resource.TestCheckResourceAttr(resourceName, "traffic_routing_method", "Geographic"), @@ -45,6 +44,34 @@ func TestAccAzureRMTrafficManagerProfile_geographic(t *testing.T) { }, }) } +func TestAccAzureRMTrafficManagerProfile_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + 
t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_traffic_manager_profile.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMTrafficManagerProfileDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMTrafficManagerProfile_geographic(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMTrafficManagerProfileExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "traffic_routing_method", "Geographic"), + ), + }, + { + Config: testAccAzureRMTrafficManagerProfile_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_traffic_manager_profile"), + }, + }, + }) +} func TestAccAzureRMTrafficManagerProfile_weighted(t *testing.T) { resourceName := "azurerm_traffic_manager_profile.test" @@ -364,6 +391,28 @@ resource "azurerm_traffic_manager_profile" "test" { } `, rInt, location, rInt, rInt) } +func testAccAzureRMTrafficManagerProfile_requiresImport(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_traffic_manager_profile" "import" { + name = "${azurerm_traffic_manager_profile.test.name}" + resource_group_name = "${azurerm_traffic_manager_profile.test.resource_group_name}" + traffic_routing_method = "${azurerm_traffic_manager_profile.test.traffic_routing_method}" + + dns_config { + relative_name = "acctesttmp%d" + ttl = 30 + } + + monitor_config { + protocol = "https" + port = 443 + path = "/" + } +} +`, testAccAzureRMTrafficManagerProfile_geographic(rInt, location), rInt) +} func testAccAzureRMTrafficManagerProfile_weighted(rInt int, location string) string { return fmt.Sprintf(` @@ -491,7 +540,7 @@ resource "azurerm_traffic_manager_profile" "test" { path = "/" } - tags { + tags = { environment = "Production" cost_center = "MSFT" } @@ -522,7 +571,7 @@ resource 
"azurerm_traffic_manager_profile" "test" { path = "/" } - tags { + tags = { environment = "staging" } } diff --git a/azurerm/resource_arm_user_assigned_identity.go b/azurerm/resource_arm_user_assigned_identity.go index 8545e2813dad..fbedbc31cada 100644 --- a/azurerm/resource_arm_user_assigned_identity.go +++ b/azurerm/resource_arm_user_assigned_identity.go @@ -7,6 +7,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/preview/msi/mgmt/2015-08-31-preview/msi" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -57,6 +58,20 @@ func resourceArmUserAssignedIdentityCreateUpdate(d *schema.ResourceData, meta in location := d.Get("location").(string) resGroup := d.Get("resource_group_name").(string) tags := d.Get("tags").(map[string]interface{}) + + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resGroup, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing User Assigned Identity %q (Resource Group %q): %+v", name, resGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_user_assigned_identity", *existing.ID) + } + } + identity := msi.Identity{ Name: &name, Location: &location, diff --git a/azurerm/resource_arm_user_assigned_identity_test.go b/azurerm/resource_arm_user_assigned_identity_test.go index f9f97d69ea14..bc553d1d7a27 100644 --- a/azurerm/resource_arm_user_assigned_identity_test.go +++ b/azurerm/resource_arm_user_assigned_identity_test.go @@ -18,14 +18,14 @@ func TestAccAzureRMUserAssignedIdentity_basic(t *testing.T) { resourceName := "azurerm_user_assigned_identity.test" ri := tf.AccRandTimeInt() rs := acctest.RandString(14) - config := 
testAccAzureRMUserAssignedIdentity_basic(ri, testLocation(), rs) + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testCheckAzureRMUserAssignedIdentityDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMUserAssignedIdentity_basic(ri, testLocation(), rs), Check: resource.ComposeTestCheckFunc( testCheckAzureRMUserAssignedIdentityExists(resourceName), resource.TestMatchResourceAttr(resourceName, "principal_id", regexp.MustCompile(generatedUuidRegex)), @@ -40,6 +40,37 @@ func TestAccAzureRMUserAssignedIdentity_basic(t *testing.T) { }, }) } +func TestAccAzureRMUserAssignedIdentity_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + generatedUuidRegex := "^[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}$" + resourceName := "azurerm_user_assigned_identity.test" + ri := tf.AccRandTimeInt() + rs := acctest.RandString(14) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMUserAssignedIdentityDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMUserAssignedIdentity_basic(ri, testLocation(), rs), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMUserAssignedIdentityExists(resourceName), + resource.TestMatchResourceAttr(resourceName, "principal_id", regexp.MustCompile(generatedUuidRegex)), + resource.TestMatchResourceAttr(resourceName, "client_id", regexp.MustCompile(generatedUuidRegex)), + ), + }, + { + Config: testAccAzureRMUserAssignedIdentity_requiresImport(ri, testLocation(), rs), + ExpectError: testRequiresImportError("azurerm_user_assigned_identity"), + }, + }, + }) +} func testCheckAzureRMUserAssignedIdentityExists(resourceName string) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -113,3 
+144,15 @@ resource "azurerm_user_assigned_identity" "test" { } `, rInt, location, rString) } + +func testAccAzureRMUserAssignedIdentity_requiresImport(rInt int, location string, rString string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_user_assigned_identity" "import" { + name = "${azurerm_user_assigned_identity.test.name}" + resource_group_name = "${azurerm_user_assigned_identity.test.resource_group_name}" + location = "${azurerm_user_assigned_identity.test.location}" +} +`, testAccAzureRMUserAssignedIdentity_basic(rInt, location, rString)) +} diff --git a/azurerm/resource_arm_virtual_machine.go b/azurerm/resource_arm_virtual_machine.go index 56294b23242c..e37ed69d915a 100644 --- a/azurerm/resource_arm_virtual_machine.go +++ b/azurerm/resource_arm_virtual_machine.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" "golang.org/x/net/context" ) @@ -560,8 +561,22 @@ func resourceArmVirtualMachineCreateUpdate(d *schema.ResourceData, meta interfac log.Printf("[INFO] preparing arguments for Azure ARM Virtual Machine creation.") name := d.Get("name").(string) - location := azureRMNormalizeLocation(d.Get("location").(string)) resGroup := d.Get("resource_group_name").(string) + + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resGroup, name, "") + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Virtual Machine %q (Resource Group %q): %s", name, resGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_virtual_machine", *existing.ID) + } + } + + location := 
azureRMNormalizeLocation(d.Get("location").(string)) tags := d.Get("tags").(map[string]interface{}) expandedTags := expandTags(tags) zones := expandZones(d.Get("zones").([]interface{})) diff --git a/azurerm/resource_arm_virtual_machine_data_disk_attachment.go b/azurerm/resource_arm_virtual_machine_data_disk_attachment.go index 4bb9f52d5edc..e1e29a119e6b 100644 --- a/azurerm/resource_arm_virtual_machine_data_disk_attachment.go +++ b/azurerm/resource_arm_virtual_machine_data_disk_attachment.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -112,6 +113,7 @@ func resourceArmVirtualMachineDataDiskAttachmentCreateUpdate(d *schema.ResourceD } name := *managedDisk.Name + resourceId := fmt.Sprintf("%s/dataDisks/%s", virtualMachineId, name) lun := int32(d.Get("lun").(int)) caching := d.Get("caching").(string) createOption := compute.DiskCreateOptionTypes(d.Get("create_option").(string)) @@ -130,18 +132,24 @@ func resourceArmVirtualMachineDataDiskAttachmentCreateUpdate(d *schema.ResourceD } disks := *virtualMachine.StorageProfile.DataDisks + + existingIndex := -1 + for i, disk := range disks { + if *disk.Name == name { + existingIndex = i + break + } + } + if d.IsNewResource() { - disks = append(disks, expandedDisk) - } else { - // iterate over the disks and swap it out in-place - existingIndex := -1 - for i, disk := range disks { - if *disk.Name == name { - existingIndex = i - break + if requireResourcesToBeImported { + if existingIndex != -1 { + return tf.ImportAsExistsError("azurerm_virtual_machine_data_disk_attachment", resourceId) } } + disks = append(disks, expandedDisk) + } else { if existingIndex == -1 { return fmt.Errorf("Unable to find Disk %q attached 
to Virtual Machine %q (Resource Group %q)", name, virtualMachineName, resourceGroup) } @@ -166,8 +174,7 @@ func resourceArmVirtualMachineDataDiskAttachmentCreateUpdate(d *schema.ResourceD return fmt.Errorf("Error waiting for Virtual Machine %q (Resource Group %q) to finish updating Disk %q: %+v", virtualMachineName, resourceGroup, name, err) } - d.SetId(fmt.Sprintf("%s/dataDisks/%s", virtualMachineId, name)) - + d.SetId(resourceId) return resourceArmVirtualMachineDataDiskAttachmentRead(d, meta) } diff --git a/azurerm/resource_arm_virtual_machine_data_disk_attachment_test.go b/azurerm/resource_arm_virtual_machine_data_disk_attachment_test.go index ab9a72a7532b..3178ad639cb1 100644 --- a/azurerm/resource_arm_virtual_machine_data_disk_attachment_test.go +++ b/azurerm/resource_arm_virtual_machine_data_disk_attachment_test.go @@ -40,6 +40,34 @@ func TestAccAzureRMVirtualMachineDataDiskAttachment_basic(t *testing.T) { }) } +func TestAccAzureRMVirtualMachineDataDiskAttachment_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_virtual_machine_data_disk_attachment.test" + ri := tf.AccRandTimeInt() + location := testLocation() + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMVirtualMachineDataDiskAttachmentDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMVirtualMachineDataDiskAttachment_basic(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualMachineDataDiskAttachmentExists(resourceName), + ), + }, + { + Config: testAccAzureRMVirtualMachineDataDiskAttachment_requiresImport(ri, location), + ExpectError: testRequiresImportError("azurerm_virtual_machine_data_disk_attachment"), + }, + }, + }) +} + func TestAccAzureRMVirtualMachineDataDiskAttachment_multipleDisks(t *testing.T) { firstResourceName := 
"azurerm_virtual_machine_data_disk_attachment.first" secondResourceName := "azurerm_virtual_machine_data_disk_attachment.second" @@ -281,6 +309,20 @@ resource "azurerm_virtual_machine_data_disk_attachment" "test" { `, template) } +func testAccAzureRMVirtualMachineDataDiskAttachment_requiresImport(rInt int, location string) string { + template := testAccAzureRMVirtualMachineDataDiskAttachment_basic(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_virtual_machine_data_disk_attachment" "import" { + managed_disk_id = "${azurerm_virtual_machine_data_disk_attachment.test.managed_disk_id}" + virtual_machine_id = "${azurerm_virtual_machine_data_disk_attachment.test.virtual_machine_id}" + lun = "${azurerm_virtual_machine_data_disk_attachment.test.lun}" + caching = "${azurerm_virtual_machine_data_disk_attachment.test.caching}" +} +`, template) +} + func testAccAzureRMVirtualMachineDataDiskAttachment_multipleDisks(rInt int, location string) string { template := testAccAzureRMVirtualMachineDataDiskAttachment_template(rInt, location) return fmt.Sprintf(` @@ -574,7 +616,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "staging" } } @@ -594,7 +636,7 @@ resource "azurerm_virtual_machine_extension" "test" { } SETTINGS - tags { + tags = { environment = "Production" } } diff --git a/azurerm/resource_arm_virtual_machine_extension.go b/azurerm/resource_arm_virtual_machine_extension.go index 6e4bcce43cbe..768ca29ca9ad 100644 --- a/azurerm/resource_arm_virtual_machine_extension.go +++ b/azurerm/resource_arm_virtual_machine_extension.go @@ -7,6 +7,7 @@ import ( "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/structure" "github.com/hashicorp/terraform/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -83,9 +84,23 @@ func 
resourceArmVirtualMachineExtensionsCreateUpdate(d *schema.ResourceData, met ctx := meta.(*ArmClient).StopContext name := d.Get("name").(string) - location := azureRMNormalizeLocation(d.Get("location").(string)) vmName := d.Get("virtual_machine_name").(string) resGroup := d.Get("resource_group_name").(string) + + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resGroup, vmName, name, "") + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Extension %q (Virtual Machine %q / Resource Group %q): %s", name, vmName, resGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_virtual_machine_extension", *existing.ID) + } + } + + location := azureRMNormalizeLocation(d.Get("location").(string)) publisher := d.Get("publisher").(string) extensionType := d.Get("type").(string) typeHandlerVersion := d.Get("type_handler_version").(string) diff --git a/azurerm/resource_arm_virtual_machine_extension_test.go b/azurerm/resource_arm_virtual_machine_extension_test.go index 472e0b5eb40a..fc49a51f31c7 100644 --- a/azurerm/resource_arm_virtual_machine_extension_test.go +++ b/azurerm/resource_arm_virtual_machine_extension_test.go @@ -47,6 +47,35 @@ func TestAccAzureRMVirtualMachineExtension_basic(t *testing.T) { }) } +func TestAccAzureRMVirtualMachineExtension_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_virtual_machine_extension.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMVirtualMachineExtensionDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMVirtualMachineExtension_basic(ri, location), + Check: 
resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualMachineExtensionExists(resourceName), + ), + }, + { + Config: testAccAzureRMVirtualMachineExtension_requiresImport(ri, location), + ExpectError: testRequiresImportError("azurerm_virtual_machine_extension"), + }, + }, + }) +} + func TestAccAzureRMVirtualMachineExtension_concurrent(t *testing.T) { firstResourceName := "azurerm_virtual_machine_extension.test" secondResourceName := "azurerm_virtual_machine_extension.test2" @@ -185,7 +214,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -244,13 +273,33 @@ resource "azurerm_virtual_machine_extension" "test" { } SETTINGS - tags { + tags = { environment = "Production" } } `, rInt, location, rInt, rInt, rInt, rInt, rInt, rInt, rInt) } +func testAccAzureRMVirtualMachineExtension_requiresImport(rInt int, location string) string { + template := testAccAzureRMVirtualMachineExtension_basic(rInt, location) + return fmt.Sprintf(` +%s + + +resource "azurerm_virtual_machine_extension" "import" { + name = "${azurerm_virtual_machine_extension.test.name}" + resource_group_name = "${azurerm_virtual_machine_extension.test.resource_group_name}" + virtual_machine_name = "${azurerm_virtual_machine_extension.test.virtual_machine_name}" + location = "${azurerm_virtual_machine_extension.test.location}" + publisher = "${azurerm_virtual_machine_extension.test.publisher}" + type = "${azurerm_virtual_machine_extension.test.type}" + type_handler_version = "${azurerm_virtual_machine_extension.test.type_handler_version}" + settings = "${azurerm_virtual_machine_extension.test.settings}" + tags = "${azurerm_virtual_machine_extension.test.tags}" +} +`, template) +} + func testAccAzureRMVirtualMachineExtension_basicUpdate(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { @@ -291,7 +340,7 @@ resource "azurerm_storage_account" "test" { 
account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -350,7 +399,7 @@ resource "azurerm_virtual_machine_extension" "test" { } SETTINGS - tags { + tags = { environment = "Production" cost_center = "MSFT" } @@ -398,7 +447,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -516,7 +565,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -576,7 +625,7 @@ resource "azurerm_virtual_machine_extension" "test" { } SETTINGS - tags { + tags = { environment = "Production" } } diff --git a/azurerm/resource_arm_virtual_machine_managed_disks_test.go b/azurerm/resource_arm_virtual_machine_managed_disks_test.go index 8d4944b69f84..62b967390bf6 100644 --- a/azurerm/resource_arm_virtual_machine_managed_disks_test.go +++ b/azurerm/resource_arm_virtual_machine_managed_disks_test.go @@ -40,6 +40,36 @@ func TestAccAzureRMVirtualMachine_basicLinuxMachine_managedDisk_standardSSD(t *t }) } +func TestAccAzureRMVirtualMachine_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_virtual_machine.test" + var vm compute.VirtualMachine + + ri := tf.AccRandTimeInt() + location := testLocation() + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMVirtualMachineDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMVirtualMachine_basicLinuxMachine_managedDisk_standardSSD(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualMachineExists(resourceName, &vm), + ), + }, + { + Config: testAccAzureRMVirtualMachine_requiresImport(ri, location), + ExpectError: 
testRequiresImportError("azurerm_virtual_machine"), + }, + }, + }) +} + func TestAccAzureRMVirtualMachine_basicLinuxMachine_managedDisk_explicit(t *testing.T) { var vm compute.VirtualMachine ri := tf.AccRandTimeInt() @@ -641,7 +671,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -725,7 +755,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -745,7 +775,7 @@ resource "azurerm_resource_group" "test" { name = "${var.prefix}-resources" location = "%s" - tags { + tags = { source = "TestAccAzureRMVirtualMachine_winRMCerts" } } @@ -1005,7 +1035,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -1082,7 +1112,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -1155,7 +1185,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -1163,6 +1193,51 @@ resource "azurerm_virtual_machine" "test" { `, rInt, location, rInt, rInt, rInt, rInt, rInt, rInt) } +func testAccAzureRMVirtualMachine_requiresImport(rInt int, location string) string { + template := testAccAzureRMVirtualMachine_basicLinuxMachine_managedDisk_standardSSD(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_virtual_machine" "import" { + name = "${azurerm_virtual_machine.test.name}" + location = "${azurerm_virtual_machine.test.location}" + resource_group_name = "${azurerm_virtual_machine.test.resource_group_name}" + network_interface_ids = ["${azurerm_network_interface.test.id}"] + vm_size = "Standard_D1_v2" + + storage_image_reference { + publisher = 
"Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } + + storage_os_disk { + name = "osd-%d" + caching = "ReadWrite" + create_option = "FromImage" + disk_size_gb = "50" + managed_disk_type = "StandardSSD_LRS" + } + + os_profile { + computer_name = "hn%d" + admin_username = "testadmin" + admin_password = "Password1234!" + } + + os_profile_linux_config { + disable_password_authentication = false + } + + tags = { + environment = "Production" + cost-center = "Ops" + } +} +`, template, rInt, rInt) +} + func testAccAzureRMVirtualMachine_basicLinuxMachine_managedDisk_implicit(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { @@ -1227,7 +1302,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -1300,7 +1375,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -1398,7 +1473,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -1481,7 +1556,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -1563,7 +1638,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -1714,7 +1789,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -1793,7 +1868,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -1876,7 +1951,7 @@ resource 
"azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -1968,7 +2043,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -2038,7 +2113,7 @@ resource "azurerm_virtual_machine" "test" { os_profile_windows_config {} - tags { + tags = { environment = "Production" cost-center = "Ops" } diff --git a/azurerm/resource_arm_virtual_machine_scale_set.go b/azurerm/resource_arm_virtual_machine_scale_set.go index dd499f5fd6c8..f0f27b3b1ab0 100644 --- a/azurerm/resource_arm_virtual_machine_scale_set.go +++ b/azurerm/resource_arm_virtual_machine_scale_set.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/terraform/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -746,8 +747,22 @@ func resourceArmVirtualMachineScaleSetCreateUpdate(d *schema.ResourceData, meta log.Printf("[INFO] preparing arguments for Azure ARM Virtual Machine Scale Set creation.") name := d.Get("name").(string) - location := azureRMNormalizeLocation(d.Get("location").(string)) resGroup := d.Get("resource_group_name").(string) + + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resGroup, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Virtual Machine Scale Set %q (Resource Group %q): %s", name, resGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return 
tf.ImportAsExistsError("azurerm_virtual_machine_scale_set", *existing.ID) + } + } + + location := azureRMNormalizeLocation(d.Get("location").(string)) tags := d.Get("tags").(map[string]interface{}) zones := expandZones(d.Get("zones").([]interface{})) diff --git a/azurerm/resource_arm_virtual_machine_scale_set_test.go b/azurerm/resource_arm_virtual_machine_scale_set_test.go index e2dd7787f6c3..9b6c551219df 100644 --- a/azurerm/resource_arm_virtual_machine_scale_set_test.go +++ b/azurerm/resource_arm_virtual_machine_scale_set_test.go @@ -44,6 +44,34 @@ func TestAccAzureRMVirtualMachineScaleSet_basic(t *testing.T) { }) } +func TestAccAzureRMVirtualMachineScaleSet_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_virtual_machine_scale_set.test" + ri := tf.AccRandTimeInt() + location := testLocation() + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMVirtualMachineScaleSet_basic(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualMachineScaleSetExists(resourceName), + ), + }, + { + Config: testAccAzureRMVirtualMachineScaleSet_requiresImport(ri, location), + ExpectError: testRequiresImportError("azurerm_virtual_machine_scale_set"), + }, + }, + }) +} + func TestAccAzureRMVirtualMachineScaleSet_evictionPolicyDelete(t *testing.T) { resourceName := "azurerm_virtual_machine_scale_set.test" ri := tf.AccRandTimeInt() @@ -1290,7 +1318,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -1348,6 +1376,57 @@ resource "azurerm_virtual_machine_scale_set" "test" { `, rInt, location) } +func 
testAccAzureRMVirtualMachineScaleSet_requiresImport(rInt int, location string) string { + template := testAccAzureRMVirtualMachineScaleSet_basic(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_virtual_machine_scale_set" "import" { + name = "${azurerm_virtual_machine_scale_set.test.name}" + location = "${azurerm_virtual_machine_scale_set.test.location}" + resource_group_name = "${azurerm_virtual_machine_scale_set.test.resource_group_name}" + upgrade_policy_mode = "Manual" + + sku { + name = "Standard_D1_v2" + tier = "Standard" + capacity = 2 + } + + os_profile { + computer_name_prefix = "testvm-%d" + admin_username = "myadmin" + admin_password = "Passwword1234" + } + + network_profile { + name = "TestNetworkProfile-%d" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + primary = true + subnet_id = "${azurerm_subnet.test.id}" + } + } + + storage_profile_os_disk { + name = "osDiskProfile" + caching = "ReadWrite" + create_option = "FromImage" + vhd_containers = ["${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}"] + } + + storage_profile_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, template, rInt, rInt) +} + func testAccAzureRMVirtualMachineScaleSet_evictionPolicyDelete(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { @@ -1376,7 +1455,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -1464,7 +1543,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -1551,7 +1630,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -1649,7 +1728,7 @@ resource 
"azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -1736,7 +1815,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -1823,7 +1902,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -1910,7 +1989,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -2000,7 +2079,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -2090,7 +2169,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -2189,7 +2268,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -2284,7 +2363,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -2574,7 +2653,7 @@ resource "azurerm_virtual_machine_scale_set" "test" { version = "latest" } - tags { + tags = { ThisIs = "a test" } } @@ -3173,7 +3252,7 @@ resource "azurerm_application_gateway" "test" { backend_http_settings_name = "backend-http-1" } - tags { + tags = { environment = "tf01" } } @@ -4187,7 +4266,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -4366,7 +4445,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "Dev" } } @@ 
-4410,7 +4489,7 @@ resource "azurerm_virtual_machine" "testsource" { disable_password_authentication = false } - tags { + tags = { environment = "Dev" cost-center = "Ops" } @@ -4429,7 +4508,7 @@ resource "azurerm_image" "test" { caching = "None" } - tags { + tags = { environment = "Dev" cost-center = "Ops" } @@ -4505,7 +4584,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } diff --git a/azurerm/resource_arm_virtual_machine_test.go b/azurerm/resource_arm_virtual_machine_test.go index ca7e55dcd858..7442c6e6df78 100644 --- a/azurerm/resource_arm_virtual_machine_test.go +++ b/azurerm/resource_arm_virtual_machine_test.go @@ -288,7 +288,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -332,7 +332,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -384,7 +384,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -435,7 +435,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -488,7 +488,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -543,7 +543,7 @@ resource "azurerm_virtual_machine" "test" { identity_ids = ["${azurerm_user_assigned_identity.test.id}"] } - tags { + tags = { environment = "Production" cost-center = "Ops" } diff --git a/azurerm/resource_arm_virtual_machine_unmanaged_disks_test.go b/azurerm/resource_arm_virtual_machine_unmanaged_disks_test.go index a0bb0a447348..820a9ec73486 100644 --- 
a/azurerm/resource_arm_virtual_machine_unmanaged_disks_test.go +++ b/azurerm/resource_arm_virtual_machine_unmanaged_disks_test.go @@ -2,6 +2,7 @@ package azurerm import ( "fmt" + "log" "regexp" "strings" "testing" @@ -570,7 +571,9 @@ func TestAccAzureRMVirtualMachine_optionalOSProfile(t *testing.T) { Destroy: false, Config: prepConfig, Check: func(s *terraform.State) error { - testCheckAzureRMVirtualMachineDestroy(s) + if err := testCheckAzureRMVirtualMachineDestroy(s); err != nil { + log.Printf("[DEBUG] WARNING testCheckAzureRMVirtualMachineDestroy error'd: %v", err) + } return nil }, }, @@ -643,7 +646,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -687,7 +690,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -735,7 +738,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -789,7 +792,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -852,7 +855,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "Production" } } @@ -899,7 +902,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -947,7 +950,7 @@ resource "azurerm_virtual_machine" "test" { } } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -1039,7 +1042,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -1083,7 +1086,7 @@ resource "azurerm_virtual_machine" "test" { 
disable_password_authentication = false } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -1136,7 +1139,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -1191,7 +1194,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -1244,7 +1247,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -1298,7 +1301,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -1352,7 +1355,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -1404,7 +1407,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -1452,7 +1455,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -1495,7 +1498,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "Production" } } @@ -1542,7 +1545,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -1628,7 +1631,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -1715,7 +1718,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = 
"staging" } } @@ -1808,7 +1811,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -1901,7 +1904,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -1989,7 +1992,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -2083,7 +2086,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -2177,7 +2180,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -2264,7 +2267,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -2309,7 +2312,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -2357,7 +2360,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -2402,7 +2405,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -2450,7 +2453,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -2494,7 +2497,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -2542,7 +2545,7 @@ resource "azurerm_storage_account" "test" { 
account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -2592,7 +2595,7 @@ resource "azurerm_virtual_machine" "test" { product = "vlm-azure" } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -2823,7 +2826,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -2868,7 +2871,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "Production" cost-center = "Ops" } @@ -2916,7 +2919,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -2970,7 +2973,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -2997,7 +3000,7 @@ resource "azurerm_virtual_machine" "test" { create_option = "Attach" } - tags { + tags = { environment = "Production" cost-center = "Ops" } diff --git a/azurerm/resource_arm_virtual_network.go b/azurerm/resource_arm_virtual_network.go index 1dde17e85b65..830c5fa28ef0 100644 --- a/azurerm/resource_arm_virtual_network.go +++ b/azurerm/resource_arm_virtual_network.go @@ -8,6 +8,7 @@ import ( "net/http" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network" "github.com/hashicorp/terraform/helper/hashcode" @@ -119,8 +120,22 @@ func resourceArmVirtualNetworkCreateUpdate(d *schema.ResourceData, meta interfac log.Printf("[INFO] preparing arguments for Azure ARM virtual network creation.") name := d.Get("name").(string) - location := azureRMNormalizeLocation(d.Get("location").(string)) resGroup := 
d.Get("resource_group_name").(string) + + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resGroup, name, "") + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Virtual Network %q (Resource Group %q): %s", name, resGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_virtual_network", *existing.ID) + } + } + + location := azureRMNormalizeLocation(d.Get("location").(string)) tags := d.Get("tags").(map[string]interface{}) vnetProperties, vnetPropsErr := expandVirtualNetworkProperties(ctx, d, meta) if vnetPropsErr != nil { @@ -412,36 +427,19 @@ func resourceAzureSubnetHash(v interface{}) int { } func getExistingSubnet(ctx context.Context, resGroup string, vnetName string, subnetName string, meta interface{}) (*network.Subnet, error) { - //attempt to retrieve existing subnet from the server - existingSubnet := network.Subnet{} subnetClient := meta.(*ArmClient).subnetClient resp, err := subnetClient.Get(ctx, resGroup, vnetName, subnetName, "") if err != nil { if resp.StatusCode == http.StatusNotFound { - return &existingSubnet, nil + return &network.Subnet{}, nil } //raise an error if there was an issue other than 404 in getting subnet properties return nil, err } - existingSubnet.SubnetPropertiesFormat = &network.SubnetPropertiesFormat{ - AddressPrefix: resp.SubnetPropertiesFormat.AddressPrefix, - } - - if resp.SubnetPropertiesFormat.NetworkSecurityGroup != nil { - existingSubnet.SubnetPropertiesFormat.NetworkSecurityGroup = resp.SubnetPropertiesFormat.NetworkSecurityGroup - } - - if resp.SubnetPropertiesFormat.RouteTable != nil { - existingSubnet.SubnetPropertiesFormat.RouteTable = resp.SubnetPropertiesFormat.RouteTable - } - - if resp.SubnetPropertiesFormat.IPConfigurations != nil { - existingSubnet.SubnetPropertiesFormat.IPConfigurations = resp.SubnetPropertiesFormat.IPConfigurations - } 
- - return &existingSubnet, nil + // Return it directly rather than copy the fields to prevent potential uncovered properties (for example, `ServiceEndpoints` mentioned in #1619) + return &resp, nil } func expandAzureRmVirtualNetworkVirtualNetworkSecurityGroupNames(d *schema.ResourceData) ([]string, error) { diff --git a/azurerm/resource_arm_virtual_network_gateway.go b/azurerm/resource_arm_virtual_network_gateway.go index a49ef701298d..eeb0a5283b3c 100644 --- a/azurerm/resource_arm_virtual_network_gateway.go +++ b/azurerm/resource_arm_virtual_network_gateway.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/terraform/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -273,8 +274,22 @@ func resourceArmVirtualNetworkGatewayCreateUpdate(d *schema.ResourceData, meta i log.Printf("[INFO] preparing arguments for AzureRM Virtual Network Gateway creation.") name := d.Get("name").(string) - location := azureRMNormalizeLocation(d.Get("location").(string)) resGroup := d.Get("resource_group_name").(string) + + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resGroup, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Virtual Network Gateway %q (Resource Group %q): %s", name, resGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_virtual_network_gateway", *existing.ID) + } + } + + location := azureRMNormalizeLocation(d.Get("location").(string)) tags := d.Get("tags").(map[string]interface{}) properties, err := 
getArmVirtualNetworkGatewayProperties(d) diff --git a/azurerm/resource_arm_virtual_network_gateway_connection.go b/azurerm/resource_arm_virtual_network_gateway_connection.go index 27b89244e03c..33ad6bfb7a3a 100644 --- a/azurerm/resource_arm_virtual_network_gateway_connection.go +++ b/azurerm/resource_arm_virtual_network_gateway_connection.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/terraform/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -231,8 +232,22 @@ func resourceArmVirtualNetworkGatewayConnectionCreateUpdate(d *schema.ResourceDa log.Printf("[INFO] preparing arguments for AzureRM Virtual Network Gateway Connection creation.") name := d.Get("name").(string) - location := azureRMNormalizeLocation(d.Get("location").(string)) resGroup := d.Get("resource_group_name").(string) + + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resGroup, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Virtual Network Gateway Connection %q (Resource Group %q): %s", name, resGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_virtual_network_gateway_connection", *existing.ID) + } + } + + location := azureRMNormalizeLocation(d.Get("location").(string)) tags := d.Get("tags").(map[string]interface{}) properties, err := getArmVirtualNetworkGatewayConnectionProperties(d) diff --git a/azurerm/resource_arm_virtual_network_gateway_connection_test.go b/azurerm/resource_arm_virtual_network_gateway_connection_test.go index 
0756053063f1..275903986a9f 100644 --- a/azurerm/resource_arm_virtual_network_gateway_connection_test.go +++ b/azurerm/resource_arm_virtual_network_gateway_connection_test.go @@ -23,7 +23,7 @@ func TestAccAzureRMVirtualNetworkGatewayConnection_sitetosite(t *testing.T) { { Config: config, Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualNetworkGatewayConnectionExists("azurerm_virtual_network_gateway_connection.test"), + testCheckAzureRMVirtualNetworkGatewayConnectionExists(resourceName), ), }, { @@ -35,6 +35,35 @@ func TestAccAzureRMVirtualNetworkGatewayConnection_sitetosite(t *testing.T) { }) } +func TestAccAzureRMVirtualNetworkGatewayConnection_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_virtual_network_gateway_connection.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMVirtualNetworkGatewayConnectionDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMVirtualNetworkGatewayConnection_sitetosite(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualNetworkGatewayConnectionExists(resourceName), + ), + }, + { + Config: testAccAzureRMVirtualNetworkGatewayConnection_requiresImport(ri, location), + ExpectError: testRequiresImportError("azurerm_virtual_network_gateway_connection"), + }, + }, + }) +} + func TestAccAzureRMVirtualNetworkGatewayConnection_vnettonet(t *testing.T) { firstResourceName := "azurerm_virtual_network_gateway_connection.test_1" secondResourceName := "azurerm_virtual_network_gateway_connection.test_2" @@ -244,6 +273,23 @@ resource "azurerm_virtual_network_gateway_connection" "test" { `, rInt, location) } +func testAccAzureRMVirtualNetworkGatewayConnection_requiresImport(rInt int, location string) string 
{ + template := testAccAzureRMVirtualNetworkGatewayConnection_sitetosite(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_virtual_network_gateway_connection" "import" { + name = "${azurerm_virtual_network_gateway_connection.test.name}" + location = "${azurerm_virtual_network_gateway_connection.test.location}" + resource_group_name = "${azurerm_virtual_network_gateway_connection.test.resource_group_name}" + type = "${azurerm_virtual_network_gateway_connection.test.type}" + virtual_network_gateway_id = "${azurerm_virtual_network_gateway_connection.test.virtual_network_gateway_id}" + local_network_gateway_id = "${azurerm_virtual_network_gateway_connection.test.local_network_gateway_id}" + shared_key = "${azurerm_virtual_network_gateway_connection.test.shared_key}" +} +`, template) +} + func testAccAzureRMVirtualNetworkGatewayConnection_vnettovnet(rInt, rInt2 int, sharedKey, location, altLocation string) string { return fmt.Sprintf(` variable "random1" { diff --git a/azurerm/resource_arm_virtual_network_gateway_test.go b/azurerm/resource_arm_virtual_network_gateway_test.go index 1adc6c66ecfa..8ef796706bb0 100644 --- a/azurerm/resource_arm_virtual_network_gateway_test.go +++ b/azurerm/resource_arm_virtual_network_gateway_test.go @@ -36,6 +36,35 @@ func TestAccAzureRMVirtualNetworkGateway_basic(t *testing.T) { }) } +func TestAccAzureRMVirtualNetworkGateway_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_virtual_network_gateway.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMVirtualNetworkGatewayDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMVirtualNetworkGateway_basic(ri, location), + Check: resource.ComposeTestCheckFunc( + 
testCheckAzureRMVirtualNetworkGatewayExists(resourceName), + ), + }, + { + Config: testAccAzureRMVirtualNetworkGateway_requiresImport(ri, location), + ExpectError: testRequiresImportError("azurerm_virtual_network_gateway"), + }, + }, + }) +} + func TestAccAzureRMVirtualNetworkGateway_lowerCaseSubnetName(t *testing.T) { ri := tf.AccRandTimeInt() resourceName := "azurerm_virtual_network_gateway.test" @@ -348,6 +377,28 @@ resource "azurerm_virtual_network_gateway" "test" { `, rInt, location, rInt, rInt, rInt) } +func testAccAzureRMVirtualNetworkGateway_requiresImport(rInt int, location string) string { + template := testAccAzureRMVirtualNetworkGateway_basic(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_virtual_network_gateway" "import" { + name = "${azurerm_virtual_network_gateway.test.name}" + location = "${azurerm_virtual_network_gateway.test.location}" + resource_group_name = "${azurerm_virtual_network_gateway.test.resource_group_name}" + type = "${azurerm_virtual_network_gateway.test.type}" + vpn_type = "${azurerm_virtual_network_gateway.test.vpn_type}" + sku = "${azurerm_virtual_network_gateway.test.sku}" + + ip_configuration { + public_ip_address_id = "${azurerm_public_ip.test.id}" + private_ip_address_allocation = "Dynamic" + subnet_id = "${azurerm_subnet.test.id}" + } +} +`, template) +} + func testAccAzureRMVirtualNetworkGateway_lowerCaseSubnetName(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { diff --git a/azurerm/resource_arm_virtual_network_peering.go b/azurerm/resource_arm_virtual_network_peering.go index 9f7a6e97e583..095085f9fc83 100644 --- a/azurerm/resource_arm_virtual_network_peering.go +++ b/azurerm/resource_arm_virtual_network_peering.go @@ -7,6 +7,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network" "github.com/hashicorp/terraform/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -82,6 +83,19 @@ func resourceArmVirtualNetworkPeeringCreateUpdate(d *schema.ResourceData, meta i vnetName := d.Get("virtual_network_name").(string) resGroup := d.Get("resource_group_name").(string) + if requireResourcesToBeImported && d.IsNewResource() { + existing, err := client.Get(ctx, resGroup, vnetName, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Peering %q (Virtual Network %q / Resource Group %q): %s", name, vnetName, resGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_virtual_network_peering", *existing.ID) + } + } + peer := network.VirtualNetworkPeering{ Name: &name, VirtualNetworkPeeringPropertiesFormat: getVirtualNetworkPeeringProperties(d), diff --git a/azurerm/resource_arm_virtual_network_peering_test.go b/azurerm/resource_arm_virtual_network_peering_test.go index bed4e799a776..0a740831e5b8 100644 --- a/azurerm/resource_arm_virtual_network_peering_test.go +++ b/azurerm/resource_arm_virtual_network_peering_test.go @@ -40,6 +40,37 @@ func TestAccAzureRMVirtualNetworkPeering_basic(t *testing.T) { }) } +func TestAccAzureRMVirtualNetworkPeering_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + firstResourceName := "azurerm_virtual_network_peering.test1" + secondResourceName := "azurerm_virtual_network_peering.test2" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMVirtualNetworkPeeringDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMVirtualNetworkPeering_basic(ri, location), + Check: resource.ComposeTestCheckFunc( + 
testCheckAzureRMVirtualNetworkPeeringExists(firstResourceName), + testCheckAzureRMVirtualNetworkPeeringExists(secondResourceName), + ), + }, + { + Config: testAccAzureRMVirtualNetworkPeering_requiresImport(ri, location), + ExpectError: testRequiresImportError("azurerm_virtual_network_peering"), + }, + }, + }) +} + func TestAccAzureRMVirtualNetworkPeering_disappears(t *testing.T) { firstResourceName := "azurerm_virtual_network_peering.test1" secondResourceName := "azurerm_virtual_network_peering.test2" @@ -236,6 +267,21 @@ resource "azurerm_virtual_network_peering" "test2" { `, rInt, location, rInt, rInt, rInt, rInt) } +func testAccAzureRMVirtualNetworkPeering_requiresImport(rInt int, location string) string { + template := testAccAzureRMVirtualNetworkPeering_basic(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_virtual_network_peering" "import" { + name = "${azurerm_virtual_network_peering.test1.name}" + resource_group_name = "${azurerm_virtual_network_peering.test1.resource_group_name}" + virtual_network_name = "${azurerm_virtual_network_peering.test1.virtual_network_name}" + remote_virtual_network_id = "${azurerm_virtual_network_peering.test1.remote_virtual_network_id}" + allow_virtual_network_access = "${azurerm_virtual_network_peering.test1.allow_virtual_network_access}" +} +`, template) +} + func testAccAzureRMVirtualNetworkPeering_basicUpdate(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { diff --git a/azurerm/resource_arm_virtual_network_test.go b/azurerm/resource_arm_virtual_network_test.go index 66818483da1f..a99def1dc76c 100644 --- a/azurerm/resource_arm_virtual_network_test.go +++ b/azurerm/resource_arm_virtual_network_test.go @@ -38,6 +38,35 @@ func TestAccAzureRMVirtualNetwork_basic(t *testing.T) { }) } +func TestAccAzureRMVirtualNetwork_requiresImport(t *testing.T) { + if !requireResourcesToBeImported { + t.Skip("Skipping since resources aren't required to be imported") + return + } + 
+ resourceName := "azurerm_virtual_network.test" + ri := tf.AccRandTimeInt() + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMVirtualNetworkDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMVirtualNetwork_basic(ri, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualNetworkExists(resourceName), + ), + }, + { + Config: testAccAzureRMVirtualNetwork_requiresImport(ri, location), + ExpectError: testRequiresImportError("azurerm_virtual_network"), + }, + }, + }) +} + func TestAccAzureRMVirtualNetwork_ddosProtectionPlan(t *testing.T) { resourceName := "azurerm_virtual_network.test" ri := tf.AccRandTimeInt() @@ -253,6 +282,25 @@ resource "azurerm_virtual_network" "test" { `, rInt, location, rInt) } +func testAccAzureRMVirtualNetwork_requiresImport(rInt int, location string) string { + template := testAccAzureRMVirtualNetwork_basic(rInt, location) + return fmt.Sprintf(` +%s + +resource "azurerm_virtual_network" "import" { + name = "${azurerm_virtual_network.test.name}" + location = "${azurerm_virtual_network.test.location}" + resource_group_name = "${azurerm_virtual_network.test.resource_group_name}" + address_space = ["10.0.0.0/16"] + + subnet { + name = "subnet1" + address_prefix = "10.0.1.0/24" + } +} +`, template) +} + func testAccAzureRMVirtualNetwork_ddosProtectionPlan(rInt int, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { @@ -303,7 +351,7 @@ resource "azurerm_virtual_network" "test" { address_prefix = "10.0.1.0/24" } - tags { + tags = { environment = "Production" cost_center = "MSFT" } @@ -329,7 +377,7 @@ resource "azurerm_virtual_network" "test" { address_prefix = "10.0.1.0/24" } - tags { + tags = { environment = "staging" } } @@ -357,7 +405,7 @@ resource "azurerm_virtual_network" "test" { address_space = ["${var.network_cidr}"] location = 
"${azurerm_resource_group.test.location}" - tags { + tags = { environment = "${var.environment}" } } @@ -395,7 +443,7 @@ resource "azurerm_network_security_group" "test" { destination_address_prefix = "*" } - tags { + tags = { environment = "${var.environment}" } } diff --git a/examples/aci-image-registry-credentials/main.tf b/examples/aci-image-registry-credentials/main.tf index 97f2f868e7f4..a2049a29d02f 100644 --- a/examples/aci-image-registry-credentials/main.tf +++ b/examples/aci-image-registry-credentials/main.tf @@ -37,7 +37,7 @@ resource "azurerm_container_group" "aci-test" { memory = "1.5" } - tags { + tags = { environment = "testing" } } diff --git a/examples/aci-linux-multi/main.tf b/examples/aci-linux-multi/main.tf index e7a9a8f3e8ad..5596f22cd3ae 100644 --- a/examples/aci-linux-multi/main.tf +++ b/examples/aci-linux-multi/main.tf @@ -32,7 +32,7 @@ resource "azurerm_container_group" "aci-example" { memory = "1.5" } - tags { + tags = { environment = "testing" } } diff --git a/examples/aci-linux-volume-mount/main.tf b/examples/aci-linux-volume-mount/main.tf index f83926796e51..01354fcea61e 100644 --- a/examples/aci-linux-volume-mount/main.tf +++ b/examples/aci-linux-volume-mount/main.tf @@ -53,7 +53,7 @@ resource "azurerm_container_group" "aci-example" { } } - tags { + tags = { environment = "testing" } } diff --git a/examples/acs-kubernetes/main.tf b/examples/acs-kubernetes/main.tf index 3a5c368128ac..f973790f5a62 100644 --- a/examples/acs-kubernetes/main.tf +++ b/examples/acs-kubernetes/main.tf @@ -2,7 +2,7 @@ resource "azurerm_resource_group" "resource_group" { name = "${var.resource_group_name}" location = "${var.resource_group_location}" - tags { + tags = { Source = "Azure Quickstarts for Terraform" } } @@ -42,7 +42,7 @@ resource "azurerm_container_service" "container_service" { enabled = false } - tags { + tags = { Source = "Azure Quickstarts for Terraform" } } diff --git a/examples/app-service/README.md b/examples/app-service/README.md index 
38309b79b4e5..117ae91fdc6f 100644 --- a/examples/app-service/README.md +++ b/examples/app-service/README.md @@ -1,15 +1,3 @@ -# Azure App Service Sample +## Examples of using the App Service Resources -Sample to deploy an App Service within an App Service Plan. - -## Creates - -1. A Resource Group -2. An [App Service Plan](https://docs.microsoft.com/en-us/azure/app-service/azure-web-sites-web-hosting-plans-in-depth-overview) -3. An [App Service](https://azure.microsoft.com/en-gb/services/app-service/) configured for usage with .NET 4.x Application - -## Usage - -- Provide values to all variables (credentials and names). -- Create with `terraform apply` -- Destroy all with `terraform destroy --force` +This folder contains examples of using the App Service resources. diff --git a/examples/app-service/app.tf b/examples/app-service/app.tf deleted file mode 100644 index 8316a2bf49d0..000000000000 --- a/examples/app-service/app.tf +++ /dev/null @@ -1,55 +0,0 @@ -# Configure the Microsoft Azure Provider -provider "azurerm" { - # if you're using a Service Principal (shared account) then either set the environment variables, or fill these in: # subscription_id = "..." # client_id = "..." # client_secret = "..." # tenant_id = "..." 
-} - -resource "azurerm_resource_group" "default" { - name = "${var.resource_group_name}" - location = "${var.location}" -} - -resource "random_integer" "ri" { - min = 10000 - max = 99999 -} - -resource "azurerm_app_service_plan" "default" { - name = "tfex-appservice-${random_integer.ri.result}-plan" - location = "${azurerm_resource_group.default.location}" - resource_group_name = "${azurerm_resource_group.default.name}" - - sku { - tier = "${var.app_service_plan_sku_tier}" - size = "${var.app_service_plan_sku_size}" - } -} - -resource "azurerm_app_service" "default" { - name = "tfex-appservice-${random_integer.ri.result}" - location = "${azurerm_resource_group.default.location}" - resource_group_name = "${azurerm_resource_group.default.name}" - app_service_plan_id = "${azurerm_app_service_plan.default.id}" - - site_config { - dotnet_framework_version = "v4.0" - remote_debugging_enabled = true - remote_debugging_version = "VS2015" - } - - # app_settings { - # "SOME_KEY" = "some-value" - # } - # connection_string { - # name = "Database" - # type = "SQLServer" - # value = "Server=some-server.mydomain.com;Integrated Security=SSPI" - # } -} - -output "app_service_name" { - value = "${azurerm_app_service.default.name}" -} - -output "app_service_default_hostname" { - value = "https://${azurerm_app_service.default.default_site_hostname}" -} diff --git a/examples/app-service/docker-basic/README.md b/examples/app-service/docker-basic/README.md new file mode 100644 index 000000000000..b94876b300dc --- /dev/null +++ b/examples/app-service/docker-basic/README.md @@ -0,0 +1,25 @@ +# Example: a Linux App Service running a Docker container + +This example provisions a Linux App Service which runs a single Docker container. + +### Notes + +* The Container is launched on the first HTTP Request, which can take a while. +* Continuous Deployment of a single Docker Container can be achieved using the App Setting `DOCKER_ENABLE_CI` to `true`. 
+* If you're not using App Service Slots and Deployments are handled outside of Terraform - [it's possible to ignore changes to specific fields in the configuration using `ignore_changes` within Terraform's `lifecycle` block](https://www.terraform.io/docs/configuration/resources.html#lifecycle), for example: + +```hcl +resource "azurerm_app_service" "test" { + # ... + site_config = { + # ... + linux_fx_version = "DOCKER|appsvcsample/python-helloworld:0.1.2" + } + + lifecycle { + ignore_changes = [ + "site_config.0.linux_fx_version", # deployments are made outside of Terraform + ] + } +} +``` diff --git a/examples/app-service/docker-basic/main.tf b/examples/app-service/docker-basic/main.tf new file mode 100644 index 000000000000..70136854467c --- /dev/null +++ b/examples/app-service/docker-basic/main.tf @@ -0,0 +1,42 @@ +provider "azurerm" { + # if you're using a Service Principal (shared account) then either set the environment variables, or fill these in: + # subscription_id = "..." + # client_id = "..." + # client_secret = "..." + # tenant_id = "..." 
+} + +resource "azurerm_resource_group" "main" { + name = "${var.prefix}-resources" + location = "${var.location}" +} + +resource "azurerm_app_service_plan" "main" { + name = "${var.prefix}-asp" + location = "${azurerm_resource_group.main.location}" + resource_group_name = "${azurerm_resource_group.main.name}" + kind = "Linux" + reserved = true + + sku { + tier = "Standard" + size = "S1" + } +} + +resource "azurerm_app_service" "main" { + name = "${var.prefix}-appservice" + location = "${azurerm_resource_group.main.location}" + resource_group_name = "${azurerm_resource_group.main.name}" + app_service_plan_id = "${azurerm_app_service_plan.main.id}" + + site_config { + app_command_line = "" + linux_fx_version = "DOCKER|appsvcsample/python-helloworld:latest" + } + + app_settings = { + "WEBSITES_ENABLE_APP_SERVICE_STORAGE" = "false", + "DOCKER_REGISTRY_SERVER_URL" = "https://index.docker.io" + } +} diff --git a/examples/app-service/docker-basic/outputs.tf b/examples/app-service/docker-basic/outputs.tf new file mode 100644 index 000000000000..0167f4194ecd --- /dev/null +++ b/examples/app-service/docker-basic/outputs.tf @@ -0,0 +1,7 @@ +output "app_service_name" { + value = "${azurerm_app_service.main.name}" +} + +output "app_service_default_hostname" { + value = "https://${azurerm_app_service.main.default_site_hostname}" +} diff --git a/examples/app-service/docker-basic/variables.tf b/examples/app-service/docker-basic/variables.tf new file mode 100644 index 000000000000..fcd1745ba3e7 --- /dev/null +++ b/examples/app-service/docker-basic/variables.tf @@ -0,0 +1,7 @@ +variable "prefix" { + description = "The prefix used for all resources in this example" +} + +variable "location" { + description = "The Azure location where all resources in this example should be created" +} diff --git a/examples/app-service/docker-compose/README.md b/examples/app-service/docker-compose/README.md new file mode 100644 index 000000000000..74386d994221 --- /dev/null +++ 
b/examples/app-service/docker-compose/README.md @@ -0,0 +1,24 @@ +# Example: a Linux App Service running multiple containers from a Docker Compose file. + +This example provisions a Linux App Service which runs multiple Docker Containers from a Docker Compose file. + +### Notes + +* The Container is launched on the first HTTP Request, which can take a while. +* If you're not using App Service Slots and Deployments are handled outside of Terraform - [it's possible to ignore changes to specific fields in the configuration using `ignore_changes` within Terraform's `lifecycle` block](https://www.terraform.io/docs/configuration/resources.html#lifecycle), for example: + +```hcl +resource "azurerm_app_service" "test" { + # ... + site_config = { + # ... + linux_fx_version = "COMPOSE|${base64encode(file("compose.yml"))}" + } + + lifecycle { + ignore_changes = [ + "site_config.0.linux_fx_version", # deployments are made outside of Terraform + ] + } +} +``` diff --git a/examples/app-service/docker-compose/docker-compose.yml b/examples/app-service/docker-compose/docker-compose.yml new file mode 100644 index 000000000000..6962f3d80eb3 --- /dev/null +++ b/examples/app-service/docker-compose/docker-compose.yml @@ -0,0 +1,27 @@ +version: '3.3' + +services: + db: + image: mysql:5.7 + volumes: + - db_data:/var/lib/mysql + restart: always + environment: + MYSQL_ROOT_PASSWORD: somewordpress + MYSQL_DATABASE: wordpress + MYSQL_USER: wordpress + MYSQL_PASSWORD: wordpress + + wordpress: + depends_on: + - db + image: wordpress:latest + ports: + - "8000:80" + restart: always + environment: + WORDPRESS_DB_HOST: db:3306 + WORDPRESS_DB_USER: wordpress + WORDPRESS_DB_PASSWORD: wordpress +volumes: + db_data: diff --git a/examples/app-service/docker-compose/main.tf b/examples/app-service/docker-compose/main.tf new file mode 100644 index 000000000000..f3223a220421 --- /dev/null +++ b/examples/app-service/docker-compose/main.tf @@ -0,0 +1,41 @@ +provider "azurerm" { + # if you're using a Service 
Principal (shared account) then either set the environment variables, or fill these in: + # subscription_id = "..." + # client_id = "..." + # client_secret = "..." + # tenant_id = "..." +} + +resource "azurerm_resource_group" "main" { + name = "${var.prefix}-resources" + location = "${var.location}" +} + +resource "azurerm_app_service_plan" "main" { + name = "${var.prefix}-asp" + location = "${azurerm_resource_group.main.location}" + resource_group_name = "${azurerm_resource_group.main.name}" + kind = "Linux" + reserved = true + + sku { + tier = "Standard" + size = "S1" + } +} + +resource "azurerm_app_service" "main" { + name = "${var.prefix}-appservice" + location = "${azurerm_resource_group.main.location}" + resource_group_name = "${azurerm_resource_group.main.name}" + app_service_plan_id = "${azurerm_app_service_plan.main.id}" + + site_config { + app_command_line = "" + linux_fx_version = "COMPOSE|${base64encode(file("docker-compose.yml"))}" + } + + app_settings = { + "WEBSITES_ENABLE_APP_SERVICE_STORAGE" = "false" + } +} diff --git a/examples/app-service/docker-compose/outputs.tf b/examples/app-service/docker-compose/outputs.tf new file mode 100644 index 000000000000..0167f4194ecd --- /dev/null +++ b/examples/app-service/docker-compose/outputs.tf @@ -0,0 +1,7 @@ +output "app_service_name" { + value = "${azurerm_app_service.main.name}" +} + +output "app_service_default_hostname" { + value = "https://${azurerm_app_service.main.default_site_hostname}" +} diff --git a/examples/app-service/docker-compose/variables.tf b/examples/app-service/docker-compose/variables.tf new file mode 100644 index 000000000000..fcd1745ba3e7 --- /dev/null +++ b/examples/app-service/docker-compose/variables.tf @@ -0,0 +1,7 @@ +variable "prefix" { + description = "The prefix used for all resources in this example" +} + +variable "location" { + description = "The Azure location where all resources in this example should be created" +} diff --git 
a/examples/app-service/docker-kubernetes/README.md b/examples/app-service/docker-kubernetes/README.md new file mode 100644 index 000000000000..b4a2c449ecec --- /dev/null +++ b/examples/app-service/docker-kubernetes/README.md @@ -0,0 +1,24 @@ +# Example: a Linux App Service running multiple containers from a Kubernetes Manifest + +This example provisions a Linux App Service which runs multiple Docker Containers from a Kubernetes Manifest. + +### Notes + +* The Container is launched on the first HTTP Request, which can take a while. +* If you're not using App Service Slots and Deployments are handled outside of Terraform - [it's possible to ignore changes to specific fields in the configuration using `ignore_changes` within Terraform's `lifecycle` block](https://www.terraform.io/docs/configuration/resources.html#lifecycle), for example: + +```hcl +resource "azurerm_app_service" "test" { + # ... + site_config = { + # ... + linux_fx_version = "KUBE|${base64encode(file("kubernetes.yml"))}" + } + + lifecycle { + ignore_changes = [ + "site_config.0.linux_fx_version", # deployments are made outside of Terraform + ] + } +} +``` diff --git a/examples/app-service/docker-kubernetes/kubernetes.yml b/examples/app-service/docker-kubernetes/kubernetes.yml new file mode 100644 index 000000000000..87f4bb8bbcca --- /dev/null +++ b/examples/app-service/docker-kubernetes/kubernetes.yml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + name: python +spec: + containers: + - name: web + image: appsvcsample/flaskapp:kube + # source code for this image repo comes from "Get started with Docker Compose" on docker.com + ports: + - containerPort: 80 + - name: redis + image: redis:alpine diff --git a/examples/app-service/docker-kubernetes/main.tf b/examples/app-service/docker-kubernetes/main.tf new file mode 100644 index 000000000000..c3e45d4fc17a --- /dev/null +++ b/examples/app-service/docker-kubernetes/main.tf @@ -0,0 +1,41 @@ +provider "azurerm" { + # if you're using a Service 
Principal (shared account) then either set the environment variables, or fill these in: + # subscription_id = "..." + # client_id = "..." + # client_secret = "..." + # tenant_id = "..." +} + +resource "azurerm_resource_group" "main" { + name = "${var.prefix}-resources" + location = "${var.location}" +} + +resource "azurerm_app_service_plan" "main" { + name = "${var.prefix}-asp" + location = "${azurerm_resource_group.main.location}" + resource_group_name = "${azurerm_resource_group.main.name}" + kind = "Linux" + reserved = true + + sku { + tier = "Standard" + size = "S1" + } +} + +resource "azurerm_app_service" "main" { + name = "${var.prefix}-appservice" + location = "${azurerm_resource_group.main.location}" + resource_group_name = "${azurerm_resource_group.main.name}" + app_service_plan_id = "${azurerm_app_service_plan.main.id}" + + site_config { + app_command_line = "" + linux_fx_version = "KUBE|${base64encode(file("kubernetes.yml"))}" + } + + app_settings = { + "WEBSITES_ENABLE_APP_SERVICE_STORAGE" = "false" + } +} diff --git a/examples/app-service/docker-kubernetes/outputs.tf b/examples/app-service/docker-kubernetes/outputs.tf new file mode 100644 index 000000000000..0167f4194ecd --- /dev/null +++ b/examples/app-service/docker-kubernetes/outputs.tf @@ -0,0 +1,7 @@ +output "app_service_name" { + value = "${azurerm_app_service.main.name}" +} + +output "app_service_default_hostname" { + value = "https://${azurerm_app_service.main.default_site_hostname}" +} diff --git a/examples/app-service/docker-kubernetes/variables.tf b/examples/app-service/docker-kubernetes/variables.tf new file mode 100644 index 000000000000..fcd1745ba3e7 --- /dev/null +++ b/examples/app-service/docker-kubernetes/variables.tf @@ -0,0 +1,7 @@ +variable "prefix" { + description = "The prefix used for all resources in this example" +} + +variable "location" { + description = "The Azure location where all resources in this example should be created" +} diff --git 
a/examples/app-service/linux-basic/README.md b/examples/app-service/linux-basic/README.md new file mode 100644 index 000000000000..7b0f2fc01aaa --- /dev/null +++ b/examples/app-service/linux-basic/README.md @@ -0,0 +1,3 @@ +# Example: a Basic Linux App Service + +This example provisions a basic Linux App Service. diff --git a/examples/app-service/linux-basic/main.tf b/examples/app-service/linux-basic/main.tf new file mode 100644 index 000000000000..db5940d34a34 --- /dev/null +++ b/examples/app-service/linux-basic/main.tf @@ -0,0 +1,38 @@ +provider "azurerm" { + # if you're using a Service Principal (shared account) then either set the environment variables, or fill these in: + # subscription_id = "..." + # client_id = "..." + # client_secret = "..." + # tenant_id = "..." +} + +resource "azurerm_resource_group" "main" { + name = "${var.prefix}-resources" + location = "${var.location}" +} + +resource "azurerm_app_service_plan" "main" { + name = "${var.prefix}-asp" + location = "${azurerm_resource_group.main.location}" + resource_group_name = "${azurerm_resource_group.main.name}" + kind = "Linux" + reserved = true + + sku { + tier = "Standard" + size = "S1" + } +} + +resource "azurerm_app_service" "main" { + name = "${var.prefix}-appservice" + location = "${azurerm_resource_group.main.location}" + resource_group_name = "${azurerm_resource_group.main.name}" + app_service_plan_id = "${azurerm_app_service_plan.main.id}" + + site_config { + dotnet_framework_version = "v4.0" + remote_debugging_enabled = true + remote_debugging_version = "VS2015" + } +} diff --git a/examples/app-service/linux-basic/outputs.tf b/examples/app-service/linux-basic/outputs.tf new file mode 100644 index 000000000000..0167f4194ecd --- /dev/null +++ b/examples/app-service/linux-basic/outputs.tf @@ -0,0 +1,7 @@ +output "app_service_name" { + value = "${azurerm_app_service.main.name}" +} + +output "app_service_default_hostname" { + value = "https://${azurerm_app_service.main.default_site_hostname}" 
+} diff --git a/examples/app-service/linux-basic/variables.tf b/examples/app-service/linux-basic/variables.tf new file mode 100644 index 000000000000..fcd1745ba3e7 --- /dev/null +++ b/examples/app-service/linux-basic/variables.tf @@ -0,0 +1,7 @@ +variable "prefix" { + description = "The prefix used for all resources in this example" +} + +variable "location" { + description = "The Azure location where all resources in this example should be created" +} diff --git a/examples/app-service/variables.tf b/examples/app-service/variables.tf deleted file mode 100644 index de3130794e0f..000000000000 --- a/examples/app-service/variables.tf +++ /dev/null @@ -1,23 +0,0 @@ -variable "resource_group_name" { - type = "string" - description = "Name of the azure resource group." - default = "tfex-appserviceplan" -} - -variable "location" { - type = "string" - description = "Location of the azure resource group." - default = "westus" -} - -variable "app_service_plan_sku_tier" { - type = "string" - description = "SKU tier of the App Service Plan" - default = "Basic" # Basic | Standard | ... -} - -variable "app_service_plan_sku_size" { - type = "string" - description = "SKU size of the App Service Plan" - default = "B1" # B1 | S1 | ... -} diff --git a/examples/app-service/windows-basic/README.md b/examples/app-service/windows-basic/README.md new file mode 100644 index 000000000000..18f374af51e4 --- /dev/null +++ b/examples/app-service/windows-basic/README.md @@ -0,0 +1,3 @@ +# Example: a Basic (Windows) App Service + +This example provisions a basic Windows App Service. diff --git a/examples/app-service/windows-basic/main.tf b/examples/app-service/windows-basic/main.tf new file mode 100644 index 000000000000..e21ed6d0ea2f --- /dev/null +++ b/examples/app-service/windows-basic/main.tf @@ -0,0 +1,36 @@ +provider "azurerm" { + # if you're using a Service Principal (shared account) then either set the environment variables, or fill these in: + # subscription_id = "..." 
+ # client_id = "..." + # client_secret = "..." + # tenant_id = "..." +} + +resource "azurerm_resource_group" "main" { + name = "${var.prefix}-resources" + location = "${var.location}" +} + +resource "azurerm_app_service_plan" "main" { + name = "${var.prefix}-asp" + location = "${azurerm_resource_group.main.location}" + resource_group_name = "${azurerm_resource_group.main.name}" + + sku { + tier = "Basic" + size = "B1" + } +} + +resource "azurerm_app_service" "main" { + name = "${var.prefix}-appservice" + location = "${azurerm_resource_group.main.location}" + resource_group_name = "${azurerm_resource_group.main.name}" + app_service_plan_id = "${azurerm_app_service_plan.main.id}" + + site_config { + dotnet_framework_version = "v4.0" + remote_debugging_enabled = true + remote_debugging_version = "VS2015" + } +} diff --git a/examples/app-service/windows-basic/outputs.tf b/examples/app-service/windows-basic/outputs.tf new file mode 100644 index 000000000000..0167f4194ecd --- /dev/null +++ b/examples/app-service/windows-basic/outputs.tf @@ -0,0 +1,7 @@ +output "app_service_name" { + value = "${azurerm_app_service.main.name}" +} + +output "app_service_default_hostname" { + value = "https://${azurerm_app_service.main.default_site_hostname}" +} diff --git a/examples/app-service/windows-basic/variables.tf b/examples/app-service/windows-basic/variables.tf new file mode 100644 index 000000000000..fcd1745ba3e7 --- /dev/null +++ b/examples/app-service/windows-basic/variables.tf @@ -0,0 +1,7 @@ +variable "prefix" { + description = "The prefix used for all resources in this example" +} + +variable "location" { + description = "The Azure location where all resources in this example should be created" +} diff --git a/examples/app-service/windows-java/README.md b/examples/app-service/windows-java/README.md new file mode 100644 index 000000000000..24b43f985ff4 --- /dev/null +++ b/examples/app-service/windows-java/README.md @@ -0,0 +1,3 @@ +# Example: a (Windows) App Service for a 
Java App + +This example provisions a Windows App Service which is configured to run Java. diff --git a/examples/app-service/windows-java/main.tf b/examples/app-service/windows-java/main.tf new file mode 100644 index 000000000000..e09ced4590e6 --- /dev/null +++ b/examples/app-service/windows-java/main.tf @@ -0,0 +1,37 @@ + +provider "azurerm" { + # if you're using a Service Principal (shared account) then either set the environment variables, or fill these in: + # subscription_id = "..." + # client_id = "..." + # client_secret = "..." + # tenant_id = "..." +} + +resource "azurerm_resource_group" "main" { + name = "${var.prefix}-resources" + location = "${var.location}" +} + +resource "azurerm_app_service_plan" "main" { + name = "${var.prefix}-asp" + location = "${azurerm_resource_group.main.location}" + resource_group_name = "${azurerm_resource_group.main.name}" + + sku { + tier = "Basic" + size = "B1" + } +} + +resource "azurerm_app_service" "main" { + name = "${var.prefix}-appservice" + location = "${azurerm_resource_group.main.location}" + resource_group_name = "${azurerm_resource_group.main.name}" + app_service_plan_id = "${azurerm_app_service_plan.main.id}" + + site_config { + java_version = "1.8" + java_container = "JETTY" + java_container_version = "9.3" + } +} diff --git a/examples/app-service/windows-java/outputs.tf b/examples/app-service/windows-java/outputs.tf new file mode 100644 index 000000000000..0167f4194ecd --- /dev/null +++ b/examples/app-service/windows-java/outputs.tf @@ -0,0 +1,7 @@ +output "app_service_name" { + value = "${azurerm_app_service.main.name}" +} + +output "app_service_default_hostname" { + value = "https://${azurerm_app_service.main.default_site_hostname}" +} diff --git a/examples/app-service/windows-java/variables.tf b/examples/app-service/windows-java/variables.tf new file mode 100644 index 000000000000..fcd1745ba3e7 --- /dev/null +++ b/examples/app-service/windows-java/variables.tf @@ -0,0 +1,7 @@ +variable "prefix" { + 
description = "The prefix used for all resources in this example" +} + +variable "location" { + description = "The Azure location where all resources in this example should be created" +} diff --git a/examples/batch/main.tf b/examples/batch/main.tf index 1978af838850..be7de2854ca8 100644 --- a/examples/batch/main.tf +++ b/examples/batch/main.tf @@ -57,7 +57,7 @@ resource "azurerm_batch_pool" "fixedpool" { max_task_retry_count = 1 wait_for_success = true - environment { + environment = { env = "TEST" } diff --git a/examples/eventhub/main.tf b/examples/eventhub/main.tf index cfd05b678b9e..30776cade562 100644 --- a/examples/eventhub/main.tf +++ b/examples/eventhub/main.tf @@ -16,7 +16,7 @@ resource "azurerm_eventhub_namespace" "example" { sku = "Standard" capacity = 2 - tags { + tags = { environment = "Examples" } } diff --git a/examples/freebsd-jumpbox/main.tf b/examples/freebsd-jumpbox/main.tf index c63b767590c8..6c6534fa6f8f 100644 --- a/examples/freebsd-jumpbox/main.tf +++ b/examples/freebsd-jumpbox/main.tf @@ -53,7 +53,7 @@ resource "azurerm_network_security_group" "mgmt-nsg" { destination_address_prefix = "*" } - tags { + tags = { environment = "Management" } } @@ -76,7 +76,7 @@ resource "azurerm_network_security_group" "web-nsg" { destination_address_prefix = "*" } - tags { + tags = { environment = "Web" } } @@ -95,7 +95,7 @@ resource "azurerm_network_interface" "nic" { public_ip_address_id = "${azurerm_public_ip.pip.id}" } - tags { + tags = { environment = "Management" } } @@ -108,7 +108,7 @@ resource "azurerm_public_ip" "pip" { allocation_method = "Dynamic" domain_name_label = "${var.dns_name}" - tags { + tags = { environment = "Management" } } @@ -181,7 +181,7 @@ resource "azurerm_virtual_machine" "vm" { storage_uri = "${azurerm_storage_account.stor.primary_blob_endpoint}" } - tags { + tags = { environment = "Management" } } diff --git a/examples/kubernetes/basic/main.tf b/examples/kubernetes/basic/main.tf index 42501bde4c25..50fe0b30e2af 100644 --- 
a/examples/kubernetes/basic/main.tf +++ b/examples/kubernetes/basic/main.tf @@ -22,7 +22,7 @@ resource "azurerm_kubernetes_cluster" "test" { client_secret = "${var.kubernetes_client_secret}" } - tags { + tags = { Environment = "Production" } } diff --git a/examples/kubernetes/monitoring/main.tf b/examples/kubernetes/monitoring/main.tf index 406874d31531..16fe3e5ce9db 100644 --- a/examples/kubernetes/monitoring/main.tf +++ b/examples/kubernetes/monitoring/main.tf @@ -49,7 +49,7 @@ resource "azurerm_kubernetes_cluster" "test" { } } - tags { + tags = { Environment = "Production" } } diff --git a/examples/kubernetes/role-based-access-control-azuread/main.tf b/examples/kubernetes/role-based-access-control-azuread/main.tf index 4c4de2ab83fd..b4cb76e6188e 100644 --- a/examples/kubernetes/role-based-access-control-azuread/main.tf +++ b/examples/kubernetes/role-based-access-control-azuread/main.tf @@ -35,7 +35,7 @@ resource "azurerm_kubernetes_cluster" "test" { } } - tags { + tags = { Environment = "Production" } } diff --git a/examples/kubernetes/role-based-access-control/main.tf b/examples/kubernetes/role-based-access-control/main.tf index 66380dc43112..1f0834f6cb1a 100644 --- a/examples/kubernetes/role-based-access-control/main.tf +++ b/examples/kubernetes/role-based-access-control/main.tf @@ -26,7 +26,7 @@ resource "azurerm_kubernetes_cluster" "test" { enabled = true } - tags { + tags = { Environment = "Production" } } diff --git a/examples/media-services/multiple_storage_accounts/main.tf b/examples/media-services/multiple_storage_accounts/main.tf new file mode 100644 index 000000000000..2942bb2fe1ce --- /dev/null +++ b/examples/media-services/multiple_storage_accounts/main.tf @@ -0,0 +1,52 @@ +resource "azurerm_resource_group" "testrg" { + name = "amstestrg" + location = "westus" +} + +resource "azurerm_storage_account" "testsa" { + name = "amstestsa" + resource_group_name = "${azurerm_resource_group.testrg.name}" + location = 
"${azurerm_resource_group.testrg.location}" + account_tier = "Standard" + account_replication_type = "GRS" +} + +# +# Remove comments here and in the azure_media_services resource after the initial run +# to see a storage account get added to an existing AMS account. +# +/* +resource "azurerm_storage_account" "testsa2" { + name = "amstestsa2" + resource_group_name = "${azurerm_resource_group.testrg.name}" + location = "${azurerm_resource_group.testrg.location}" + account_tier = "Standard" + account_replication_type = "GRS" + + tags = { + environment = "staging" + } +} +*/ + +resource "azurerm_media_services" "ams" { + name = "amstest" + location = "${azurerm_resource_group.testrg.location}" + resource_group_name = "${azurerm_resource_group.testrg.name}" + + storage_account { + id = "${azurerm_storage_account.testsa.id}" + is_primary = true + } + + /* + storage_account { + id = "${azurerm_storage_account.testsa2.id}" + is_primary = false + } + */ +} + +output "rendered" { + value = "${azurerm_media_services.ams.id}" +} diff --git a/examples/media-services/new_primary_storage/main.tf b/examples/media-services/new_primary_storage/main.tf new file mode 100644 index 000000000000..1dd9517b0517 --- /dev/null +++ b/examples/media-services/new_primary_storage/main.tf @@ -0,0 +1,31 @@ +resource "azurerm_resource_group" "testrg" { + name = "amstestrg" + location = "westus" +} + +resource "azurerm_storage_account" "testsa" { + name = "amstestsa" + resource_group_name = "${azurerm_resource_group.testrg.name}" + location = "${azurerm_resource_group.testrg.location}" + account_tier = "Standard" + account_replication_type = "GRS" + + tags = { + environment = "staging" + } +} + +resource "azurerm_media_services" "ams" { + name = "amstest" + location = "${azurerm_resource_group.testrg.location}" + resource_group_name = "${azurerm_resource_group.testrg.name}" + + storage_account { + id = "${azurerm_storage_account.testsa.id}" + is_primary = true + } +} + +output "rendered" { + value 
= "${azurerm_media_services.ams.id}" +} diff --git a/examples/openshift-origin/main.tf b/examples/openshift-origin/main.tf index e37c8adf1ffb..7bcd4aabd3d4 100644 --- a/examples/openshift-origin/main.tf +++ b/examples/openshift-origin/main.tf @@ -476,7 +476,7 @@ resource "azurerm_virtual_machine" "bastion" { delete_os_disk_on_termination = true delete_data_disks_on_termination = true - tags { + tags = { displayName = "${var.openshift_cluster_prefix}-bastion VM Creation" } @@ -525,7 +525,7 @@ resource "azurerm_virtual_machine" "master" { count = "${var.master_instance_count}" depends_on = ["azurerm_virtual_machine.infra", "azurerm_virtual_machine.node"] - tags { + tags = { displayName = "${var.openshift_cluster_prefix}-master VM Creation" } @@ -607,7 +607,7 @@ resource "azurerm_virtual_machine" "infra" { delete_data_disks_on_termination = true count = "${var.infra_instance_count}" - tags { + tags = { displayName = "${var.openshift_cluster_prefix}-infra VM Creation" } @@ -684,7 +684,7 @@ resource "azurerm_virtual_machine" "node" { delete_data_disks_on_termination = true count = "${var.node_instance_count}" - tags { + tags = { displayName = "${var.openshift_cluster_prefix}-node VM Creation" } diff --git a/examples/scheduler-jobs/versions.tf b/examples/scheduler-jobs/versions.tf new file mode 100644 index 000000000000..ac97c6ac8e7c --- /dev/null +++ b/examples/scheduler-jobs/versions.tf @@ -0,0 +1,4 @@ + +terraform { + required_version = ">= 0.12" +} diff --git a/examples/vmss-ubuntu/main.tf b/examples/vmss-ubuntu/main.tf index 4701a0bc3551..82fb2a28b991 100644 --- a/examples/vmss-ubuntu/main.tf +++ b/examples/vmss-ubuntu/main.tf @@ -127,3 +127,59 @@ resource "azurerm_virtual_machine_scale_set" "scaleset" { version = "latest" } } + +resource "azurerm_autoscale_setting" "autoscale-cpu" { + name = "autoscale-cpu" + target_resource_id = "${azurerm_virtual_machine_scale_set.scaleset.id}" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = 
"${azurerm_resource_group.rg.name}" + + profile { + name = "autoscale-cpu" + + capacity { + default = "${var.instance_count}" + minimum = 0 + maximum = 1000 + } + + rule { + metric_trigger { + metric_name = "Percentage CPU" + metric_resource_id = "${azurerm_virtual_machine_scale_set.scaleset.id}" + time_grain = "PT1M" + statistic = "Average" + time_window = "PT5M" + time_aggregation = "Average" + operator = "GreaterThan" + threshold = 75 + } + + scale_action { + direction = "Increase" + type = "ChangeCount" + value = "1" + cooldown = "PT1M" + } + } + + rule { + metric_trigger { + metric_name = "Percentage CPU" + metric_resource_id = "${azurerm_virtual_machine_scale_set.scaleset.id}" + time_grain = "PT1M" + statistic = "Average" + time_window = "PT5M" + time_aggregation = "Average" + operator = "LessThan" + threshold = 15 + } + scale_action { + direction = "Decrease" + type = "ChangeCount" + value = "1" + cooldown = "PT1M" + } + } + } +} diff --git a/go.mod b/go.mod index 86715434b39f..5ac068c39d66 100644 --- a/go.mod +++ b/go.mod @@ -4,8 +4,8 @@ require ( cloud.google.com/go v0.34.0 // indirect contrib.go.opencensus.io/exporter/ocagent v0.4.1 // indirect git.apache.org/thrift.git v0.0.0-20181218151757-9b75e4fe745a // indirect - github.com/Azure/azure-sdk-for-go v24.0.0+incompatible - github.com/Azure/go-autorest v11.3.2+incompatible + github.com/Azure/azure-sdk-for-go v25.1.0+incompatible + github.com/Azure/go-autorest v11.4.0+incompatible github.com/agext/levenshtein v1.2.1 // indirect github.com/apparentlymart/go-cidr v0.0.0-20170418151526-7e4b007599d4 // indirect github.com/apparentlymart/go-rundeck-api v0.0.0-20160826143032-f6af74d34d1e // indirect @@ -15,6 +15,7 @@ require ( github.com/blang/semver v3.5.1+incompatible // indirect github.com/davecgh/go-spew v1.1.0 github.com/fsouza/go-dockerclient v0.0.0-20160427172547-1d4f4ae73768 // indirect + github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 // indirect github.com/go-ini/ini v1.23.1 // indirect 
github.com/golang/mock v1.2.0 // indirect github.com/google/uuid v0.0.0-20170814143639-7e072fc3a7be @@ -34,6 +35,7 @@ require ( github.com/hashicorp/terraform v0.11.9 github.com/hashicorp/yamux v0.0.0-20160720233140-d1caa6c97c9f // indirect github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 // indirect + github.com/marstr/collection v1.0.1 // indirect github.com/marstr/guid v0.0.0-20170427235115-8bdf7d1a087c // indirect github.com/mitchellh/cli v1.0.0 // indirect github.com/mitchellh/copystructure v1.0.0 // indirect @@ -42,10 +44,14 @@ require ( github.com/mitchellh/hashstructure v1.0.0 // indirect github.com/mitchellh/mapstructure v1.1.2 // indirect github.com/openzipkin/zipkin-go v0.1.3 // indirect + github.com/pkg/errors v0.8.1 // indirect github.com/prometheus/client_golang v0.9.2 // indirect github.com/prometheus/common v0.0.0-20181218105931-67670fe90761 // indirect github.com/satori/go.uuid v0.0.0-20160927100844-b061729afc07 github.com/satori/uuid v0.0.0-20160927100844-b061729afc07 + github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 // indirect + github.com/spf13/cobra v0.0.3 // indirect + github.com/spf13/pflag v1.0.3 // indirect github.com/ulikunitz/xz v0.5.4 // indirect github.com/zclconf/go-cty v0.0.0-20180227163247-7166230c635f // indirect golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869 diff --git a/go.sum b/go.sum index 7ac51eb566cd..58921cf2a39b 100644 --- a/go.sum +++ b/go.sum @@ -7,11 +7,15 @@ git.apache.org/thrift.git v0.0.0-20181218151757-9b75e4fe745a/go.mod h1:fPE2ZNJGy github.com/Azure/azure-sdk-for-go v21.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v24.0.0+incompatible h1:GdF0ozHojCPSZH1LPWA2+XHQ3G/mapn0G+PCIlMVZg4= github.com/Azure/azure-sdk-for-go v24.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v25.1.0+incompatible h1:bA8mqsHUc9RbzHG64A6r7KnpvLFHJdxrpI75FrFln2M= 
+github.com/Azure/azure-sdk-for-go v25.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-autorest v10.15.4+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v11.2.8+incompatible h1:Q2feRPMlcfVcqz3pF87PJzkm5lZrL+x6BDtzhODzNJM= github.com/Azure/go-autorest v11.2.8+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v11.3.2+incompatible h1:2bRmoaLvtIXW5uWpZVoIkc0C1z7c84rVGnP+3mpyCRg= github.com/Azure/go-autorest v11.3.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v11.4.0+incompatible h1:z3Yr6KYqs0nhSNwqGXEBpWK977hxVqsLv2n9PVYcixY= +github.com/Azure/go-autorest v11.4.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8= github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-cidr v0.0.0-20170418151526-7e4b007599d4 h1:bpmA3CCh0K829XIR5kfcV+YDt+Gwi7SEYPCcYEVKWUo= @@ -48,6 +52,8 @@ github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fsouza/go-dockerclient v0.0.0-20160427172547-1d4f4ae73768/go.mod h1:KpcjM623fQYE9MZiTGzKhjfxXAV9wbyX2C1cyRHfhl0= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-ini/ini v1.23.1 h1:amNPHl+tCb4BolL2NAIQaKLY+ZiL1Ju7OqZ9Fx6PTBQ= github.com/go-ini/ini v1.23.1/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod 
h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -98,6 +104,8 @@ github.com/hashicorp/yamux v0.0.0-20160720233140-d1caa6c97c9f/go.mod h1:+NfK9FKe github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 h1:SMvOWPJCES2GdFracYbBQh93GXac8fq7HeN6JnpduB8= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/marstr/collection v1.0.1 h1:j61osRfyny7zxBlLRtoCvOZ2VX7HEyybkZcsLNLJ0z0= +github.com/marstr/collection v1.0.1/go.mod h1:HHDXVxjLO3UYCBXJWY+J/ZrxCUOYqrO66ob1AzIsmYA= github.com/marstr/guid v0.0.0-20170427235115-8bdf7d1a087c h1:N7uWGS2fTwH/4BwxbHiJZNAFTSJ5yPU0emHsQWvkxEY= github.com/marstr/guid v0.0.0-20170427235115-8bdf7d1a087c/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= github.com/mattn/go-colorable v0.0.0-20180310133214-efa589957cd0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -125,6 +133,8 @@ github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/I github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/posener/complete v0.0.0-20170908125245-88e59760adad/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= @@ -140,6 +150,12 @@ github.com/satori/go.uuid v0.0.0-20160927100844-b061729afc07 h1:DEZDfcCVq3xDJrjq github.com/satori/go.uuid v0.0.0-20160927100844-b061729afc07/go.mod 
h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/satori/uuid v0.0.0-20160927100844-b061729afc07 h1:81vvGlnI/AZ1/TxGDirw3ofUoS64TyjmPQt5C9XODTw= github.com/satori/uuid v0.0.0-20160927100844-b061729afc07/go.mod h1:B8HLsPLik/YNn6KKWVMDJ8nzCL8RP5WyfsnmvnAEwIU= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/ulikunitz/xz v0.5.4 h1:zATC2OoZ8H1TZll3FpbX+ikwmadbO699PE06cIkm9oU= github.com/ulikunitz/xz v0.5.4/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/zclconf/go-cty v0.0.0-20180227163247-7166230c635f h1:OkKoSRyYPHTuUJcbnjUPsuW+qzkxkqQxd8zJjZcsTc0= @@ -177,6 +193,7 @@ golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fq golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e h1:XEcLGV2fKy3FrsoJVCkX+lMhqc9Suj7J5L/wldA1wu4= golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= diff --git a/scripts/gogetcookie.sh b/scripts/gogetcookie.sh index 26c63a64bed4..1c04215d7ab4 
100755 --- a/scripts/gogetcookie.sh +++ b/scripts/gogetcookie.sh @@ -6,5 +6,6 @@ chmod 0600 ~/.gitcookies git config --global http.cookiefile ~/.gitcookies tr , \\t <<\__END__ >>~/.gitcookies -.googlesource.com,TRUE,/,TRUE,2147483647,o,git-paul.hashicorp.com=1/z7s05EYPudQ9qoe6dMVfmAVwgZopEkZBb1a2mA5QtHE +go.googlesource.com,TRUE,/,TRUE,2147483647,o,git-kt.katbyte.me=1/sEvv4P2NiGofB7kgPV7DBbsV5V8_od3JULgYIyZJnUM +go-review.googlesource.com,TRUE,/,TRUE,2147483647,o,git-kt.katbyte.me=1/sEvv4P2NiGofB7kgPV7DBbsV5V8_od3JULgYIyZJnUM __END__ diff --git a/vendor/github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/resources/mgmt/resources/models.go b/vendor/github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/resources/mgmt/resources/models.go index cbe939d82b38..c791818c4e2b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/resources/mgmt/resources/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/resources/mgmt/resources/models.go @@ -1,6 +1,6 @@ // +build go1.9 -// Copyright 2018 Microsoft Corporation +// Copyright 2019 Microsoft Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/api.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/api.go similarity index 97% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/api.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/api.go index 330c97d56c77..29f6710f910b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/api.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/api.go @@ -102,7 +102,7 @@ func (client APIClient) CreateOrUpdatePreparer(ctx context.Context, resourceGrou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -203,7 +203,7 @@ func (client APIClient) DeletePreparer(ctx context.Context, resourceGroupName st "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -298,7 +298,7 @@ func (client APIClient) GetPreparer(ctx context.Context, resourceGroupName strin "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -390,7 +390,7 @@ func (client APIClient) GetEntityTagPreparer(ctx context.Context, resourceGroupN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := 
map[string]interface{}{ "api-version": APIVersion, } @@ -491,7 +491,7 @@ func (client APIClient) ListByServicePreparer(ctx context.Context, resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -591,8 +591,7 @@ func (client APIClient) ListByServiceComplete(ctx context.Context, resourceGroup // | isCurrent | eq | substringof, contains, startswith, endswith | // top - number of records to return. // skip - number of records to skip. -// includeNotTaggedApis - include not tagged apis in response -func (client APIClient) ListByTags(ctx context.Context, resourceGroupName string, serviceName string, filter string, top *int32, skip *int32, includeNotTaggedApis *bool) (result TagResourceCollectionPage, err error) { +func (client APIClient) ListByTags(ctx context.Context, resourceGroupName string, serviceName string, filter string, top *int32, skip *int32) (result TagResourceCollectionPage, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/APIClient.ListByTags") defer func() { @@ -618,7 +617,7 @@ func (client APIClient) ListByTags(ctx context.Context, resourceGroupName string } result.fn = client.listByTagsNextResults - req, err := client.ListByTagsPreparer(ctx, resourceGroupName, serviceName, filter, top, skip, includeNotTaggedApis) + req, err := client.ListByTagsPreparer(ctx, resourceGroupName, serviceName, filter, top, skip) if err != nil { err = autorest.NewErrorWithError(err, "apimanagement.APIClient", "ListByTags", nil, "Failure preparing request") return @@ -640,14 +639,14 @@ func (client APIClient) ListByTags(ctx context.Context, resourceGroupName string } // ListByTagsPreparer prepares the ListByTags request. 
-func (client APIClient) ListByTagsPreparer(ctx context.Context, resourceGroupName string, serviceName string, filter string, top *int32, skip *int32, includeNotTaggedApis *bool) (*http.Request, error) { +func (client APIClient) ListByTagsPreparer(ctx context.Context, resourceGroupName string, serviceName string, filter string, top *int32, skip *int32) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "serviceName": autorest.Encode("path", serviceName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -660,11 +659,6 @@ func (client APIClient) ListByTagsPreparer(ctx context.Context, resourceGroupNam if skip != nil { queryParameters["$skip"] = autorest.Encode("query", *skip) } - if includeNotTaggedApis != nil { - queryParameters["includeNotTaggedApis"] = autorest.Encode("query", *includeNotTaggedApis) - } else { - queryParameters["includeNotTaggedApis"] = autorest.Encode("query", false) - } preparer := autorest.CreatePreparer( autorest.AsGet(), @@ -716,7 +710,7 @@ func (client APIClient) listByTagsNextResults(ctx context.Context, lastResults T } // ListByTagsComplete enumerates all values, automatically crossing page boundaries as required. 
-func (client APIClient) ListByTagsComplete(ctx context.Context, resourceGroupName string, serviceName string, filter string, top *int32, skip *int32, includeNotTaggedApis *bool) (result TagResourceCollectionIterator, err error) { +func (client APIClient) ListByTagsComplete(ctx context.Context, resourceGroupName string, serviceName string, filter string, top *int32, skip *int32) (result TagResourceCollectionIterator, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/APIClient.ListByTags") defer func() { @@ -727,7 +721,7 @@ func (client APIClient) ListByTagsComplete(ctx context.Context, resourceGroupNam tracing.EndSpan(ctx, sc, err) }() } - result.page, err = client.ListByTags(ctx, resourceGroupName, serviceName, filter, top, skip, includeNotTaggedApis) + result.page, err = client.ListByTags(ctx, resourceGroupName, serviceName, filter, top, skip) return } @@ -793,7 +787,7 @@ func (client APIClient) UpdatePreparer(ctx context.Context, resourceGroupName st "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apidiagnostic.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apidiagnostic.go similarity index 89% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apidiagnostic.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apidiagnostic.go index 4fbba76c689a..ca85f39ce6cb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apidiagnostic.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apidiagnostic.go @@ -75,42 +75,7 @@ func (client APIDiagnosticClient) CreateOrUpdate(ctx context.Context, resourceGr {Target: "diagnosticID", Name: validation.Pattern, Rule: `(^[\w]+$)|(^[\w][\w\-]+[\w]$)`, Chain: nil}}}, {TargetValue: parameters, Constraints: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.LoggerID", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.DiagnosticContractProperties.Sampling", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Sampling.Percentage", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Sampling.Percentage", Name: validation.InclusiveMaximum, Rule: int64(100), Chain: nil}, - {Target: "parameters.DiagnosticContractProperties.Sampling.Percentage", Name: validation.InclusiveMinimum, Rule: 0, Chain: nil}, - }}, - }}, - {Target: "parameters.DiagnosticContractProperties.Frontend", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Frontend.Request", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Frontend.Request.Body", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Frontend.Request.Body.Bytes", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Frontend.Request.Body.Bytes", Name: validation.InclusiveMaximum, Rule: int64(8192), Chain: nil}}}, - }}, - }}, - {Target: "parameters.DiagnosticContractProperties.Frontend.Response", Name: validation.Null, Rule: false, - 
Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Frontend.Response.Body", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Frontend.Response.Body.Bytes", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Frontend.Response.Body.Bytes", Name: validation.InclusiveMaximum, Rule: int64(8192), Chain: nil}}}, - }}, - }}, - }}, - {Target: "parameters.DiagnosticContractProperties.Backend", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Backend.Request", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Backend.Request.Body", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Backend.Request.Body.Bytes", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Backend.Request.Body.Bytes", Name: validation.InclusiveMaximum, Rule: int64(8192), Chain: nil}}}, - }}, - }}, - {Target: "parameters.DiagnosticContractProperties.Backend.Response", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Backend.Response.Body", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Backend.Response.Body.Bytes", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Backend.Response.Body.Bytes", Name: validation.InclusiveMaximum, Rule: int64(8192), Chain: nil}}}, - }}, - }}, - }}, - }}}}}); err != nil { + Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Enabled", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { return 
result, validation.NewError("apimanagement.APIDiagnosticClient", "CreateOrUpdate", err.Error()) } @@ -145,7 +110,7 @@ func (client APIDiagnosticClient) CreateOrUpdatePreparer(ctx context.Context, re "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -250,7 +215,7 @@ func (client APIDiagnosticClient) DeletePreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -347,7 +312,7 @@ func (client APIDiagnosticClient) GetPreparer(ctx context.Context, resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -444,7 +409,7 @@ func (client APIDiagnosticClient) GetEntityTagPreparer(ctx context.Context, reso "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -546,7 +511,7 @@ func (client APIDiagnosticClient) ListByServicePreparer(ctx context.Context, res "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -692,7 +657,7 @@ func (client APIDiagnosticClient) UpdatePreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apidiagnosticlogger.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apidiagnosticlogger.go new file mode 100644 index 000000000000..39adb12ed735 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apidiagnosticlogger.go @@ -0,0 +1,502 @@ +package apimanagement + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// APIDiagnosticLoggerClient is the apiManagement Client +type APIDiagnosticLoggerClient struct { + BaseClient +} + +// NewAPIDiagnosticLoggerClient creates an instance of the APIDiagnosticLoggerClient client. +func NewAPIDiagnosticLoggerClient(subscriptionID string) APIDiagnosticLoggerClient { + return NewAPIDiagnosticLoggerClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewAPIDiagnosticLoggerClientWithBaseURI creates an instance of the APIDiagnosticLoggerClient client. 
+func NewAPIDiagnosticLoggerClientWithBaseURI(baseURI string, subscriptionID string) APIDiagnosticLoggerClient { + return APIDiagnosticLoggerClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckEntityExists checks that logger entity specified by identifier is associated with the diagnostics entity. +// Parameters: +// resourceGroupName - the name of the resource group. +// serviceName - the name of the API Management service. +// apiid - API identifier. Must be unique in the current API Management service instance. +// diagnosticID - diagnostic identifier. Must be unique in the current API Management service instance. +// loggerid - logger identifier. Must be unique in the API Management service instance. +func (client APIDiagnosticLoggerClient) CheckEntityExists(ctx context.Context, resourceGroupName string, serviceName string, apiid string, diagnosticID string, loggerid string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/APIDiagnosticLoggerClient.CheckEntityExists") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: serviceName, + Constraints: []validation.Constraint{{Target: "serviceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "serviceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "serviceName", Name: validation.Pattern, Rule: `^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$`, Chain: nil}}}, + {TargetValue: apiid, + Constraints: []validation.Constraint{{Target: "apiid", Name: validation.MaxLength, Rule: 80, Chain: nil}, + {Target: "apiid", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "apiid", Name: validation.Pattern, Rule: `(^[\w]+$)|(^[\w][\w\-]+[\w]$)`, Chain: nil}}}, + {TargetValue: diagnosticID, + Constraints: []validation.Constraint{{Target: "diagnosticID", Name: 
validation.MaxLength, Rule: 80, Chain: nil}, + {Target: "diagnosticID", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "diagnosticID", Name: validation.Pattern, Rule: `(^[\w]+$)|(^[\w][\w\-]+[\w]$)`, Chain: nil}}}, + {TargetValue: loggerid, + Constraints: []validation.Constraint{{Target: "loggerid", Name: validation.MaxLength, Rule: 80, Chain: nil}, + {Target: "loggerid", Name: validation.Pattern, Rule: `(^[\w]+$)|(^[\w][\w\-]+[\w]$)`, Chain: nil}}}}); err != nil { + return result, validation.NewError("apimanagement.APIDiagnosticLoggerClient", "CheckEntityExists", err.Error()) + } + + req, err := client.CheckEntityExistsPreparer(ctx, resourceGroupName, serviceName, apiid, diagnosticID, loggerid) + if err != nil { + err = autorest.NewErrorWithError(err, "apimanagement.APIDiagnosticLoggerClient", "CheckEntityExists", nil, "Failure preparing request") + return + } + + resp, err := client.CheckEntityExistsSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "apimanagement.APIDiagnosticLoggerClient", "CheckEntityExists", resp, "Failure sending request") + return + } + + result, err = client.CheckEntityExistsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "apimanagement.APIDiagnosticLoggerClient", "CheckEntityExists", resp, "Failure responding to request") + } + + return +} + +// CheckEntityExistsPreparer prepares the CheckEntityExists request. 
+func (client APIDiagnosticLoggerClient) CheckEntityExistsPreparer(ctx context.Context, resourceGroupName string, serviceName string, apiid string, diagnosticID string, loggerid string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "apiId": autorest.Encode("path", apiid), + "diagnosticId": autorest.Encode("path", diagnosticID), + "loggerid": autorest.Encode("path", loggerid), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "serviceName": autorest.Encode("path", serviceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsHead(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/diagnostics/{diagnosticId}/loggers/{loggerid}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CheckEntityExistsSender sends the CheckEntityExists request. The method will close the +// http.Response Body if it receives an error. +func (client APIDiagnosticLoggerClient) CheckEntityExistsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CheckEntityExistsResponder handles the response to the CheckEntityExists request. The method always +// closes the http.Response Body. 
+func (client APIDiagnosticLoggerClient) CheckEntityExistsResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusNotFound), + autorest.ByClosing()) + result.Response = resp + return +} + +// CreateOrUpdate attaches a logger to a diagnostic for an API. +// Parameters: +// resourceGroupName - the name of the resource group. +// serviceName - the name of the API Management service. +// apiid - API identifier. Must be unique in the current API Management service instance. +// diagnosticID - diagnostic identifier. Must be unique in the current API Management service instance. +// loggerid - logger identifier. Must be unique in the API Management service instance. +func (client APIDiagnosticLoggerClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, serviceName string, apiid string, diagnosticID string, loggerid string) (result LoggerContract, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/APIDiagnosticLoggerClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: serviceName, + Constraints: []validation.Constraint{{Target: "serviceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "serviceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "serviceName", Name: validation.Pattern, Rule: `^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$`, Chain: nil}}}, + {TargetValue: apiid, + Constraints: []validation.Constraint{{Target: "apiid", Name: validation.MaxLength, Rule: 80, Chain: nil}, + {Target: "apiid", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "apiid", Name: validation.Pattern, Rule: `(^[\w]+$)|(^[\w][\w\-]+[\w]$)`, Chain: nil}}}, + 
{TargetValue: diagnosticID, + Constraints: []validation.Constraint{{Target: "diagnosticID", Name: validation.MaxLength, Rule: 80, Chain: nil}, + {Target: "diagnosticID", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "diagnosticID", Name: validation.Pattern, Rule: `(^[\w]+$)|(^[\w][\w\-]+[\w]$)`, Chain: nil}}}, + {TargetValue: loggerid, + Constraints: []validation.Constraint{{Target: "loggerid", Name: validation.MaxLength, Rule: 80, Chain: nil}, + {Target: "loggerid", Name: validation.Pattern, Rule: `(^[\w]+$)|(^[\w][\w\-]+[\w]$)`, Chain: nil}}}}); err != nil { + return result, validation.NewError("apimanagement.APIDiagnosticLoggerClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, serviceName, apiid, diagnosticID, loggerid) + if err != nil { + err = autorest.NewErrorWithError(err, "apimanagement.APIDiagnosticLoggerClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "apimanagement.APIDiagnosticLoggerClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "apimanagement.APIDiagnosticLoggerClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client APIDiagnosticLoggerClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, serviceName string, apiid string, diagnosticID string, loggerid string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "apiId": autorest.Encode("path", apiid), + "diagnosticId": autorest.Encode("path", diagnosticID), + "loggerid": autorest.Encode("path", loggerid), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "serviceName": autorest.Encode("path", serviceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/diagnostics/{diagnosticId}/loggers/{loggerid}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client APIDiagnosticLoggerClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client APIDiagnosticLoggerClient) CreateOrUpdateResponder(resp *http.Response) (result LoggerContract, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified Logger from Diagnostic for an API. +// Parameters: +// resourceGroupName - the name of the resource group. +// serviceName - the name of the API Management service. +// apiid - API identifier. Must be unique in the current API Management service instance. +// diagnosticID - diagnostic identifier. Must be unique in the current API Management service instance. +// loggerid - logger identifier. Must be unique in the API Management service instance. +func (client APIDiagnosticLoggerClient) Delete(ctx context.Context, resourceGroupName string, serviceName string, apiid string, diagnosticID string, loggerid string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/APIDiagnosticLoggerClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: serviceName, + Constraints: []validation.Constraint{{Target: "serviceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "serviceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "serviceName", Name: validation.Pattern, Rule: `^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$`, Chain: nil}}}, + {TargetValue: apiid, + Constraints: []validation.Constraint{{Target: "apiid", Name: validation.MaxLength, Rule: 80, Chain: nil}, + {Target: "apiid", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "apiid", Name: validation.Pattern, Rule: `(^[\w]+$)|(^[\w][\w\-]+[\w]$)`, Chain: 
nil}}}, + {TargetValue: diagnosticID, + Constraints: []validation.Constraint{{Target: "diagnosticID", Name: validation.MaxLength, Rule: 80, Chain: nil}, + {Target: "diagnosticID", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "diagnosticID", Name: validation.Pattern, Rule: `(^[\w]+$)|(^[\w][\w\-]+[\w]$)`, Chain: nil}}}, + {TargetValue: loggerid, + Constraints: []validation.Constraint{{Target: "loggerid", Name: validation.MaxLength, Rule: 80, Chain: nil}, + {Target: "loggerid", Name: validation.Pattern, Rule: `(^[\w]+$)|(^[\w][\w\-]+[\w]$)`, Chain: nil}}}}); err != nil { + return result, validation.NewError("apimanagement.APIDiagnosticLoggerClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, serviceName, apiid, diagnosticID, loggerid) + if err != nil { + err = autorest.NewErrorWithError(err, "apimanagement.APIDiagnosticLoggerClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "apimanagement.APIDiagnosticLoggerClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "apimanagement.APIDiagnosticLoggerClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client APIDiagnosticLoggerClient) DeletePreparer(ctx context.Context, resourceGroupName string, serviceName string, apiid string, diagnosticID string, loggerid string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "apiId": autorest.Encode("path", apiid), + "diagnosticId": autorest.Encode("path", diagnosticID), + "loggerid": autorest.Encode("path", loggerid), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "serviceName": autorest.Encode("path", serviceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/diagnostics/{diagnosticId}/loggers/{loggerid}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client APIDiagnosticLoggerClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client APIDiagnosticLoggerClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// ListByService lists all loggers associated with the specified Diagnostic of an API. 
+// Parameters: +// resourceGroupName - the name of the resource group. +// serviceName - the name of the API Management service. +// apiid - API identifier. Must be unique in the current API Management service instance. +// diagnosticID - diagnostic identifier. Must be unique in the current API Management service instance. +// filter - | Field | Supported operators | Supported functions | +// |-------------|------------------------|-----------------------------------| +// | id | ge, le, eq, ne, gt, lt | substringof, startswith, endswith | +// | type | eq | | +// top - number of records to return. +// skip - number of records to skip. +func (client APIDiagnosticLoggerClient) ListByService(ctx context.Context, resourceGroupName string, serviceName string, apiid string, diagnosticID string, filter string, top *int32, skip *int32) (result LoggerCollectionPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/APIDiagnosticLoggerClient.ListByService") + defer func() { + sc := -1 + if result.lc.Response.Response != nil { + sc = result.lc.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: serviceName, + Constraints: []validation.Constraint{{Target: "serviceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "serviceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "serviceName", Name: validation.Pattern, Rule: `^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$`, Chain: nil}}}, + {TargetValue: apiid, + Constraints: []validation.Constraint{{Target: "apiid", Name: validation.MaxLength, Rule: 80, Chain: nil}, + {Target: "apiid", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "apiid", Name: validation.Pattern, Rule: `(^[\w]+$)|(^[\w][\w\-]+[\w]$)`, Chain: nil}}}, + {TargetValue: diagnosticID, + Constraints: []validation.Constraint{{Target: "diagnosticID", Name: validation.MaxLength, Rule: 80, Chain: nil}, + 
{Target: "diagnosticID", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "diagnosticID", Name: validation.Pattern, Rule: `(^[\w]+$)|(^[\w][\w\-]+[\w]$)`, Chain: nil}}}, + {TargetValue: top, + Constraints: []validation.Constraint{{Target: "top", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "top", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}}}}}, + {TargetValue: skip, + Constraints: []validation.Constraint{{Target: "skip", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "skip", Name: validation.InclusiveMinimum, Rule: 0, Chain: nil}}}}}}); err != nil { + return result, validation.NewError("apimanagement.APIDiagnosticLoggerClient", "ListByService", err.Error()) + } + + result.fn = client.listByServiceNextResults + req, err := client.ListByServicePreparer(ctx, resourceGroupName, serviceName, apiid, diagnosticID, filter, top, skip) + if err != nil { + err = autorest.NewErrorWithError(err, "apimanagement.APIDiagnosticLoggerClient", "ListByService", nil, "Failure preparing request") + return + } + + resp, err := client.ListByServiceSender(req) + if err != nil { + result.lc.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "apimanagement.APIDiagnosticLoggerClient", "ListByService", resp, "Failure sending request") + return + } + + result.lc, err = client.ListByServiceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "apimanagement.APIDiagnosticLoggerClient", "ListByService", resp, "Failure responding to request") + } + + return +} + +// ListByServicePreparer prepares the ListByService request. 
+func (client APIDiagnosticLoggerClient) ListByServicePreparer(ctx context.Context, resourceGroupName string, serviceName string, apiid string, diagnosticID string, filter string, top *int32, skip *int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "apiId": autorest.Encode("path", apiid), + "diagnosticId": autorest.Encode("path", diagnosticID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "serviceName": autorest.Encode("path", serviceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if skip != nil { + queryParameters["$skip"] = autorest.Encode("query", *skip) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/diagnostics/{diagnosticId}/loggers", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByServiceSender sends the ListByService request. The method will close the +// http.Response Body if it receives an error. +func (client APIDiagnosticLoggerClient) ListByServiceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListByServiceResponder handles the response to the ListByService request. The method always +// closes the http.Response Body. 
+func (client APIDiagnosticLoggerClient) ListByServiceResponder(resp *http.Response) (result LoggerCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByServiceNextResults retrieves the next set of results, if any. +func (client APIDiagnosticLoggerClient) listByServiceNextResults(ctx context.Context, lastResults LoggerCollection) (result LoggerCollection, err error) { + req, err := lastResults.loggerCollectionPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "apimanagement.APIDiagnosticLoggerClient", "listByServiceNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByServiceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "apimanagement.APIDiagnosticLoggerClient", "listByServiceNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByServiceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "apimanagement.APIDiagnosticLoggerClient", "listByServiceNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByServiceComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client APIDiagnosticLoggerClient) ListByServiceComplete(ctx context.Context, resourceGroupName string, serviceName string, apiid string, diagnosticID string, filter string, top *int32, skip *int32) (result LoggerCollectionIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/APIDiagnosticLoggerClient.ListByService") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByService(ctx, resourceGroupName, serviceName, apiid, diagnosticID, filter, top, skip) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apiexport.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apiexport.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apiexport.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apiexport.go index 3af7d049f302..b00aa7225d10 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apiexport.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apiexport.go @@ -103,7 +103,7 @@ func (client APIExportClient) GetPreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, "export": autorest.Encode("query", "true"), diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apiissue.go 
b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apiissue.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apiissue.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apiissue.go index 5e1f499c425a..518bf144696e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apiissue.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apiissue.go @@ -114,7 +114,7 @@ func (client APIIssueClient) CreateOrUpdatePreparer(ctx context.Context, resourc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -219,7 +219,7 @@ func (client APIIssueClient) DeletePreparer(ctx context.Context, resourceGroupNa "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -316,7 +316,7 @@ func (client APIIssueClient) GetPreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -413,7 +413,7 @@ func (client APIIssueClient) GetEntityTagPreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -517,7 +517,7 @@ func (client APIIssueClient) 
ListByServicePreparer(ctx context.Context, resource "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -663,7 +663,7 @@ func (client APIIssueClient) UpdatePreparer(ctx context.Context, resourceGroupNa "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apiissueattachment.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apiissueattachment.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apiissueattachment.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apiissueattachment.go index 84a019968f90..29a371da081f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apiissueattachment.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apiissueattachment.go @@ -120,7 +120,7 @@ func (client APIIssueAttachmentClient) CreateOrUpdatePreparer(ctx context.Contex "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -231,7 +231,7 @@ func (client APIIssueAttachmentClient) DeletePreparer(ctx context.Context, resou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = 
"2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -334,7 +334,7 @@ func (client APIIssueAttachmentClient) GetPreparer(ctx context.Context, resource "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -437,7 +437,7 @@ func (client APIIssueAttachmentClient) GetEntityTagPreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -546,7 +546,7 @@ func (client APIIssueAttachmentClient) ListByServicePreparer(ctx context.Context "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apiissuecomment.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apiissuecomment.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apiissuecomment.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apiissuecomment.go index e556b5bd91ba..25bfa4cf393f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apiissuecomment.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apiissuecomment.go @@ -119,7 +119,7 @@ func (client APIIssueCommentClient) CreateOrUpdatePreparer(ctx context.Context, "subscriptionId": 
autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -230,7 +230,7 @@ func (client APIIssueCommentClient) DeletePreparer(ctx context.Context, resource "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -333,7 +333,7 @@ func (client APIIssueCommentClient) GetPreparer(ctx context.Context, resourceGro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -436,7 +436,7 @@ func (client APIIssueCommentClient) GetEntityTagPreparer(ctx context.Context, re "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -545,7 +545,7 @@ func (client APIIssueCommentClient) ListByServicePreparer(ctx context.Context, r "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apioperation.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apioperation.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apioperation.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apioperation.go 
index 3cebfc5580a1..00a0255a5a43 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apioperation.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apioperation.go @@ -121,7 +121,7 @@ func (client APIOperationClient) CreateOrUpdatePreparer(ctx context.Context, res "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -228,7 +228,7 @@ func (client APIOperationClient) DeletePreparer(ctx context.Context, resourceGro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -327,7 +327,7 @@ func (client APIOperationClient) GetPreparer(ctx context.Context, resourceGroupN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -426,7 +426,7 @@ func (client APIOperationClient) GetEntityTagPreparer(ctx context.Context, resou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -532,7 +532,7 @@ func (client APIOperationClient) ListByAPIPreparer(ctx context.Context, resource "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -680,7 +680,7 @@ func (client APIOperationClient) UpdatePreparer(ctx context.Context, resourceGro "subscriptionId": 
autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apioperationpolicy.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apioperationpolicy.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apioperationpolicy.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apioperationpolicy.go index 39d06d24776e..e95f0a6df844 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apioperationpolicy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apioperationpolicy.go @@ -113,7 +113,7 @@ func (client APIOperationPolicyClient) CreateOrUpdatePreparer(ctx context.Contex "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -221,7 +221,7 @@ func (client APIOperationPolicyClient) DeletePreparer(ctx context.Context, resou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -321,7 +321,7 @@ func (client APIOperationPolicyClient) GetPreparer(ctx context.Context, resource "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": 
APIVersion, } @@ -421,7 +421,7 @@ func (client APIOperationPolicyClient) GetEntityTagPreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -519,7 +519,7 @@ func (client APIOperationPolicyClient) ListByOperationPreparer(ctx context.Conte "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apipolicy.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apipolicy.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apipolicy.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apipolicy.go index 17324ae755b5..faf5b212c3b7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apipolicy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apipolicy.go @@ -106,7 +106,7 @@ func (client APIPolicyClient) CreateOrUpdatePreparer(ctx context.Context, resour "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -207,7 +207,7 @@ func (client APIPolicyClient) DeletePreparer(ctx context.Context, resourceGroupN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = 
"2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -300,7 +300,7 @@ func (client APIPolicyClient) GetPreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -393,7 +393,7 @@ func (client APIPolicyClient) GetEntityTagPreparer(ctx context.Context, resource "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -484,7 +484,7 @@ func (client APIPolicyClient) ListByAPIPreparer(ctx context.Context, resourceGro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apiproduct.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apiproduct.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apiproduct.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apiproduct.go index ee87a4bd53fc..ce0ec03a39fb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apiproduct.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apiproduct.go @@ -111,7 +111,7 @@ func (client APIProductClient) ListByApisPreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", 
client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apirelease.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apirelease.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apirelease.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apirelease.go index 413e738e7f0e..f8404eea5427 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apirelease.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apirelease.go @@ -106,7 +106,7 @@ func (client APIReleaseClient) CreatePreparer(ctx context.Context, resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -207,7 +207,7 @@ func (client APIReleaseClient) DeletePreparer(ctx context.Context, resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -304,7 +304,7 @@ func (client APIReleaseClient) GetPreparer(ctx context.Context, resourceGroupNam "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -401,7 +401,7 @@ func (client APIReleaseClient) 
GetEntityTagPreparer(ctx context.Context, resourc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -506,7 +506,7 @@ func (client APIReleaseClient) ListPreparer(ctx context.Context, resourceGroupNa "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -652,7 +652,7 @@ func (client APIReleaseClient) UpdatePreparer(ctx context.Context, resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apirevisions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apirevisions.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apirevisions.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apirevisions.go index 9b7084e433a6..3fd8552511f5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apirevisions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apirevisions.go @@ -112,7 +112,7 @@ func (client APIRevisionsClient) ListPreparer(ctx context.Context, resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := 
map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apischema.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apischema.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apischema.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apischema.go index eb44c2669d27..dc26569e541f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apischema.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apischema.go @@ -111,7 +111,7 @@ func (client APISchemaClient) CreateOrUpdatePreparer(ctx context.Context, resour "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -217,7 +217,7 @@ func (client APISchemaClient) DeletePreparer(ctx context.Context, resourceGroupN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -315,7 +315,7 @@ func (client APISchemaClient) GetPreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -413,7 +413,7 @@ func (client APISchemaClient) GetEntityTagPreparer(ctx context.Context, resource "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = 
"2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -505,7 +505,7 @@ func (client APISchemaClient) ListByAPIPreparer(ctx context.Context, resourceGro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apiversionset.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apiversionset.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apiversionset.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apiversionset.go index 354c582d38be..3e96eff6c9a4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/apiversionset.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/apiversionset.go @@ -108,7 +108,7 @@ func (client APIVersionSetClient) CreateOrUpdatePreparer(ctx context.Context, re "versionSetId": autorest.Encode("path", versionSetID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -207,7 +207,7 @@ func (client APIVersionSetClient) DeletePreparer(ctx context.Context, resourceGr "versionSetId": autorest.Encode("path", versionSetID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -298,7 +298,7 @@ func (client APIVersionSetClient) GetPreparer(ctx context.Context, resourceGroup "versionSetId": 
autorest.Encode("path", versionSetID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -389,7 +389,7 @@ func (client APIVersionSetClient) GetEntityTagPreparer(ctx context.Context, reso "versionSetId": autorest.Encode("path", versionSetID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -491,7 +491,7 @@ func (client APIVersionSetClient) ListByServicePreparer(ctx context.Context, res "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -631,7 +631,7 @@ func (client APIVersionSetClient) UpdatePreparer(ctx context.Context, resourceGr "versionSetId": autorest.Encode("path", versionSetID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/authorizationserver.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/authorizationserver.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/authorizationserver.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/authorizationserver.go index 47d2c107adee..ef62a2d25892 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/authorizationserver.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/authorizationserver.go @@ -112,7 +112,7 @@ 
func (client AuthorizationServerClient) CreateOrUpdatePreparer(ctx context.Conte "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -211,7 +211,7 @@ func (client AuthorizationServerClient) DeletePreparer(ctx context.Context, reso "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -302,7 +302,7 @@ func (client AuthorizationServerClient) GetPreparer(ctx context.Context, resourc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -393,7 +393,7 @@ func (client AuthorizationServerClient) GetEntityTagPreparer(ctx context.Context "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -490,7 +490,7 @@ func (client AuthorizationServerClient) ListByServicePreparer(ctx context.Contex "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -630,7 +630,7 @@ func (client AuthorizationServerClient) UpdatePreparer(ctx context.Context, reso "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/backend.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/backend.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/backend.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/backend.go index ed3c96722918..96f91a993644 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/backend.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/backend.go @@ -108,7 +108,7 @@ func (client BackendClient) CreateOrUpdatePreparer(ctx context.Context, resource "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -207,7 +207,7 @@ func (client BackendClient) DeletePreparer(ctx context.Context, resourceGroupNam "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -298,7 +298,7 @@ func (client BackendClient) GetPreparer(ctx context.Context, resourceGroupName s "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -389,7 +389,7 @@ func (client BackendClient) GetEntityTagPreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := 
map[string]interface{}{ "api-version": APIVersion, } @@ -486,7 +486,7 @@ func (client BackendClient) ListByServicePreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -625,7 +625,7 @@ func (client BackendClient) ReconnectPreparer(ctx context.Context, resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -723,7 +723,7 @@ func (client BackendClient) UpdatePreparer(ctx context.Context, resourceGroupNam "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/certificate.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/certificate.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/certificate.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/certificate.go index f531c4589dda..389389662506 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/certificate.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/certificate.go @@ -107,7 +107,7 @@ func (client CertificateClient) CreateOrUpdatePreparer(ctx context.Context, reso "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const 
APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -207,7 +207,7 @@ func (client CertificateClient) DeletePreparer(ctx context.Context, resourceGrou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -299,7 +299,7 @@ func (client CertificateClient) GetPreparer(ctx context.Context, resourceGroupNa "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -391,7 +391,7 @@ func (client CertificateClient) GetEntityTagPreparer(ctx context.Context, resour "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -490,7 +490,7 @@ func (client CertificateClient) ListByServicePreparer(ctx context.Context, resou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/client.go similarity index 97% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/client.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/client.go index 6f9a3b876bf9..a1e4456c4fa7 100644 --- 
a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/client.go @@ -1,4 +1,4 @@ -// Package apimanagement implements the Azure ARM Apimanagement service API version 2018-06-01-preview. +// Package apimanagement implements the Azure ARM Apimanagement service API version 2018-01-01. // // ApiManagement Client package apimanagement diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/delegationsettings.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/delegationsettings.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/delegationsettings.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/delegationsettings.go index 586e3ce5c1c8..c9fd21e6de02 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/delegationsettings.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/delegationsettings.go @@ -94,7 +94,7 @@ func (client DelegationSettingsClient) CreateOrUpdatePreparer(ctx context.Contex "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -181,7 +181,7 @@ func (client DelegationSettingsClient) GetPreparer(ctx context.Context, resource "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -266,7 
+266,7 @@ func (client DelegationSettingsClient) GetEntityTagPreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -353,7 +353,7 @@ func (client DelegationSettingsClient) UpdatePreparer(ctx context.Context, resou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/diagnostic.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/diagnostic.go similarity index 88% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/diagnostic.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/diagnostic.go index 8a65d8bd17e0..2fa671933e33 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/diagnostic.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/diagnostic.go @@ -70,42 +70,7 @@ func (client DiagnosticClient) CreateOrUpdate(ctx context.Context, resourceGroup {Target: "diagnosticID", Name: validation.Pattern, Rule: `(^[\w]+$)|(^[\w][\w\-]+[\w]$)`, Chain: nil}}}, {TargetValue: parameters, Constraints: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.LoggerID", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.DiagnosticContractProperties.Sampling", Name: 
validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Sampling.Percentage", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Sampling.Percentage", Name: validation.InclusiveMaximum, Rule: int64(100), Chain: nil}, - {Target: "parameters.DiagnosticContractProperties.Sampling.Percentage", Name: validation.InclusiveMinimum, Rule: 0, Chain: nil}, - }}, - }}, - {Target: "parameters.DiagnosticContractProperties.Frontend", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Frontend.Request", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Frontend.Request.Body", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Frontend.Request.Body.Bytes", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Frontend.Request.Body.Bytes", Name: validation.InclusiveMaximum, Rule: int64(8192), Chain: nil}}}, - }}, - }}, - {Target: "parameters.DiagnosticContractProperties.Frontend.Response", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Frontend.Response.Body", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Frontend.Response.Body.Bytes", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Frontend.Response.Body.Bytes", Name: validation.InclusiveMaximum, Rule: int64(8192), Chain: nil}}}, - }}, - }}, - }}, - {Target: "parameters.DiagnosticContractProperties.Backend", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: 
"parameters.DiagnosticContractProperties.Backend.Request", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Backend.Request.Body", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Backend.Request.Body.Bytes", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Backend.Request.Body.Bytes", Name: validation.InclusiveMaximum, Rule: int64(8192), Chain: nil}}}, - }}, - }}, - {Target: "parameters.DiagnosticContractProperties.Backend.Response", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Backend.Response.Body", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Backend.Response.Body.Bytes", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Backend.Response.Body.Bytes", Name: validation.InclusiveMaximum, Rule: int64(8192), Chain: nil}}}, - }}, - }}, - }}, - }}}}}); err != nil { + Chain: []validation.Constraint{{Target: "parameters.DiagnosticContractProperties.Enabled", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { return result, validation.NewError("apimanagement.DiagnosticClient", "CreateOrUpdate", err.Error()) } @@ -139,7 +104,7 @@ func (client DiagnosticClient) CreateOrUpdatePreparer(ctx context.Context, resou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -238,7 +203,7 @@ func (client DiagnosticClient) DeletePreparer(ctx context.Context, resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const 
APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -329,7 +294,7 @@ func (client DiagnosticClient) GetPreparer(ctx context.Context, resourceGroupNam "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -420,7 +385,7 @@ func (client DiagnosticClient) GetEntityTagPreparer(ctx context.Context, resourc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -516,7 +481,7 @@ func (client DiagnosticClient) ListByServicePreparer(ctx context.Context, resour "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -656,7 +621,7 @@ func (client DiagnosticClient) UpdatePreparer(ctx context.Context, resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/diagnosticlogger.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/diagnosticlogger.go new file mode 100644 index 000000000000..45d6f3b4e6a4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/diagnosticlogger.go @@ -0,0 +1,478 @@ +package apimanagement + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// DiagnosticLoggerClient is the apiManagement Client +type DiagnosticLoggerClient struct { + BaseClient +} + +// NewDiagnosticLoggerClient creates an instance of the DiagnosticLoggerClient client. +func NewDiagnosticLoggerClient(subscriptionID string) DiagnosticLoggerClient { + return NewDiagnosticLoggerClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewDiagnosticLoggerClientWithBaseURI creates an instance of the DiagnosticLoggerClient client. +func NewDiagnosticLoggerClientWithBaseURI(baseURI string, subscriptionID string) DiagnosticLoggerClient { + return DiagnosticLoggerClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckEntityExists checks that logger entity specified by identifier is associated with the diagnostics entity. +// Parameters: +// resourceGroupName - the name of the resource group. +// serviceName - the name of the API Management service. +// diagnosticID - diagnostic identifier. Must be unique in the current API Management service instance. +// loggerid - logger identifier. 
Must be unique in the API Management service instance. +func (client DiagnosticLoggerClient) CheckEntityExists(ctx context.Context, resourceGroupName string, serviceName string, diagnosticID string, loggerid string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiagnosticLoggerClient.CheckEntityExists") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: serviceName, + Constraints: []validation.Constraint{{Target: "serviceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "serviceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "serviceName", Name: validation.Pattern, Rule: `^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$`, Chain: nil}}}, + {TargetValue: diagnosticID, + Constraints: []validation.Constraint{{Target: "diagnosticID", Name: validation.MaxLength, Rule: 80, Chain: nil}, + {Target: "diagnosticID", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "diagnosticID", Name: validation.Pattern, Rule: `(^[\w]+$)|(^[\w][\w\-]+[\w]$)`, Chain: nil}}}, + {TargetValue: loggerid, + Constraints: []validation.Constraint{{Target: "loggerid", Name: validation.MaxLength, Rule: 80, Chain: nil}, + {Target: "loggerid", Name: validation.Pattern, Rule: `(^[\w]+$)|(^[\w][\w\-]+[\w]$)`, Chain: nil}}}}); err != nil { + return result, validation.NewError("apimanagement.DiagnosticLoggerClient", "CheckEntityExists", err.Error()) + } + + req, err := client.CheckEntityExistsPreparer(ctx, resourceGroupName, serviceName, diagnosticID, loggerid) + if err != nil { + err = autorest.NewErrorWithError(err, "apimanagement.DiagnosticLoggerClient", "CheckEntityExists", nil, "Failure preparing request") + return + } + + resp, err := client.CheckEntityExistsSender(req) + if err != nil { + result.Response = resp + err = 
autorest.NewErrorWithError(err, "apimanagement.DiagnosticLoggerClient", "CheckEntityExists", resp, "Failure sending request") + return + } + + result, err = client.CheckEntityExistsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "apimanagement.DiagnosticLoggerClient", "CheckEntityExists", resp, "Failure responding to request") + } + + return +} + +// CheckEntityExistsPreparer prepares the CheckEntityExists request. +func (client DiagnosticLoggerClient) CheckEntityExistsPreparer(ctx context.Context, resourceGroupName string, serviceName string, diagnosticID string, loggerid string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diagnosticId": autorest.Encode("path", diagnosticID), + "loggerid": autorest.Encode("path", loggerid), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "serviceName": autorest.Encode("path", serviceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsHead(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/diagnostics/{diagnosticId}/loggers/{loggerid}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CheckEntityExistsSender sends the CheckEntityExists request. The method will close the +// http.Response Body if it receives an error. +func (client DiagnosticLoggerClient) CheckEntityExistsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CheckEntityExistsResponder handles the response to the CheckEntityExists request. 
The method always +// closes the http.Response Body. +func (client DiagnosticLoggerClient) CheckEntityExistsResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusNotFound), + autorest.ByClosing()) + result.Response = resp + return +} + +// CreateOrUpdate attaches a logger to a diagnostic. +// Parameters: +// resourceGroupName - the name of the resource group. +// serviceName - the name of the API Management service. +// diagnosticID - diagnostic identifier. Must be unique in the current API Management service instance. +// loggerid - logger identifier. Must be unique in the API Management service instance. +func (client DiagnosticLoggerClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, serviceName string, diagnosticID string, loggerid string) (result LoggerContract, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiagnosticLoggerClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: serviceName, + Constraints: []validation.Constraint{{Target: "serviceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "serviceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "serviceName", Name: validation.Pattern, Rule: `^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$`, Chain: nil}}}, + {TargetValue: diagnosticID, + Constraints: []validation.Constraint{{Target: "diagnosticID", Name: validation.MaxLength, Rule: 80, Chain: nil}, + {Target: "diagnosticID", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "diagnosticID", Name: validation.Pattern, Rule: `(^[\w]+$)|(^[\w][\w\-]+[\w]$)`, Chain: nil}}}, + {TargetValue: loggerid, + Constraints: 
[]validation.Constraint{{Target: "loggerid", Name: validation.MaxLength, Rule: 80, Chain: nil}, + {Target: "loggerid", Name: validation.Pattern, Rule: `(^[\w]+$)|(^[\w][\w\-]+[\w]$)`, Chain: nil}}}}); err != nil { + return result, validation.NewError("apimanagement.DiagnosticLoggerClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, serviceName, diagnosticID, loggerid) + if err != nil { + err = autorest.NewErrorWithError(err, "apimanagement.DiagnosticLoggerClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "apimanagement.DiagnosticLoggerClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "apimanagement.DiagnosticLoggerClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client DiagnosticLoggerClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, serviceName string, diagnosticID string, loggerid string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diagnosticId": autorest.Encode("path", diagnosticID), + "loggerid": autorest.Encode("path", loggerid), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "serviceName": autorest.Encode("path", serviceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/diagnostics/{diagnosticId}/loggers/{loggerid}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client DiagnosticLoggerClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client DiagnosticLoggerClient) CreateOrUpdateResponder(resp *http.Response) (result LoggerContract, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified Logger from Diagnostic. 
+// Parameters: +// resourceGroupName - the name of the resource group. +// serviceName - the name of the API Management service. +// diagnosticID - diagnostic identifier. Must be unique in the current API Management service instance. +// loggerid - logger identifier. Must be unique in the API Management service instance. +func (client DiagnosticLoggerClient) Delete(ctx context.Context, resourceGroupName string, serviceName string, diagnosticID string, loggerid string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiagnosticLoggerClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: serviceName, + Constraints: []validation.Constraint{{Target: "serviceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "serviceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "serviceName", Name: validation.Pattern, Rule: `^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$`, Chain: nil}}}, + {TargetValue: diagnosticID, + Constraints: []validation.Constraint{{Target: "diagnosticID", Name: validation.MaxLength, Rule: 80, Chain: nil}, + {Target: "diagnosticID", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "diagnosticID", Name: validation.Pattern, Rule: `(^[\w]+$)|(^[\w][\w\-]+[\w]$)`, Chain: nil}}}, + {TargetValue: loggerid, + Constraints: []validation.Constraint{{Target: "loggerid", Name: validation.MaxLength, Rule: 80, Chain: nil}, + {Target: "loggerid", Name: validation.Pattern, Rule: `(^[\w]+$)|(^[\w][\w\-]+[\w]$)`, Chain: nil}}}}); err != nil { + return result, validation.NewError("apimanagement.DiagnosticLoggerClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, serviceName, diagnosticID, loggerid) + if err != nil { + err = autorest.NewErrorWithError(err, 
"apimanagement.DiagnosticLoggerClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "apimanagement.DiagnosticLoggerClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "apimanagement.DiagnosticLoggerClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client DiagnosticLoggerClient) DeletePreparer(ctx context.Context, resourceGroupName string, serviceName string, diagnosticID string, loggerid string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diagnosticId": autorest.Encode("path", diagnosticID), + "loggerid": autorest.Encode("path", loggerid), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "serviceName": autorest.Encode("path", serviceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/diagnostics/{diagnosticId}/loggers/{loggerid}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client DiagnosticLoggerClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client DiagnosticLoggerClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// ListByService lists all loggers associated with the specified Diagnostic of the API Management service instance. +// Parameters: +// resourceGroupName - the name of the resource group. +// serviceName - the name of the API Management service. +// diagnosticID - diagnostic identifier. Must be unique in the current API Management service instance. +// filter - | Field | Supported operators | Supported functions | +// |-------------|------------------------|-----------------------------------| +// | id | ge, le, eq, ne, gt, lt | substringof, startswith, endswith | +// | type | eq | | +// top - number of records to return. +// skip - number of records to skip. 
+func (client DiagnosticLoggerClient) ListByService(ctx context.Context, resourceGroupName string, serviceName string, diagnosticID string, filter string, top *int32, skip *int32) (result LoggerCollectionPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiagnosticLoggerClient.ListByService") + defer func() { + sc := -1 + if result.lc.Response.Response != nil { + sc = result.lc.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: serviceName, + Constraints: []validation.Constraint{{Target: "serviceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "serviceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "serviceName", Name: validation.Pattern, Rule: `^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$`, Chain: nil}}}, + {TargetValue: diagnosticID, + Constraints: []validation.Constraint{{Target: "diagnosticID", Name: validation.MaxLength, Rule: 80, Chain: nil}, + {Target: "diagnosticID", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "diagnosticID", Name: validation.Pattern, Rule: `(^[\w]+$)|(^[\w][\w\-]+[\w]$)`, Chain: nil}}}, + {TargetValue: top, + Constraints: []validation.Constraint{{Target: "top", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "top", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}}}}}, + {TargetValue: skip, + Constraints: []validation.Constraint{{Target: "skip", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "skip", Name: validation.InclusiveMinimum, Rule: 0, Chain: nil}}}}}}); err != nil { + return result, validation.NewError("apimanagement.DiagnosticLoggerClient", "ListByService", err.Error()) + } + + result.fn = client.listByServiceNextResults + req, err := client.ListByServicePreparer(ctx, resourceGroupName, serviceName, diagnosticID, filter, top, skip) + if err != nil { + err = 
autorest.NewErrorWithError(err, "apimanagement.DiagnosticLoggerClient", "ListByService", nil, "Failure preparing request") + return + } + + resp, err := client.ListByServiceSender(req) + if err != nil { + result.lc.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "apimanagement.DiagnosticLoggerClient", "ListByService", resp, "Failure sending request") + return + } + + result.lc, err = client.ListByServiceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "apimanagement.DiagnosticLoggerClient", "ListByService", resp, "Failure responding to request") + } + + return +} + +// ListByServicePreparer prepares the ListByService request. +func (client DiagnosticLoggerClient) ListByServicePreparer(ctx context.Context, resourceGroupName string, serviceName string, diagnosticID string, filter string, top *int32, skip *int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diagnosticId": autorest.Encode("path", diagnosticID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "serviceName": autorest.Encode("path", serviceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if skip != nil { + queryParameters["$skip"] = autorest.Encode("query", *skip) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/diagnostics/{diagnosticId}/loggers", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + 
+// ListByServiceSender sends the ListByService request. The method will close the +// http.Response Body if it receives an error. +func (client DiagnosticLoggerClient) ListByServiceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListByServiceResponder handles the response to the ListByService request. The method always +// closes the http.Response Body. +func (client DiagnosticLoggerClient) ListByServiceResponder(resp *http.Response) (result LoggerCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByServiceNextResults retrieves the next set of results, if any. +func (client DiagnosticLoggerClient) listByServiceNextResults(ctx context.Context, lastResults LoggerCollection) (result LoggerCollection, err error) { + req, err := lastResults.loggerCollectionPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "apimanagement.DiagnosticLoggerClient", "listByServiceNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByServiceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "apimanagement.DiagnosticLoggerClient", "listByServiceNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByServiceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "apimanagement.DiagnosticLoggerClient", "listByServiceNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByServiceComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client DiagnosticLoggerClient) ListByServiceComplete(ctx context.Context, resourceGroupName string, serviceName string, diagnosticID string, filter string, top *int32, skip *int32) (result LoggerCollectionIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiagnosticLoggerClient.ListByService") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByService(ctx, resourceGroupName, serviceName, diagnosticID, filter, top, skip) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/emailtemplate.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/emailtemplate.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/emailtemplate.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/emailtemplate.go index 866f3c3e8571..e4f7e2f9b944 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/emailtemplate.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/emailtemplate.go @@ -106,7 +106,7 @@ func (client EmailTemplateClient) CreateOrUpdatePreparer(ctx context.Context, re "templateName": autorest.Encode("path", templateName), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -201,7 +201,7 @@ func (client EmailTemplateClient) DeletePreparer(ctx context.Context, resourceGr "templateName": autorest.Encode("path", templateName), } - const APIVersion = "2018-06-01-preview" + const APIVersion = 
"2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -288,7 +288,7 @@ func (client EmailTemplateClient) GetPreparer(ctx context.Context, resourceGroup "templateName": autorest.Encode("path", templateName), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -375,7 +375,7 @@ func (client EmailTemplateClient) GetEntityTagPreparer(ctx context.Context, reso "templateName": autorest.Encode("path", templateName), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -468,7 +468,7 @@ func (client EmailTemplateClient) ListByServicePreparer(ctx context.Context, res "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -599,7 +599,7 @@ func (client EmailTemplateClient) UpdatePreparer(ctx context.Context, resourceGr "templateName": autorest.Encode("path", templateName), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/group.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/group.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/group.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/group.go index c4d7aa5e48fd..25b4b7e010bb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/group.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/group.go @@ -108,7 +108,7 @@ func (client GroupClient) CreateOrUpdatePreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -207,7 +207,7 @@ func (client GroupClient) DeletePreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -298,7 +298,7 @@ func (client GroupClient) GetPreparer(ctx context.Context, resourceGroupName str "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -389,7 +389,7 @@ func (client GroupClient) GetEntityTagPreparer(ctx context.Context, resourceGrou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -488,7 +488,7 @@ func (client GroupClient) ListByServicePreparer(ctx context.Context, resourceGro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -628,7 +628,7 @@ func (client GroupClient) UpdatePreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": 
APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/groupuser.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/groupuser.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/groupuser.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/groupuser.go index 4bd921c7a549..8b8418323f80 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/groupuser.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/groupuser.go @@ -105,7 +105,7 @@ func (client GroupUserClient) CheckEntityExistsPreparer(ctx context.Context, res "uid": autorest.Encode("path", UID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -201,7 +201,7 @@ func (client GroupUserClient) CreatePreparer(ctx context.Context, resourceGroupN "uid": autorest.Encode("path", UID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -298,7 +298,7 @@ func (client GroupUserClient) DeletePreparer(ctx context.Context, resourceGroupN "uid": autorest.Encode("path", UID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -406,7 +406,7 @@ func (client GroupUserClient) ListPreparer(ctx context.Context, resourceGroupNam "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff 
--git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/identityprovider.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/identityprovider.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/identityprovider.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/identityprovider.go index 8a5a174fef3f..16d2a80365e6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/identityprovider.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/identityprovider.go @@ -104,7 +104,7 @@ func (client IdentityProviderClient) CreateOrUpdatePreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -199,7 +199,7 @@ func (client IdentityProviderClient) DeletePreparer(ctx context.Context, resourc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -286,7 +286,7 @@ func (client IdentityProviderClient) GetPreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -373,7 +373,7 @@ func (client IdentityProviderClient) GetEntityTagPreparer(ctx context.Context, r "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" 
+ const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -458,7 +458,7 @@ func (client IdentityProviderClient) ListByServicePreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -585,7 +585,7 @@ func (client IdentityProviderClient) UpdatePreparer(ctx context.Context, resourc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/logger.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/logger.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/logger.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/logger.go index 171c5f82f7c7..620b59f425c1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/logger.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/logger.go @@ -106,7 +106,7 @@ func (client LoggerClient) CreateOrUpdatePreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -204,7 +204,7 @@ func (client LoggerClient) DeletePreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", 
client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -294,7 +294,7 @@ func (client LoggerClient) GetPreparer(ctx context.Context, resourceGroupName st "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -384,7 +384,7 @@ func (client LoggerClient) GetEntityTagPreparer(ctx context.Context, resourceGro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -481,7 +481,7 @@ func (client LoggerClient) ListByServicePreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -620,7 +620,7 @@ func (client LoggerClient) UpdatePreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/models.go similarity index 98% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/models.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/models.go index 6636fce4c046..55a88944bbe1 100644 --- 
a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/models.go @@ -30,20 +30,7 @@ import ( ) // The package's fully qualified name. -const fqdn = "github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement" - -// AlwaysLog enumerates the values for always log. -type AlwaysLog string - -const ( - // AllErrors Always log all erroneous request regardless of sampling settings. - AllErrors AlwaysLog = "allErrors" -) - -// PossibleAlwaysLogValues returns an array of possible values for the AlwaysLog const type. -func PossibleAlwaysLogValues() []AlwaysLog { - return []AlwaysLog{AllErrors} -} +const fqdn = "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement" // APIType enumerates the values for api type. type APIType string @@ -474,11 +461,11 @@ func PossibleProtocolValues() []Protocol { type ResourceSkuCapacityScaleType string const ( - // Automatic Supported scale type automatic. + // Automatic ... Automatic ResourceSkuCapacityScaleType = "automatic" - // Manual Supported scale type manual. + // Manual ... Manual ResourceSkuCapacityScaleType = "manual" - // None Scaling not supported. + // None ... None ResourceSkuCapacityScaleType = "none" ) @@ -487,27 +474,12 @@ func PossibleResourceSkuCapacityScaleTypeValues() []ResourceSkuCapacityScaleType return []ResourceSkuCapacityScaleType{Automatic, Manual, None} } -// SamplingType enumerates the values for sampling type. -type SamplingType string - -const ( - // Fixed Fixed-rate sampling. - Fixed SamplingType = "fixed" -) - -// PossibleSamplingTypeValues returns an array of possible values for the SamplingType const type. -func PossibleSamplingTypeValues() []SamplingType { - return []SamplingType{Fixed} -} - // SkuType enumerates the values for sku type. 
type SkuType string const ( // SkuTypeBasic Basic SKU of Api Management. SkuTypeBasic SkuType = "Basic" - // SkuTypeConsumption Consumption SKU of Api Management. - SkuTypeConsumption SkuType = "Consumption" // SkuTypeDeveloper Developer SKU of Api Management. SkuTypeDeveloper SkuType = "Developer" // SkuTypePremium Premium SKU of Api Management. @@ -518,7 +490,7 @@ const ( // PossibleSkuTypeValues returns an array of possible values for the SkuType const type. func PossibleSkuTypeValues() []SkuType { - return []SkuType{SkuTypeBasic, SkuTypeConsumption, SkuTypeDeveloper, SkuTypePremium, SkuTypeStandard} + return []SkuType{SkuTypeBasic, SkuTypeDeveloper, SkuTypePremium, SkuTypeStandard} } // SoapAPIType enumerates the values for soap api type. @@ -1002,8 +974,6 @@ type APIContractProperties struct { APIVersionDescription *string `json:"apiVersionDescription,omitempty"` // APIVersionSetID - A resource identifier for the related ApiVersionSet. APIVersionSetID *string `json:"apiVersionSetId,omitempty"` - // SubscriptionRequired - Specifies whether an API or Product subscription is required for accessing the API. - SubscriptionRequired *bool `json:"subscriptionRequired,omitempty"` } // APIContractUpdateProperties API update contract properties. @@ -1038,8 +1008,6 @@ type APIContractUpdateProperties struct { APIVersionDescription *string `json:"apiVersionDescription,omitempty"` // APIVersionSetID - A resource identifier for the related ApiVersionSet. APIVersionSetID *string `json:"apiVersionSetId,omitempty"` - // SubscriptionRequired - Specifies whether an API or Product subscription is required for accessing the API. - SubscriptionRequired *bool `json:"subscriptionRequired,omitempty"` } // APICreateOrUpdateParameter API Create or Update Parameters. @@ -1124,8 +1092,6 @@ type APICreateOrUpdateProperties struct { APIVersionDescription *string `json:"apiVersionDescription,omitempty"` // APIVersionSetID - A resource identifier for the related ApiVersionSet. 
APIVersionSetID *string `json:"apiVersionSetId,omitempty"` - // SubscriptionRequired - Specifies whether an API or Product subscription is required for accessing the API. - SubscriptionRequired *bool `json:"subscriptionRequired,omitempty"` } // APICreateOrUpdatePropertiesWsdlSelector criteria to limit import of WSDL to a subset of the document. @@ -1160,8 +1126,6 @@ type APIEntityBaseContract struct { APIVersionDescription *string `json:"apiVersionDescription,omitempty"` // APIVersionSetID - A resource identifier for the related ApiVersionSet. APIVersionSetID *string `json:"apiVersionSetId,omitempty"` - // SubscriptionRequired - Specifies whether an API or Product subscription is required for accessing the API. - SubscriptionRequired *bool `json:"subscriptionRequired,omitempty"` } // APIExportResult API Export result Blob Uri. @@ -1654,8 +1618,6 @@ type APITagResourceContractProperties struct { APIVersionDescription *string `json:"apiVersionDescription,omitempty"` // APIVersionSetID - A resource identifier for the related ApiVersionSet. APIVersionSetID *string `json:"apiVersionSetId,omitempty"` - // SubscriptionRequired - Specifies whether an API or Product subscription is required for accessing the API. - SubscriptionRequired *bool `json:"subscriptionRequired,omitempty"` } // APIUpdateContract API update contract details. @@ -2930,12 +2892,6 @@ func (bup *BackendUpdateParameters) UnmarshalJSON(body []byte) error { return nil } -// BodyDiagnosticSettings body logging settings. -type BodyDiagnosticSettings struct { - // Bytes - Number of request body bytes to log. - Bytes *int32 `json:"bytes,omitempty"` -} - // CertificateCollection paged Certificates list representation. 
type CertificateCollection struct { autorest.Response `json:"-"` @@ -3504,18 +3460,8 @@ func (dc *DiagnosticContract) UnmarshalJSON(body []byte) error { // DiagnosticContractProperties diagnostic Entity Properties type DiagnosticContractProperties struct { - // AlwaysLog - Specifies for what type of messages sampling settings should not apply. Possible values include: 'AllErrors' - AlwaysLog AlwaysLog `json:"alwaysLog,omitempty"` - // LoggerID - Resource Id of a target logger. - LoggerID *string `json:"loggerId,omitempty"` - // Sampling - Sampling settings for Diagnostic. - Sampling *SamplingSettings `json:"sampling,omitempty"` - // Frontend - Diagnostic settings for incoming/outgoing HTTP messages to the Gateway. - Frontend *PipelineDiagnosticSettings `json:"frontend,omitempty"` - // Backend - Diagnostic settings for incoming/outgoing HTTP messages to the Backend - Backend *PipelineDiagnosticSettings `json:"backend,omitempty"` - // EnableHTTPCorrelationHeaders - Whether to process Correlation Headers coming to Api Management Service. Only applicable to Application Insights diagnostics. Default is true. - EnableHTTPCorrelationHeaders *bool `json:"enableHttpCorrelationHeaders,omitempty"` + // Enabled - Indicates whether a diagnostic should receive data or not. + Enabled *bool `json:"enabled,omitempty"` } // EmailTemplateCollection paged email template list representation. @@ -4129,7 +4075,7 @@ type GroupContractProperties struct { BuiltIn *bool `json:"builtIn,omitempty"` // Type - Group type. Possible values include: 'Custom', 'System', 'External' Type GroupType `json:"type,omitempty"` - // ExternalID - For external groups, this property contains the id of the group from the external identity provider, e.g. for Azure Active Directory `aad://.onmicrosoft.com/groups/`; otherwise the value is null. + // ExternalID - For external groups, this property contains the id of the group from the external identity provider, e.g. 
for Azure Active Directory aad://.onmicrosoft.com/groups/; otherwise the value is null. ExternalID *string `json:"externalId,omitempty"` } @@ -4180,7 +4126,7 @@ type GroupCreateParametersProperties struct { Description *string `json:"description,omitempty"` // Type - Group type. Possible values include: 'Custom', 'System', 'External' Type GroupType `json:"type,omitempty"` - // ExternalID - Identifier of the external groups, this property contains the id of the group from the external identity provider, e.g. for Azure Active Directory `aad://.onmicrosoft.com/groups/`; otherwise the value is null. + // ExternalID - Identifier of the external groups, this property contains the id of the group from the external identity provider, e.g. for Azure Active Directory aad://.onmicrosoft.com/groups/; otherwise the value is null. ExternalID *string `json:"externalId,omitempty"` } @@ -4231,7 +4177,7 @@ type GroupUpdateParametersProperties struct { Description *string `json:"description,omitempty"` // Type - Group type. Possible values include: 'Custom', 'System', 'External' Type GroupType `json:"type,omitempty"` - // ExternalID - Identifier of the external groups, this property contains the id of the group from the external identity provider, e.g. for Azure Active Directory `aad://.onmicrosoft.com/groups/`; otherwise the value is null. + // ExternalID - Identifier of the external groups, this property contains the id of the group from the external identity provider, e.g. for Azure Active Directory aad://.onmicrosoft.com/groups/; otherwise the value is null. ExternalID *string `json:"externalId,omitempty"` } @@ -4265,14 +4211,6 @@ type HostnameConfigurationOld struct { Certificate *CertificateInformation `json:"certificate,omitempty"` } -// HTTPMessageDiagnostic http message diagnostic settings. -type HTTPMessageDiagnostic struct { - // Headers - Array of HTTP Headers to log. - Headers *[]string `json:"headers,omitempty"` - // Body - Body logging settings. 
- Body *BodyDiagnosticSettings `json:"body,omitempty"` -} - // IdentityProviderBaseParameters identity Provider Base Parameter Properties. type IdentityProviderBaseParameters struct { // Type - Identity Provider Type identifier. Possible values include: 'Facebook', 'Google', 'Microsoft', 'Twitter', 'Aad', 'AadB2C' @@ -5633,8 +5571,6 @@ type LoggerContractProperties struct { Credentials map[string]*string `json:"credentials"` // IsBuffered - Whether records are buffered in the logger before publishing. Default is assumed to be true. IsBuffered *bool `json:"isBuffered,omitempty"` - // ResourceID - Azure Resource Id of a log target (either Azure Event Hub resource or Azure Application Insights resource). - ResourceID *string `json:"resourceId,omitempty"` } // MarshalJSON is the custom marshaler for LoggerContractProperties. @@ -5652,9 +5588,6 @@ func (lcp LoggerContractProperties) MarshalJSON() ([]byte, error) { if lcp.IsBuffered != nil { objectMap["isBuffered"] = lcp.IsBuffered } - if lcp.ResourceID != nil { - objectMap["resourceId"] = lcp.ResourceID - } return json.Marshal(objectMap) } @@ -6852,14 +6785,6 @@ type ParameterContract struct { Values *[]string `json:"values,omitempty"` } -// PipelineDiagnosticSettings diagnostic settings for incoming/outgoing HTTP messages to the Gateway. -type PipelineDiagnosticSettings struct { - // Request - Diagnostic settings for request. - Request *HTTPMessageDiagnostic `json:"request,omitempty"` - // Response - Diagnostic settings for response. - Response *HTTPMessageDiagnostic `json:"response,omitempty"` -} - // PolicyCollection the response of the list policy operation. type PolicyCollection struct { autorest.Response `json:"-"` @@ -7488,9 +7413,9 @@ type ProductContractProperties struct { Terms *string `json:"terms,omitempty"` // SubscriptionRequired - Whether a product subscription is required for accessing APIs included in this product. 
If true, the product is referred to as "protected" and a valid subscription key is required for a request to an API included in the product to succeed. If false, the product is referred to as "open" and requests to an API included in the product can be made without a subscription key. If property is omitted when creating a new product it's value is assumed to be true. SubscriptionRequired *bool `json:"subscriptionRequired,omitempty"` - // ApprovalRequired - whether subscription approval is required. If false, new subscriptions will be approved automatically enabling developers to call the product’s APIs immediately after subscribing. If true, administrators must manually approve the subscription before the developer can any of the product’s APIs. Can be present only if subscriptionRequired property is present and has a value of false. + // ApprovalRequired - whether subscription approval is required. If false, new subscriptions will be approved automatically enabling developers to call the product’s APIs immediately after subscribing. If true, administrators must manually approve the subscription before the developer can any of the product’s APIs. Can be present only if subscriptionRequired property is present and has a value of false. ApprovalRequired *bool `json:"approvalRequired,omitempty"` - // SubscriptionsLimit - Whether the number of subscriptions a user can have to this product at the same time. Set to null or omit to allow unlimited per user subscriptions. Can be present only if subscriptionRequired property is present and has a value of false. + // SubscriptionsLimit - Whether the number of subscriptions a user can have to this product at the same time. Set to null or omit to allow unlimited per user subscriptions. Can be present only if subscriptionRequired property is present and has a value of false. SubscriptionsLimit *int32 `json:"subscriptionsLimit,omitempty"` // State - whether product is published or not. 
Published products are discoverable by users of developer portal. Non published products are visible only to administrators. Default state of Product is notPublished. Possible values include: 'NotPublished', 'Published' State ProductState `json:"state,omitempty"` @@ -7504,9 +7429,9 @@ type ProductEntityBaseParameters struct { Terms *string `json:"terms,omitempty"` // SubscriptionRequired - Whether a product subscription is required for accessing APIs included in this product. If true, the product is referred to as "protected" and a valid subscription key is required for a request to an API included in the product to succeed. If false, the product is referred to as "open" and requests to an API included in the product can be made without a subscription key. If property is omitted when creating a new product it's value is assumed to be true. SubscriptionRequired *bool `json:"subscriptionRequired,omitempty"` - // ApprovalRequired - whether subscription approval is required. If false, new subscriptions will be approved automatically enabling developers to call the product’s APIs immediately after subscribing. If true, administrators must manually approve the subscription before the developer can any of the product’s APIs. Can be present only if subscriptionRequired property is present and has a value of false. + // ApprovalRequired - whether subscription approval is required. If false, new subscriptions will be approved automatically enabling developers to call the product’s APIs immediately after subscribing. If true, administrators must manually approve the subscription before the developer can any of the product’s APIs. Can be present only if subscriptionRequired property is present and has a value of false. ApprovalRequired *bool `json:"approvalRequired,omitempty"` - // SubscriptionsLimit - Whether the number of subscriptions a user can have to this product at the same time. Set to null or omit to allow unlimited per user subscriptions. 
Can be present only if subscriptionRequired property is present and has a value of false. + // SubscriptionsLimit - Whether the number of subscriptions a user can have to this product at the same time. Set to null or omit to allow unlimited per user subscriptions. Can be present only if subscriptionRequired property is present and has a value of false. SubscriptionsLimit *int32 `json:"subscriptionsLimit,omitempty"` // State - whether product is published or not. Published products are discoverable by users of developer portal. Non published products are visible only to administrators. Default state of Product is notPublished. Possible values include: 'NotPublished', 'Published' State ProductState `json:"state,omitempty"` @@ -7524,9 +7449,9 @@ type ProductTagResourceContractProperties struct { Terms *string `json:"terms,omitempty"` // SubscriptionRequired - Whether a product subscription is required for accessing APIs included in this product. If true, the product is referred to as "protected" and a valid subscription key is required for a request to an API included in the product to succeed. If false, the product is referred to as "open" and requests to an API included in the product can be made without a subscription key. If property is omitted when creating a new product it's value is assumed to be true. SubscriptionRequired *bool `json:"subscriptionRequired,omitempty"` - // ApprovalRequired - whether subscription approval is required. If false, new subscriptions will be approved automatically enabling developers to call the product’s APIs immediately after subscribing. If true, administrators must manually approve the subscription before the developer can any of the product’s APIs. Can be present only if subscriptionRequired property is present and has a value of false. + // ApprovalRequired - whether subscription approval is required. 
If false, new subscriptions will be approved automatically enabling developers to call the product’s APIs immediately after subscribing. If true, administrators must manually approve the subscription before the developer can any of the product’s APIs. Can be present only if subscriptionRequired property is present and has a value of false. ApprovalRequired *bool `json:"approvalRequired,omitempty"` - // SubscriptionsLimit - Whether the number of subscriptions a user can have to this product at the same time. Set to null or omit to allow unlimited per user subscriptions. Can be present only if subscriptionRequired property is present and has a value of false. + // SubscriptionsLimit - Whether the number of subscriptions a user can have to this product at the same time. Set to null or omit to allow unlimited per user subscriptions. Can be present only if subscriptionRequired property is present and has a value of false. SubscriptionsLimit *int32 `json:"subscriptionsLimit,omitempty"` // State - whether product is published or not. Published products are discoverable by users of developer portal. Non published products are visible only to administrators. Default state of Product is notPublished. Possible values include: 'NotPublished', 'Published' State ProductState `json:"state,omitempty"` @@ -7581,9 +7506,9 @@ type ProductUpdateProperties struct { Terms *string `json:"terms,omitempty"` // SubscriptionRequired - Whether a product subscription is required for accessing APIs included in this product. If true, the product is referred to as "protected" and a valid subscription key is required for a request to an API included in the product to succeed. If false, the product is referred to as "open" and requests to an API included in the product can be made without a subscription key. If property is omitted when creating a new product it's value is assumed to be true. 
SubscriptionRequired *bool `json:"subscriptionRequired,omitempty"` - // ApprovalRequired - whether subscription approval is required. If false, new subscriptions will be approved automatically enabling developers to call the product’s APIs immediately after subscribing. If true, administrators must manually approve the subscription before the developer can any of the product’s APIs. Can be present only if subscriptionRequired property is present and has a value of false. + // ApprovalRequired - whether subscription approval is required. If false, new subscriptions will be approved automatically enabling developers to call the product’s APIs immediately after subscribing. If true, administrators must manually approve the subscription before the developer can any of the product’s APIs. Can be present only if subscriptionRequired property is present and has a value of false. ApprovalRequired *bool `json:"approvalRequired,omitempty"` - // SubscriptionsLimit - Whether the number of subscriptions a user can have to this product at the same time. Set to null or omit to allow unlimited per user subscriptions. Can be present only if subscriptionRequired property is present and has a value of false. + // SubscriptionsLimit - Whether the number of subscriptions a user can have to this product at the same time. Set to null or omit to allow unlimited per user subscriptions. Can be present only if subscriptionRequired property is present and has a value of false. SubscriptionsLimit *int32 `json:"subscriptionsLimit,omitempty"` // State - whether product is published or not. Published products are discoverable by users of developer portal. Non published products are visible only to administrators. Default state of Product is notPublished. Possible values include: 'NotPublished', 'Published' State ProductState `json:"state,omitempty"` @@ -8618,7 +8543,7 @@ type Resource struct { // ResourceSku describes an available API Management SKU. 
type ResourceSku struct { - // Name - Name of the Sku. Possible values include: 'SkuTypeDeveloper', 'SkuTypeStandard', 'SkuTypePremium', 'SkuTypeBasic', 'SkuTypeConsumption' + // Name - Name of the Sku. Possible values include: 'SkuTypeDeveloper', 'SkuTypeStandard', 'SkuTypePremium', 'SkuTypeBasic' Name SkuType `json:"name,omitempty"` } @@ -8802,14 +8727,6 @@ type ResponseContract struct { Headers *[]ParameterContract `json:"headers,omitempty"` } -// SamplingSettings sampling settings for Diagnostic. -type SamplingSettings struct { - // SamplingType - Sampling type. Possible values include: 'Fixed' - SamplingType SamplingType `json:"samplingType,omitempty"` - // Percentage - Rate of sampling for fixed-rate sampling. - Percentage *float64 `json:"percentage,omitempty"` -} - // SaveConfigurationParameter parameters supplied to the Save Tenant Configuration operation. type SaveConfigurationParameter struct { // Branch - The name of the Git branch in which to commit the current configuration snapshot. @@ -9775,7 +9692,7 @@ func (future *ServiceRestoreFuture) Result(client ServiceClient) (sr ServiceReso // ServiceSkuProperties API Management service resource SKU properties. type ServiceSkuProperties struct { - // Name - Name of the Sku. Possible values include: 'SkuTypeDeveloper', 'SkuTypeStandard', 'SkuTypePremium', 'SkuTypeBasic', 'SkuTypeConsumption' + // Name - Name of the Sku. Possible values include: 'SkuTypeDeveloper', 'SkuTypeStandard', 'SkuTypePremium', 'SkuTypeBasic' Name SkuType `json:"name,omitempty"` // Capacity - Capacity of the SKU (number of deployed units of the SKU). The default value is 1. Capacity *int32 `json:"capacity,omitempty"` @@ -10330,10 +10247,10 @@ func (sc *SubscriptionContract) UnmarshalJSON(body []byte) error { // SubscriptionContractProperties subscription details. type SubscriptionContractProperties struct { - // OwnerID - The user resource identifier of the subscription owner. 
The value is a valid relative URL in the format of /users/{uid} where {uid} is a user identifier. - OwnerID *string `json:"ownerId,omitempty"` - // Scope - Scope like /products/{productId} or /apis or /apis/{apiId}. - Scope *string `json:"scope,omitempty"` + // UserID - The user resource identifier of the subscription owner. The value is a valid relative URL in the format of /users/{uid} where {uid} is a user identifier. + UserID *string `json:"userId,omitempty"` + // ProductID - The product resource identifier of the subscribed product. The value is a valid relative URL in the format of /products/{productId} where {productId} is a product identifier. + ProductID *string `json:"productId,omitempty"` // DisplayName - The name of the subscription, or null if the subscription has no name. DisplayName *string `json:"displayName,omitempty"` // State - Subscription state. Possible states are * active – the subscription is active, * suspended – the subscription is blocked, and the subscriber cannot call any APIs of the product, * submitted – the subscription request has been made by the developer, but has not yet been approved or rejected, * rejected – the subscription request has been denied by an administrator, * cancelled – the subscription has been cancelled by the developer or administrator, * expired – the subscription reached its expiration date and was deactivated. Possible values include: 'Suspended', 'Active', 'Expired', 'Submitted', 'Rejected', 'Cancelled' @@ -10354,16 +10271,14 @@ type SubscriptionContractProperties struct { SecondaryKey *string `json:"secondaryKey,omitempty"` // StateComment - Optional subscription comment added by an administrator. StateComment *string `json:"stateComment,omitempty"` - // AllowTracing - Determines whether tracing is enabled - AllowTracing *bool `json:"allowTracing,omitempty"` } // SubscriptionCreateParameterProperties parameters supplied to the Create subscription operation. 
type SubscriptionCreateParameterProperties struct { - // OwnerID - User (user id path) for whom subscription is being created in form /users/{uid} - OwnerID *string `json:"ownerId,omitempty"` - // Scope - Scope like /products/{productId} or /apis or /apis/{apiId}. - Scope *string `json:"scope,omitempty"` + // UserID - User (user id path) for whom subscription is being created in form /users/{uid} + UserID *string `json:"userId,omitempty"` + // ProductID - Product (product id path) for which subscription is being created in form /products/{productId} + ProductID *string `json:"productId,omitempty"` // DisplayName - Subscription name. DisplayName *string `json:"displayName,omitempty"` // PrimaryKey - Primary subscription key. If not specified during request key will be generated automatically. @@ -10372,8 +10287,6 @@ type SubscriptionCreateParameterProperties struct { SecondaryKey *string `json:"secondaryKey,omitempty"` // State - Initial subscription state. If no value is specified, subscription is created with Submitted state. Possible states are * active – the subscription is active, * suspended – the subscription is blocked, and the subscriber cannot call any APIs of the product, * submitted – the subscription request has been made by the developer, but has not yet been approved or rejected, * rejected – the subscription request has been denied by an administrator, * cancelled – the subscription has been cancelled by the developer or administrator, * expired – the subscription reached its expiration date and was deactivated. Possible values include: 'Suspended', 'Active', 'Expired', 'Submitted', 'Rejected', 'Cancelled' State SubscriptionState `json:"state,omitempty"` - // AllowTracing - Determines whether tracing can be enabled - AllowTracing *bool `json:"allowTracing,omitempty"` } // SubscriptionCreateParameters subscription create details. 
@@ -10431,10 +10344,10 @@ type SubscriptionsDelegationSettingsProperties struct { // SubscriptionUpdateParameterProperties parameters supplied to the Update subscription operation. type SubscriptionUpdateParameterProperties struct { - // OwnerID - User identifier path: /users/{uid} - OwnerID *string `json:"ownerId,omitempty"` - // Scope - Scope like /products/{productId} or /apis or /apis/{apiId} - Scope *string `json:"scope,omitempty"` + // UserID - User identifier path: /users/{uid} + UserID *string `json:"userId,omitempty"` + // ProductID - Product identifier path: /products/{productId} + ProductID *string `json:"productId,omitempty"` // ExpirationDate - Subscription expiration date. The setting is for audit purposes only and the subscription is not automatically expired. The subscription lifecycle can be managed by using the `state` property. The date conforms to the following format: `yyyy-MM-ddTHH:mm:ssZ` as specified by the ISO 8601 standard. ExpirationDate *date.Time `json:"expirationDate,omitempty"` // DisplayName - Subscription name. @@ -10447,8 +10360,6 @@ type SubscriptionUpdateParameterProperties struct { State SubscriptionState `json:"state,omitempty"` // StateComment - Comments describing subscription state change by the administrator. StateComment *string `json:"stateComment,omitempty"` - // AllowTracing - Determines whether tracing can be enabled - AllowTracing *bool `json:"allowTracing,omitempty"` } // SubscriptionUpdateParameters subscription update details. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/networkstatus.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/networkstatus.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/networkstatus.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/networkstatus.go index a9e9f19b7e0d..d4374d7b4c36 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/networkstatus.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/networkstatus.go @@ -99,7 +99,7 @@ func (client NetworkStatusClient) ListByLocationPreparer(ctx context.Context, re "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -185,7 +185,7 @@ func (client NetworkStatusClient) ListByServicePreparer(ctx context.Context, res "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/notification.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/notification.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/notification.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/notification.go index 
66b6186e2a6a..89f789c04707 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/notification.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/notification.go @@ -96,7 +96,7 @@ func (client NotificationClient) CreateOrUpdatePreparer(ctx context.Context, res "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -187,7 +187,7 @@ func (client NotificationClient) GetPreparer(ctx context.Context, resourceGroupN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -281,7 +281,7 @@ func (client NotificationClient) ListByServicePreparer(ctx context.Context, reso "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/notificationrecipientemail.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/notificationrecipientemail.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/notificationrecipientemail.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/notificationrecipientemail.go index 17940b3fcb1a..4c391059297f 100644 --- 
a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/notificationrecipientemail.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/notificationrecipientemail.go @@ -97,7 +97,7 @@ func (client NotificationRecipientEmailClient) CheckEntityExistsPreparer(ctx con "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -185,7 +185,7 @@ func (client NotificationRecipientEmailClient) CreateOrUpdatePreparer(ctx contex "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -274,7 +274,7 @@ func (client NotificationRecipientEmailClient) DeletePreparer(ctx context.Contex "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -360,7 +360,7 @@ func (client NotificationRecipientEmailClient) ListByNotificationPreparer(ctx co "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/notificationrecipientuser.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/notificationrecipientuser.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/notificationrecipientuser.go rename to 
vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/notificationrecipientuser.go index 030c5ac4ebc2..3e0bd294dd5b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/notificationrecipientuser.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/notificationrecipientuser.go @@ -101,7 +101,7 @@ func (client NotificationRecipientUserClient) CheckEntityExistsPreparer(ctx cont "uid": autorest.Encode("path", UID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -193,7 +193,7 @@ func (client NotificationRecipientUserClient) CreateOrUpdatePreparer(ctx context "uid": autorest.Encode("path", UID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -286,7 +286,7 @@ func (client NotificationRecipientUserClient) DeletePreparer(ctx context.Context "uid": autorest.Encode("path", UID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -372,7 +372,7 @@ func (client NotificationRecipientUserClient) ListByNotificationPreparer(ctx con "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/openidconnectprovider.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/openidconnectprovider.go similarity index 99% rename from 
vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/openidconnectprovider.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/openidconnectprovider.go index bbaed9f315d6..f5e55fc26271 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/openidconnectprovider.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/openidconnectprovider.go @@ -107,7 +107,7 @@ func (client OpenIDConnectProviderClient) CreateOrUpdatePreparer(ctx context.Con "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -205,7 +205,7 @@ func (client OpenIDConnectProviderClient) DeletePreparer(ctx context.Context, re "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -295,7 +295,7 @@ func (client OpenIDConnectProviderClient) GetPreparer(ctx context.Context, resou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -385,7 +385,7 @@ func (client OpenIDConnectProviderClient) GetEntityTagPreparer(ctx context.Conte "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -482,7 +482,7 @@ func (client OpenIDConnectProviderClient) ListByServicePreparer(ctx context.Cont "subscriptionId": autorest.Encode("path", 
client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -621,7 +621,7 @@ func (client OpenIDConnectProviderClient) UpdatePreparer(ctx context.Context, re "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/operation.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/operation.go similarity index 91% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/operation.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/operation.go index 7db02be7c67e..225a0098007d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/operation.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/operation.go @@ -57,8 +57,7 @@ func NewOperationClientWithBaseURI(baseURI string, subscriptionID string) Operat // | urlTemplate | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith | // top - number of records to return. // skip - number of records to skip. 
-// includeNotTaggedOperations - include not tagged operations in response -func (client OperationClient) ListByTags(ctx context.Context, resourceGroupName string, serviceName string, apiid string, filter string, top *int32, skip *int32, includeNotTaggedOperations *bool) (result TagResourceCollectionPage, err error) { +func (client OperationClient) ListByTags(ctx context.Context, resourceGroupName string, serviceName string, apiid string, filter string, top *int32, skip *int32) (result TagResourceCollectionPage, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/OperationClient.ListByTags") defer func() { @@ -88,7 +87,7 @@ func (client OperationClient) ListByTags(ctx context.Context, resourceGroupName } result.fn = client.listByTagsNextResults - req, err := client.ListByTagsPreparer(ctx, resourceGroupName, serviceName, apiid, filter, top, skip, includeNotTaggedOperations) + req, err := client.ListByTagsPreparer(ctx, resourceGroupName, serviceName, apiid, filter, top, skip) if err != nil { err = autorest.NewErrorWithError(err, "apimanagement.OperationClient", "ListByTags", nil, "Failure preparing request") return @@ -110,7 +109,7 @@ func (client OperationClient) ListByTags(ctx context.Context, resourceGroupName } // ListByTagsPreparer prepares the ListByTags request. 
-func (client OperationClient) ListByTagsPreparer(ctx context.Context, resourceGroupName string, serviceName string, apiid string, filter string, top *int32, skip *int32, includeNotTaggedOperations *bool) (*http.Request, error) { +func (client OperationClient) ListByTagsPreparer(ctx context.Context, resourceGroupName string, serviceName string, apiid string, filter string, top *int32, skip *int32) (*http.Request, error) { pathParameters := map[string]interface{}{ "apiId": autorest.Encode("path", apiid), "resourceGroupName": autorest.Encode("path", resourceGroupName), @@ -118,7 +117,7 @@ func (client OperationClient) ListByTagsPreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -131,11 +130,6 @@ func (client OperationClient) ListByTagsPreparer(ctx context.Context, resourceGr if skip != nil { queryParameters["$skip"] = autorest.Encode("query", *skip) } - if includeNotTaggedOperations != nil { - queryParameters["includeNotTaggedOperations"] = autorest.Encode("query", *includeNotTaggedOperations) - } else { - queryParameters["includeNotTaggedOperations"] = autorest.Encode("query", false) - } preparer := autorest.CreatePreparer( autorest.AsGet(), @@ -187,7 +181,7 @@ func (client OperationClient) listByTagsNextResults(ctx context.Context, lastRes } // ListByTagsComplete enumerates all values, automatically crossing page boundaries as required. 
-func (client OperationClient) ListByTagsComplete(ctx context.Context, resourceGroupName string, serviceName string, apiid string, filter string, top *int32, skip *int32, includeNotTaggedOperations *bool) (result TagResourceCollectionIterator, err error) { +func (client OperationClient) ListByTagsComplete(ctx context.Context, resourceGroupName string, serviceName string, apiid string, filter string, top *int32, skip *int32) (result TagResourceCollectionIterator, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/OperationClient.ListByTags") defer func() { @@ -198,6 +192,6 @@ func (client OperationClient) ListByTagsComplete(ctx context.Context, resourceGr tracing.EndSpan(ctx, sc, err) }() } - result.page, err = client.ListByTags(ctx, resourceGroupName, serviceName, apiid, filter, top, skip, includeNotTaggedOperations) + result.page, err = client.ListByTags(ctx, resourceGroupName, serviceName, apiid, filter, top, skip) return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/operations.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/operations.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/operations.go index 81401d9a5d1c..30ef72a43b4e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/operations.go @@ -76,7 +76,7 @@ func (client OperationsClient) List(ctx context.Context) (result OperationListRe // ListPreparer prepares the List request. 
func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) { - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/policy.go similarity index 98% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/policy.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/policy.go index 26ad01e234e7..66e5a4dba361 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/policy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/policy.go @@ -98,7 +98,7 @@ func (client PolicyClient) CreateOrUpdatePreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -188,7 +188,7 @@ func (client PolicyClient) DeletePreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -274,7 +274,7 @@ func (client PolicyClient) GetPreparer(ctx context.Context, resourceGroupName st "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -360,7 +360,7 @@ func 
(client PolicyClient) GetEntityTagPreparer(ctx context.Context, resourceGro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -445,7 +445,7 @@ func (client PolicyClient) ListByServicePreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/policysnippets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/policysnippets.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/policysnippets.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/policysnippets.go index 9feea61ed4ac..e680cdb0b657 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/policysnippets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/policysnippets.go @@ -94,7 +94,7 @@ func (client PolicySnippetsClient) ListByServicePreparer(ctx context.Context, re "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/product.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/product.go 
similarity index 80% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/product.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/product.go index 9adbc03fef7d..8fce6712aee1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/product.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/product.go @@ -108,7 +108,7 @@ func (client ProductClient) CreateOrUpdatePreparer(ctx context.Context, resource "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -208,7 +208,7 @@ func (client ProductClient) DeletePreparer(ctx context.Context, resourceGroupNam "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -302,7 +302,7 @@ func (client ProductClient) GetPreparer(ctx context.Context, resourceGroupName s "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -393,7 +393,7 @@ func (client ProductClient) GetEntityTagPreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -495,7 +495,7 @@ func (client ProductClient) ListByServicePreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const 
APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -577,159 +577,6 @@ func (client ProductClient) ListByServiceComplete(ctx context.Context, resourceG return } -// ListByTags lists a collection of products associated with tags. -// Parameters: -// resourceGroupName - the name of the resource group. -// serviceName - the name of the API Management service. -// filter - | Field | Supported operators | Supported functions | -// |-------------|------------------------|---------------------------------------------| -// | id | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith | -// | name | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith | -// | description | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith | -// | terms | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith | -// | state | eq | substringof, contains, startswith, endswith | -// top - number of records to return. -// skip - number of records to skip. 
-// includeNotTaggedProducts - include not tagged products in response -func (client ProductClient) ListByTags(ctx context.Context, resourceGroupName string, serviceName string, filter string, top *int32, skip *int32, includeNotTaggedProducts *bool) (result TagResourceCollectionPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ProductClient.ListByTags") - defer func() { - sc := -1 - if result.trc.Response.Response != nil { - sc = result.trc.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: serviceName, - Constraints: []validation.Constraint{{Target: "serviceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "serviceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "serviceName", Name: validation.Pattern, Rule: `^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$`, Chain: nil}}}, - {TargetValue: top, - Constraints: []validation.Constraint{{Target: "top", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "top", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}}}}}, - {TargetValue: skip, - Constraints: []validation.Constraint{{Target: "skip", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "skip", Name: validation.InclusiveMinimum, Rule: 0, Chain: nil}}}}}}); err != nil { - return result, validation.NewError("apimanagement.ProductClient", "ListByTags", err.Error()) - } - - result.fn = client.listByTagsNextResults - req, err := client.ListByTagsPreparer(ctx, resourceGroupName, serviceName, filter, top, skip, includeNotTaggedProducts) - if err != nil { - err = autorest.NewErrorWithError(err, "apimanagement.ProductClient", "ListByTags", nil, "Failure preparing request") - return - } - - resp, err := client.ListByTagsSender(req) - if err != nil { - result.trc.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, 
"apimanagement.ProductClient", "ListByTags", resp, "Failure sending request") - return - } - - result.trc, err = client.ListByTagsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "apimanagement.ProductClient", "ListByTags", resp, "Failure responding to request") - } - - return -} - -// ListByTagsPreparer prepares the ListByTags request. -func (client ProductClient) ListByTagsPreparer(ctx context.Context, resourceGroupName string, serviceName string, filter string, top *int32, skip *int32, includeNotTaggedProducts *bool) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "serviceName": autorest.Encode("path", serviceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-06-01-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(filter) > 0 { - queryParameters["$filter"] = autorest.Encode("query", filter) - } - if top != nil { - queryParameters["$top"] = autorest.Encode("query", *top) - } - if skip != nil { - queryParameters["$skip"] = autorest.Encode("query", *skip) - } - if includeNotTaggedProducts != nil { - queryParameters["includeNotTaggedProducts"] = autorest.Encode("query", *includeNotTaggedProducts) - } else { - queryParameters["includeNotTaggedProducts"] = autorest.Encode("query", false) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/productsByTags", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByTagsSender sends the ListByTags request. The method will close the -// http.Response Body if it receives an error. 
-func (client ProductClient) ListByTagsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) -} - -// ListByTagsResponder handles the response to the ListByTags request. The method always -// closes the http.Response Body. -func (client ProductClient) ListByTagsResponder(resp *http.Response) (result TagResourceCollection, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listByTagsNextResults retrieves the next set of results, if any. -func (client ProductClient) listByTagsNextResults(ctx context.Context, lastResults TagResourceCollection) (result TagResourceCollection, err error) { - req, err := lastResults.tagResourceCollectionPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "apimanagement.ProductClient", "listByTagsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListByTagsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "apimanagement.ProductClient", "listByTagsNextResults", resp, "Failure sending next results request") - } - result, err = client.ListByTagsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "apimanagement.ProductClient", "listByTagsNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListByTagsComplete enumerates all values, automatically crossing page boundaries as required. 
-func (client ProductClient) ListByTagsComplete(ctx context.Context, resourceGroupName string, serviceName string, filter string, top *int32, skip *int32, includeNotTaggedProducts *bool) (result TagResourceCollectionIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ProductClient.ListByTags") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListByTags(ctx, resourceGroupName, serviceName, filter, top, skip, includeNotTaggedProducts) - return -} - // Update update product. // Parameters: // resourceGroupName - the name of the resource group. @@ -791,7 +638,7 @@ func (client ProductClient) UpdatePreparer(ctx context.Context, resourceGroupNam "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/productapi.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/productapi.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/productapi.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/productapi.go index e6e1eb98da44..6cf0dcede095 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/productapi.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/productapi.go @@ -106,7 +106,7 @@ func (client ProductAPIClient) CheckEntityExistsPreparer(ctx context.Context, re "subscriptionId": 
autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -203,7 +203,7 @@ func (client ProductAPIClient) CreateOrUpdatePreparer(ctx context.Context, resou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -301,7 +301,7 @@ func (client ProductAPIClient) DeletePreparer(ctx context.Context, resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -407,7 +407,7 @@ func (client ProductAPIClient) ListByProductPreparer(ctx context.Context, resour "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/productgroup.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/productgroup.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/productgroup.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/productgroup.go index 8532b3a37167..9cdb145c2fe7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/productgroup.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/productgroup.go @@ -105,7 +105,7 @@ func (client 
ProductGroupClient) CheckEntityExistsPreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -201,7 +201,7 @@ func (client ProductGroupClient) CreateOrUpdatePreparer(ctx context.Context, res "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -298,7 +298,7 @@ func (client ProductGroupClient) DeletePreparer(ctx context.Context, resourceGro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -403,7 +403,7 @@ func (client ProductGroupClient) ListByProductPreparer(ctx context.Context, reso "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/productpolicy.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/productpolicy.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/productpolicy.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/productpolicy.go index 2fdb5266813d..e93f8e961932 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/productpolicy.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/productpolicy.go @@ -105,7 +105,7 @@ func (client ProductPolicyClient) CreateOrUpdatePreparer(ctx context.Context, re "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -205,7 +205,7 @@ func (client ProductPolicyClient) DeletePreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -297,7 +297,7 @@ func (client ProductPolicyClient) GetPreparer(ctx context.Context, resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -389,7 +389,7 @@ func (client ProductPolicyClient) GetEntityTagPreparer(ctx context.Context, reso "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -479,7 +479,7 @@ func (client ProductPolicyClient) ListByProductPreparer(ctx context.Context, res "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/productsubscriptions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/productsubscriptions.go similarity index 99% rename from 
vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/productsubscriptions.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/productsubscriptions.go index ccb6204778e2..3157e94441d7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/productsubscriptions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/productsubscriptions.go @@ -116,7 +116,7 @@ func (client ProductSubscriptionsClient) ListPreparer(ctx context.Context, resou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/property.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/property.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/property.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/property.go index 3cd4dc95fa80..9de3170f7817 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/property.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/property.go @@ -112,7 +112,7 @@ func (client PropertyClient) CreateOrUpdatePreparer(ctx context.Context, resourc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ 
-210,7 +210,7 @@ func (client PropertyClient) DeletePreparer(ctx context.Context, resourceGroupNa "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -300,7 +300,7 @@ func (client PropertyClient) GetPreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -390,7 +390,7 @@ func (client PropertyClient) GetEntityTagPreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -487,7 +487,7 @@ func (client PropertyClient) ListByServicePreparer(ctx context.Context, resource "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -626,7 +626,7 @@ func (client PropertyClient) UpdatePreparer(ctx context.Context, resourceGroupNa "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/quotabycounterkeys.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/quotabycounterkeys.go similarity index 99% rename from 
vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/quotabycounterkeys.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/quotabycounterkeys.go index eeb1fbb7d8a3..8003ca763720 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/quotabycounterkeys.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/quotabycounterkeys.go @@ -99,7 +99,7 @@ func (client QuotaByCounterKeysClient) ListByServicePreparer(ctx context.Context "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -191,7 +191,7 @@ func (client QuotaByCounterKeysClient) UpdatePreparer(ctx context.Context, resou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/quotabyperiodkeys.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/quotabyperiodkeys.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/quotabyperiodkeys.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/quotabyperiodkeys.go index c15370f122a8..8338e2ff0c29 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/quotabyperiodkeys.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/quotabyperiodkeys.go @@ -101,7 +101,7 @@ func (client QuotaByPeriodKeysClient) GetPreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -194,7 +194,7 @@ func (client QuotaByPeriodKeysClient) UpdatePreparer(ctx context.Context, resour "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/regions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/regions.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/regions.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/regions.go index 4902f6db5a45..1c8c024bf521 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/regions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/regions.go @@ -94,7 +94,7 @@ func (client RegionsClient) ListByServicePreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/reports.go 
b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/reports.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/reports.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/reports.go index 5ae24f08cd3b..214970f4baf3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/reports.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/reports.go @@ -103,7 +103,7 @@ func (client ReportsClient) ListByAPIPreparer(ctx context.Context, resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "$filter": autorest.Encode("query", filter), "api-version": APIVersion, @@ -242,7 +242,7 @@ func (client ReportsClient) ListByGeoPreparer(ctx context.Context, resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -383,7 +383,7 @@ func (client ReportsClient) ListByOperationPreparer(ctx context.Context, resourc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "$filter": autorest.Encode("query", filter), "api-version": APIVersion, @@ -522,7 +522,7 @@ func (client ReportsClient) ListByProductPreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "$filter": 
autorest.Encode("query", filter), "api-version": APIVersion, @@ -660,7 +660,7 @@ func (client ReportsClient) ListByRequestPreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "$filter": autorest.Encode("query", filter), "api-version": APIVersion, @@ -762,7 +762,7 @@ func (client ReportsClient) ListBySubscriptionPreparer(ctx context.Context, reso "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -906,7 +906,7 @@ func (client ReportsClient) ListByTimePreparer(ctx context.Context, resourceGrou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, "interval": autorest.Encode("query", interval), @@ -1048,7 +1048,7 @@ func (client ReportsClient) ListByUserPreparer(ctx context.Context, resourceGrou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "$filter": autorest.Encode("query", filter), "api-version": APIVersion, diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/service.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/service.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/service.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/service.go index 
96746a886f4b..df72a43d1ad9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/service.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/service.go @@ -91,7 +91,7 @@ func (client ServiceClient) ApplyNetworkConfigurationUpdatesPreparer(ctx context "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -188,7 +188,7 @@ func (client ServiceClient) BackupPreparer(ctx context.Context, resourceGroupNam "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -276,7 +276,7 @@ func (client ServiceClient) CheckNameAvailabilityPreparer(ctx context.Context, p "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -370,7 +370,7 @@ func (client ServiceClient) CreateOrUpdatePreparer(ctx context.Context, resource "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -463,7 +463,7 @@ func (client ServiceClient) DeletePreparer(ctx context.Context, resourceGroupNam "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -547,7 +547,7 @@ func (client ServiceClient) GetPreparer(ctx context.Context, resourceGroupName s "subscriptionId": autorest.Encode("path", 
client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -632,7 +632,7 @@ func (client ServiceClient) GetSsoTokenPreparer(ctx context.Context, resourceGro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -705,7 +705,7 @@ func (client ServiceClient) ListPreparer(ctx context.Context) (*http.Request, er "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -818,7 +818,7 @@ func (client ServiceClient) ListByResourceGroupPreparer(ctx context.Context, res "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -941,7 +941,7 @@ func (client ServiceClient) RestorePreparer(ctx context.Context, resourceGroupNa "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1029,7 +1029,7 @@ func (client ServiceClient) UpdatePreparer(ctx context.Context, resourceGroupNam "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1119,7 +1119,7 @@ func (client ServiceClient) UpdateHostnamePreparer(ctx context.Context, resource "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = 
"2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1217,7 +1217,7 @@ func (client ServiceClient) UploadCertificatePreparer(ctx context.Context, resou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/serviceskus.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/serviceskus.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/serviceskus.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/serviceskus.go index 346249aa265f..d7a6c2ae8778 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/serviceskus.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/serviceskus.go @@ -94,7 +94,7 @@ func (client ServiceSkusClient) ListAvailableServiceSkusPreparer(ctx context.Con "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/signinsettings.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/signinsettings.go similarity index 98% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/signinsettings.go rename to 
vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/signinsettings.go index 5d2c2266f231..0793337fd21c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/signinsettings.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/signinsettings.go @@ -94,7 +94,7 @@ func (client SignInSettingsClient) CreateOrUpdatePreparer(ctx context.Context, r "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -181,7 +181,7 @@ func (client SignInSettingsClient) GetPreparer(ctx context.Context, resourceGrou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -266,7 +266,7 @@ func (client SignInSettingsClient) GetEntityTagPreparer(ctx context.Context, res "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -353,7 +353,7 @@ func (client SignInSettingsClient) UpdatePreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/signupsettings.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/signupsettings.go similarity index 98% rename from 
vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/signupsettings.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/signupsettings.go index 86f2c508fc42..9db07bdbe861 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/signupsettings.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/signupsettings.go @@ -94,7 +94,7 @@ func (client SignUpSettingsClient) CreateOrUpdatePreparer(ctx context.Context, r "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -181,7 +181,7 @@ func (client SignUpSettingsClient) GetPreparer(ctx context.Context, resourceGrou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -266,7 +266,7 @@ func (client SignUpSettingsClient) GetEntityTagPreparer(ctx context.Context, res "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -353,7 +353,7 @@ func (client SignUpSettingsClient) UpdatePreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/subscription.go 
b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/subscription.go similarity index 98% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/subscription.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/subscription.go index 4ec18de23dfc..77dfe38ae184 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/subscription.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/subscription.go @@ -73,7 +73,8 @@ func (client SubscriptionClient) CreateOrUpdate(ctx context.Context, resourceGro {Target: "sid", Name: validation.Pattern, Rule: `(^[\w]+$)|(^[\w][\w\-]+[\w]$)`, Chain: nil}}}, {TargetValue: parameters, Constraints: []validation.Constraint{{Target: "parameters.SubscriptionCreateParameterProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.SubscriptionCreateParameterProperties.Scope", Name: validation.Null, Rule: true, Chain: nil}, + Chain: []validation.Constraint{{Target: "parameters.SubscriptionCreateParameterProperties.UserID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.SubscriptionCreateParameterProperties.ProductID", Name: validation.Null, Rule: true, Chain: nil}, {Target: "parameters.SubscriptionCreateParameterProperties.DisplayName", Name: validation.Null, Rule: true, Chain: []validation.Constraint{{Target: "parameters.SubscriptionCreateParameterProperties.DisplayName", Name: validation.MaxLength, Rule: 100, Chain: nil}, {Target: "parameters.SubscriptionCreateParameterProperties.DisplayName", Name: validation.MinLength, Rule: 1, Chain: nil}, @@ -120,7 +121,7 @@ func (client SubscriptionClient) CreateOrUpdatePreparer(ctx context.Context, res "subscriptionId": autorest.Encode("path", 
client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -222,7 +223,7 @@ func (client SubscriptionClient) DeletePreparer(ctx context.Context, resourceGro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -313,7 +314,7 @@ func (client SubscriptionClient) GetPreparer(ctx context.Context, resourceGroupN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -404,7 +405,7 @@ func (client SubscriptionClient) GetEntityTagPreparer(ctx context.Context, resou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -445,8 +446,8 @@ func (client SubscriptionClient) GetEntityTagResponder(resp *http.Response) (res // | id | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith | // | name | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith | // | stateComment | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith | -// | ownerId | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith | -// | scope | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith | +// | userId | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith | +// | productId | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith | // | state | eq | | // top - number of records to return. // skip - number of records to skip. 
@@ -505,7 +506,7 @@ func (client SubscriptionClient) ListPreparer(ctx context.Context, resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -642,7 +643,7 @@ func (client SubscriptionClient) RegeneratePrimaryKeyPreparer(ctx context.Contex "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -732,7 +733,7 @@ func (client SubscriptionClient) RegenerateSecondaryKeyPreparer(ctx context.Cont "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -828,7 +829,7 @@ func (client SubscriptionClient) UpdatePreparer(ctx context.Context, resourceGro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/tag.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/tag.go similarity index 98% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/tag.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/tag.go index 15c281b2dbb6..7f59c28874fa 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/tag.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/tag.go @@ -107,7 +107,7 @@ func (client TagClient) AssignToAPIPreparer(ctx context.Context, resourceGroupNa "tagId": autorest.Encode("path", tagID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -217,7 +217,7 @@ func (client TagClient) AssignToOperationPreparer(ctx context.Context, resourceG "tagId": autorest.Encode("path", tagID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -319,7 +319,7 @@ func (client TagClient) AssignToProductPreparer(ctx context.Context, resourceGro "tagId": autorest.Encode("path", tagID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -422,7 +422,7 @@ func (client TagClient) CreateOrUpdatePreparer(ctx context.Context, resourceGrou "tagId": autorest.Encode("path", tagID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -517,7 +517,7 @@ func (client TagClient) DeletePreparer(ctx context.Context, resourceGroupName st "tagId": autorest.Encode("path", tagID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -617,7 +617,7 @@ func (client TagClient) DetachFromAPIPreparer(ctx context.Context, resourceGroup "tagId": autorest.Encode("path", tagID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -724,7 +724,7 @@ func (client TagClient) DetachFromOperationPreparer(ctx context.Context, resourc "tagId": autorest.Encode("path", tagID), } 
- const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -823,7 +823,7 @@ func (client TagClient) DetachFromProductPreparer(ctx context.Context, resourceG "tagId": autorest.Encode("path", tagID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -914,7 +914,7 @@ func (client TagClient) GetPreparer(ctx context.Context, resourceGroupName strin "tagId": autorest.Encode("path", tagID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1012,7 +1012,7 @@ func (client TagClient) GetByAPIPreparer(ctx context.Context, resourceGroupName "tagId": autorest.Encode("path", tagID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1117,7 +1117,7 @@ func (client TagClient) GetByOperationPreparer(ctx context.Context, resourceGrou "tagId": autorest.Encode("path", tagID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1214,7 +1214,7 @@ func (client TagClient) GetByProductPreparer(ctx context.Context, resourceGroupN "tagId": autorest.Encode("path", tagID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1305,7 +1305,7 @@ func (client TagClient) GetEntityStatePreparer(ctx context.Context, resourceGrou "tagId": autorest.Encode("path", tagID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1402,7 +1402,7 @@ func (client TagClient) GetEntityStateByAPIPreparer(ctx 
context.Context, resourc "tagId": autorest.Encode("path", tagID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1506,7 +1506,7 @@ func (client TagClient) GetEntityStateByOperationPreparer(ctx context.Context, r "tagId": autorest.Encode("path", tagID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1602,7 +1602,7 @@ func (client TagClient) GetEntityStateByProductPreparer(ctx context.Context, res "tagId": autorest.Encode("path", tagID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1706,7 +1706,7 @@ func (client TagClient) ListByAPIPreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1867,7 +1867,7 @@ func (client TagClient) ListByOperationPreparer(ctx context.Context, resourceGro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -2017,7 +2017,7 @@ func (client TagClient) ListByProductPreparer(ctx context.Context, resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -2106,8 +2106,7 @@ func (client TagClient) ListByProductComplete(ctx context.Context, resourceGroup // | name | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith | // top - number of records to return. 
// skip - number of records to skip. -// scope - scope like 'apis', 'products' or 'apis/{apiId} -func (client TagClient) ListByService(ctx context.Context, resourceGroupName string, serviceName string, filter string, top *int32, skip *int32, scope string) (result TagCollectionPage, err error) { +func (client TagClient) ListByService(ctx context.Context, resourceGroupName string, serviceName string, filter string, top *int32, skip *int32) (result TagCollectionPage, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/TagClient.ListByService") defer func() { @@ -2133,7 +2132,7 @@ func (client TagClient) ListByService(ctx context.Context, resourceGroupName str } result.fn = client.listByServiceNextResults - req, err := client.ListByServicePreparer(ctx, resourceGroupName, serviceName, filter, top, skip, scope) + req, err := client.ListByServicePreparer(ctx, resourceGroupName, serviceName, filter, top, skip) if err != nil { err = autorest.NewErrorWithError(err, "apimanagement.TagClient", "ListByService", nil, "Failure preparing request") return @@ -2155,14 +2154,14 @@ func (client TagClient) ListByService(ctx context.Context, resourceGroupName str } // ListByServicePreparer prepares the ListByService request. 
-func (client TagClient) ListByServicePreparer(ctx context.Context, resourceGroupName string, serviceName string, filter string, top *int32, skip *int32, scope string) (*http.Request, error) { +func (client TagClient) ListByServicePreparer(ctx context.Context, resourceGroupName string, serviceName string, filter string, top *int32, skip *int32) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "serviceName": autorest.Encode("path", serviceName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -2175,9 +2174,6 @@ func (client TagClient) ListByServicePreparer(ctx context.Context, resourceGroup if skip != nil { queryParameters["$skip"] = autorest.Encode("query", *skip) } - if len(scope) > 0 { - queryParameters["scope"] = autorest.Encode("query", scope) - } preparer := autorest.CreatePreparer( autorest.AsGet(), @@ -2229,7 +2225,7 @@ func (client TagClient) listByServiceNextResults(ctx context.Context, lastResult } // ListByServiceComplete enumerates all values, automatically crossing page boundaries as required. 
-func (client TagClient) ListByServiceComplete(ctx context.Context, resourceGroupName string, serviceName string, filter string, top *int32, skip *int32, scope string) (result TagCollectionIterator, err error) { +func (client TagClient) ListByServiceComplete(ctx context.Context, resourceGroupName string, serviceName string, filter string, top *int32, skip *int32) (result TagCollectionIterator, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/TagClient.ListByService") defer func() { @@ -2240,7 +2236,7 @@ func (client TagClient) ListByServiceComplete(ctx context.Context, resourceGroup tracing.EndSpan(ctx, sc, err) }() } - result.page, err = client.ListByService(ctx, resourceGroupName, serviceName, filter, top, skip, scope) + result.page, err = client.ListByService(ctx, resourceGroupName, serviceName, filter, top, skip) return } @@ -2305,7 +2301,7 @@ func (client TagClient) UpdatePreparer(ctx context.Context, resourceGroupName st "tagId": autorest.Encode("path", tagID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/tagdescription.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/tagdescription.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/tagdescription.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/tagdescription.go index 1d8827822696..f740dfd5ae41 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/tagdescription.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/tagdescription.go @@ -113,7 
+113,7 @@ func (client TagDescriptionClient) CreateOrUpdatePreparer(ctx context.Context, r "tagId": autorest.Encode("path", tagID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -219,7 +219,7 @@ func (client TagDescriptionClient) DeletePreparer(ctx context.Context, resourceG "tagId": autorest.Encode("path", tagID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -317,7 +317,7 @@ func (client TagDescriptionClient) GetPreparer(ctx context.Context, resourceGrou "tagId": autorest.Encode("path", tagID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -415,7 +415,7 @@ func (client TagDescriptionClient) GetEntityStatePreparer(ctx context.Context, r "tagId": autorest.Encode("path", tagID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -520,7 +520,7 @@ func (client TagDescriptionClient) ListByAPIPreparer(ctx context.Context, resour "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/tagresource.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/tagresource.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/tagresource.go rename to 
vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/tagresource.go index 349f2a5512d2..9a517bedf603 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/tagresource.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/tagresource.go @@ -117,7 +117,7 @@ func (client TagResourceClient) ListByServicePreparer(ctx context.Context, resou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/tenantaccess.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/tenantaccess.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/tenantaccess.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/tenantaccess.go index 2dedf74c6ad2..0ce401116f51 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/tenantaccess.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/tenantaccess.go @@ -94,7 +94,7 @@ func (client TenantAccessClient) GetPreparer(ctx context.Context, resourceGroupN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -180,7 +180,7 @@ func (client TenantAccessClient) RegeneratePrimaryKeyPreparer(ctx context.Contex "subscriptionId": autorest.Encode("path", 
client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -265,7 +265,7 @@ func (client TenantAccessClient) RegenerateSecondaryKeyPreparer(ctx context.Cont "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -353,7 +353,7 @@ func (client TenantAccessClient) UpdatePreparer(ctx context.Context, resourceGro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/tenantaccessgit.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/tenantaccessgit.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/tenantaccessgit.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/tenantaccessgit.go index 3bb5c237fd93..c9ce2fa2495f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/tenantaccessgit.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/tenantaccessgit.go @@ -94,7 +94,7 @@ func (client TenantAccessGitClient) GetPreparer(ctx context.Context, resourceGro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -180,7 +180,7 @@ func (client 
TenantAccessGitClient) RegeneratePrimaryKeyPreparer(ctx context.Con "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -265,7 +265,7 @@ func (client TenantAccessGitClient) RegenerateSecondaryKeyPreparer(ctx context.C "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/tenantconfiguration.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/tenantconfiguration.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/tenantconfiguration.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/tenantconfiguration.go index 9aea9823149b..8c589d40a64d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/tenantconfiguration.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/tenantconfiguration.go @@ -92,7 +92,7 @@ func (client TenantConfigurationClient) DeployPreparer(ctx context.Context, reso "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -187,7 +187,7 @@ func (client TenantConfigurationClient) GetSyncStatePreparer(ctx context.Context "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const 
APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -271,7 +271,7 @@ func (client TenantConfigurationClient) SavePreparer(ctx context.Context, resour "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -363,7 +363,7 @@ func (client TenantConfigurationClient) ValidatePreparer(ctx context.Context, re "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/user.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/user.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/user.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/user.go index 2c17e936e054..1364af2f9fde 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/user.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/user.go @@ -116,7 +116,7 @@ func (client UserClient) CreateOrUpdatePreparer(ctx context.Context, resourceGro "uid": autorest.Encode("path", UID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -217,7 +217,7 @@ func (client UserClient) DeletePreparer(ctx context.Context, resourceGroupName s "uid": autorest.Encode("path", UID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = 
"2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -315,7 +315,7 @@ func (client UserClient) GenerateSsoURLPreparer(ctx context.Context, resourceGro "uid": autorest.Encode("path", UID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -406,7 +406,7 @@ func (client UserClient) GetPreparer(ctx context.Context, resourceGroupName stri "uid": autorest.Encode("path", UID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -497,7 +497,7 @@ func (client UserClient) GetEntityTagPreparer(ctx context.Context, resourceGroup "uid": autorest.Encode("path", UID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -581,7 +581,7 @@ func (client UserClient) GetIdentityPreparer(ctx context.Context, resourceGroupN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -675,7 +675,7 @@ func (client UserClient) GetSharedAccessTokenPreparer(ctx context.Context, resou "uid": autorest.Encode("path", UID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -780,7 +780,7 @@ func (client UserClient) ListByServicePreparer(ctx context.Context, resourceGrou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -920,7 +920,7 @@ func (client UserClient) UpdatePreparer(ctx context.Context, resourceGroupName s "uid": 
autorest.Encode("path", UID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/usergroup.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/usergroup.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/usergroup.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/usergroup.go index 0e867cff299d..cc890dcb8b19 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/usergroup.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/usergroup.go @@ -113,7 +113,7 @@ func (client UserGroupClient) ListPreparer(ctx context.Context, resourceGroupNam "uid": autorest.Encode("path", UID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/useridentities.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/useridentities.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/useridentities.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/useridentities.go index c415a3a4ea38..0342cfd3117e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/useridentities.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/useridentities.go @@ -100,7 +100,7 @@ func (client UserIdentitiesClient) ListPreparer(ctx context.Context, resourceGro "uid": autorest.Encode("path", UID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/usersubscription.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/usersubscription.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/usersubscription.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/usersubscription.go index 547c9946e12e..5e432b3ec4e8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/usersubscription.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/usersubscription.go @@ -116,7 +116,7 @@ func (client UserSubscriptionClient) ListPreparer(ctx context.Context, resourceG "uid": autorest.Encode("path", UID), } - const APIVersion = "2018-06-01-preview" + const APIVersion = "2018-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/version.go similarity index 98% rename from vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/version.go rename to 
vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/version.go index 337037a77233..9147ce76f472 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement/version.go @@ -21,7 +21,7 @@ import "github.com/Azure/azure-sdk-for-go/version" // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return "Azure-SDK-For-Go/" + version.Number + " apimanagement/2018-06-01-preview" + return "Azure-SDK-For-Go/" + version.Number + " apimanagement/2018-01-01" } // Version returns the semantic version (see http://semver.org) of the client. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/mgmt/2017-04-18/cognitiveservices/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/mgmt/2017-04-18/cognitiveservices/models.go index f23317d32681..93b6e68402ff 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/mgmt/2017-04-18/cognitiveservices/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/mgmt/2017-04-18/cognitiveservices/models.go @@ -516,6 +516,8 @@ type AccountProperties struct { Endpoint *string `json:"endpoint,omitempty"` // InternalID - The internal identifier. InternalID *string `json:"internalId,omitempty"` + // CustomSubDomainName - Optional subdomain name used for token-based authentication. + CustomSubDomainName *string `json:"customSubDomainName,omitempty"` } // AccountUpdateParameters the parameters to provide for the account. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-06-01/compute/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-06-01/compute/models.go index 298107b8cb58..ca81f8953a49 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-06-01/compute/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-06-01/compute/models.go @@ -2411,9 +2411,9 @@ type DiskProperties struct { EncryptionSettings *EncryptionSettings `json:"encryptionSettings,omitempty"` // ProvisioningState - The disk provisioning state. ProvisioningState *string `json:"provisioningState,omitempty"` - // DiskIOPSReadWrite - The number of IOPS allowed for this disk; only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes. + // DiskIOPSReadWrite - The number of IOPS allowed for this disk; only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes. For a description of the range of values you can set, see [Ultra SSD Managed Disk Offerings](https://docs.microsoft.com/azure/virtual-machines/windows/disks-ultra-ssd#ultra-ssd-managed-disk-offerings). DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"` - // DiskMBpsReadWrite - The bandwidth allowed for this disk; only settable for UltraSSD disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10. + // DiskMBpsReadWrite - The bandwidth allowed for this disk; only settable for UltraSSD disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10. For a description of the range of values you can set, see [Ultra SSD Managed Disk Offerings](https://docs.microsoft.com/azure/virtual-machines/windows/disks-ultra-ssd#ultra-ssd-managed-disk-offerings). 
DiskMBpsReadWrite *int32 `json:"diskMBpsReadWrite,omitempty"` } @@ -4712,7 +4712,7 @@ type OSDiskImage struct { // OSProfile specifies the operating system settings for the virtual machine. type OSProfile struct { - // ComputerName - Specifies the host OS name of the virtual machine.

**Max-length (Windows):** 15 characters

**Max-length (Linux):** 64 characters.

For naming conventions and restrictions see [Azure infrastructure services implementation guidelines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-infrastructure-subscription-accounts-guidelines?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json#1-naming-conventions). + // ComputerName - Specifies the host OS name of the virtual machine.

This name cannot be updated after the VM is created.

**Max-length (Windows):** 15 characters

**Max-length (Linux):** 64 characters.

For naming conventions and restrictions see [Azure infrastructure services implementation guidelines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-infrastructure-subscription-accounts-guidelines?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json#1-naming-conventions). ComputerName *string `json:"computerName,omitempty"` // AdminUsername - Specifies the name of the administrator account.

**Windows-only restriction:** Cannot end in "."

**Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5".

**Minimum-length (Linux):** 1 character

**Max-length (Linux):** 64 characters

**Max-length (Windows):** 20 characters

  • For root access to the Linux VM, see [Using root privileges on Linux virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-use-root-privileges?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
  • For a list of built-in system users on Linux that should not be used in this field, see [Selecting User Names for Linux on Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-usernames?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) AdminUsername *string `json:"adminUsername,omitempty"` @@ -6152,7 +6152,7 @@ type UsageName struct { type VaultCertificate struct { // CertificateURL - This is the URL of a certificate that has been uploaded to Key Vault as a secret. For adding a secret to the Key Vault, see [Add a key or secret to the key vault](https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add). In this case, your certificate needs to be It is the Base64 encoding of the following JSON Object which is encoded in UTF-8:

    {
    "data":"",
    "dataType":"pfx",
    "password":""
    } CertificateURL *string `json:"certificateUrl,omitempty"` - // CertificateStore - For Windows VMs, specifies the certificate store on the Virtual Machine to which the certificate should be added. The specified certificate store is implicitly in the LocalMachine account.

    For Linux VMs, the certificate file is placed under the /var/lib/waagent directory, with the file name .crt for the X509 certificate file and .prv for private key. Both of these files are .pem formatted. + // CertificateStore - For Windows VMs, specifies the certificate store on the Virtual Machine to which the certificate should be added. The specified certificate store is implicitly in the LocalMachine account.

    For Linux VMs, the certificate file is placed under the /var/lib/waagent directory, with the file name <UppercaseThumbprint>.crt for the X509 certificate file and <UppercaseThumbprint>.prv for private key. Both of these files are .pem formatted. CertificateStore *string `json:"certificateStore,omitempty"` } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice/managedclusters.go b/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice/managedclusters.go index a771309dbf33..22242acefbfe 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice/managedclusters.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice/managedclusters.go @@ -59,6 +59,8 @@ func (client ManagedClustersClient) CreateOrUpdate(ctx context.Context, resource }() } if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, {TargetValue: parameters, Constraints: []validation.Constraint{{Target: "parameters.ManagedClusterProperties", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.LinuxProfile", Name: validation.Null, Rule: false, @@ -166,6 +168,12 @@ func (client ManagedClustersClient) Delete(ctx context.Context, resourceGroupNam tracing.EndSpan(ctx, sc, err) }() } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.ManagedClustersClient", "Delete", err.Error()) + } + req, err := client.DeletePreparer(ctx, resourceGroupName, resourceName) if err 
!= nil { err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "Delete", nil, "Failure preparing request") @@ -242,6 +250,12 @@ func (client ManagedClustersClient) Get(ctx context.Context, resourceGroupName s tracing.EndSpan(ctx, sc, err) }() } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.ManagedClustersClient", "Get", err.Error()) + } + req, err := client.GetPreparer(ctx, resourceGroupName, resourceName) if err != nil { err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "Get", nil, "Failure preparing request") @@ -321,6 +335,12 @@ func (client ManagedClustersClient) GetAccessProfile(ctx context.Context, resour tracing.EndSpan(ctx, sc, err) }() } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.ManagedClustersClient", "GetAccessProfile", err.Error()) + } + req, err := client.GetAccessProfilePreparer(ctx, resourceGroupName, resourceName, roleName) if err != nil { err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetAccessProfile", nil, "Failure preparing request") @@ -400,6 +420,12 @@ func (client ManagedClustersClient) GetUpgradeProfile(ctx context.Context, resou tracing.EndSpan(ctx, sc, err) }() } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.ManagedClustersClient", 
"GetUpgradeProfile", err.Error()) + } + req, err := client.GetUpgradeProfilePreparer(ctx, resourceGroupName, resourceName) if err != nil { err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetUpgradeProfile", nil, "Failure preparing request") @@ -588,6 +614,12 @@ func (client ManagedClustersClient) ListByResourceGroup(ctx context.Context, res tracing.EndSpan(ctx, sc, err) }() } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.ManagedClustersClient", "ListByResourceGroup", err.Error()) + } + result.fn = client.listByResourceGroupNextResults req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) if err != nil { @@ -703,6 +735,12 @@ func (client ManagedClustersClient) ListClusterAdminCredentials(ctx context.Cont tracing.EndSpan(ctx, sc, err) }() } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.ManagedClustersClient", "ListClusterAdminCredentials", err.Error()) + } + req, err := client.ListClusterAdminCredentialsPreparer(ctx, resourceGroupName, resourceName) if err != nil { err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterAdminCredentials", nil, "Failure preparing request") @@ -781,6 +819,12 @@ func (client ManagedClustersClient) ListClusterUserCredentials(ctx context.Conte tracing.EndSpan(ctx, sc, err) }() } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); 
err != nil { + return result, validation.NewError("containerservice.ManagedClustersClient", "ListClusterUserCredentials", err.Error()) + } + req, err := client.ListClusterUserCredentialsPreparer(ctx, resourceGroupName, resourceName) if err != nil { err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterUserCredentials", nil, "Failure preparing request") @@ -843,6 +887,181 @@ func (client ManagedClustersClient) ListClusterUserCredentialsResponder(resp *ht return } +// ResetAADProfile update the AAD Profile for a managed cluster. +// Parameters: +// resourceGroupName - the name of the resource group. +// resourceName - the name of the managed cluster resource. +// parameters - parameters supplied to the Reset AAD Profile operation for a Managed Cluster. +func (client ManagedClustersClient) ResetAADProfile(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedClusterAADProfile) (result ManagedClustersResetAADProfileFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.ResetAADProfile") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.ClientAppID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.ServerAppID", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.ManagedClustersClient", "ResetAADProfile", err.Error()) + } + + req, err := client.ResetAADProfilePreparer(ctx, resourceGroupName, resourceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, 
"containerservice.ManagedClustersClient", "ResetAADProfile", nil, "Failure preparing request") + return + } + + result, err = client.ResetAADProfileSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ResetAADProfile", result.Response(), "Failure sending request") + return + } + + return +} + +// ResetAADProfilePreparer prepares the ResetAADProfile request. +func (client ManagedClustersClient) ResetAADProfilePreparer(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedClusterAADProfile) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-03-31" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ResetAADProfileSender sends the ResetAADProfile request. The method will close the +// http.Response Body if it receives an error. 
+func (client ManagedClustersClient) ResetAADProfileSender(req *http.Request) (future ManagedClustersResetAADProfileFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// ResetAADProfileResponder handles the response to the ResetAADProfile request. The method always +// closes the http.Response Body. +func (client ManagedClustersClient) ResetAADProfileResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// ResetServicePrincipalProfile update the service principal Profile for a managed cluster. +// Parameters: +// resourceGroupName - the name of the resource group. +// resourceName - the name of the managed cluster resource. +// parameters - parameters supplied to the Reset Service Principal Profile operation for a Managed Cluster. 
+func (client ManagedClustersClient) ResetServicePrincipalProfile(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedClusterServicePrincipalProfile) (result ManagedClustersResetServicePrincipalProfileFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.ResetServicePrincipalProfile") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.ClientID", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.ManagedClustersClient", "ResetServicePrincipalProfile", err.Error()) + } + + req, err := client.ResetServicePrincipalProfilePreparer(ctx, resourceGroupName, resourceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ResetServicePrincipalProfile", nil, "Failure preparing request") + return + } + + result, err = client.ResetServicePrincipalProfileSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ResetServicePrincipalProfile", result.Response(), "Failure sending request") + return + } + + return +} + +// ResetServicePrincipalProfilePreparer prepares the ResetServicePrincipalProfile request. 
+func (client ManagedClustersClient) ResetServicePrincipalProfilePreparer(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedClusterServicePrincipalProfile) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-03-31" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ResetServicePrincipalProfileSender sends the ResetServicePrincipalProfile request. The method will close the +// http.Response Body if it receives an error. +func (client ManagedClustersClient) ResetServicePrincipalProfileSender(req *http.Request) (future ManagedClustersResetServicePrincipalProfileFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// ResetServicePrincipalProfileResponder handles the response to the ResetServicePrincipalProfile request. The method always +// closes the http.Response Body. 
+func (client ManagedClustersClient) ResetServicePrincipalProfileResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + // UpdateTags updates a managed cluster with the specified tags. // Parameters: // resourceGroupName - the name of the resource group. @@ -859,6 +1078,12 @@ func (client ManagedClustersClient) UpdateTags(ctx context.Context, resourceGrou tracing.EndSpan(ctx, sc, err) }() } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.ManagedClustersClient", "UpdateTags", err.Error()) + } + req, err := client.UpdateTagsPreparer(ctx, resourceGroupName, resourceName, parameters) if err != nil { err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "UpdateTags", nil, "Failure preparing request") diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice/models.go index 856a824650ce..a0b3584d6f26 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice/models.go @@ -671,7 +671,8 @@ type CredentialResult struct { // CredentialResults the list of credential result response. type CredentialResults struct { autorest.Response `json:"-"` - Kubeconfigs *[]CredentialResult `json:"kubeconfigs,omitempty"` + // Kubeconfigs - Base64-encoded Kubernetes configuration file. 
+ Kubeconfigs *[]CredentialResult `json:"kubeconfigs,omitempty"` } // CustomProfile properties to configure a custom container service cluster. @@ -1306,6 +1307,8 @@ type ManagedClusterProperties struct { NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"` // AadProfile - Profile of Azure Active Directory configuration. AadProfile *ManagedClusterAADProfile `json:"aadProfile,omitempty"` + // APIServerAuthorizedIPRanges - Authorized IP Ranges to kubernetes API server. + APIServerAuthorizedIPRanges *[]string `json:"apiServerAuthorizedIPRanges,omitempty"` } // MarshalJSON is the custom marshaler for ManagedClusterProperties. @@ -1347,6 +1350,9 @@ func (mcp ManagedClusterProperties) MarshalJSON() ([]byte, error) { if mcp.AadProfile != nil { objectMap["aadProfile"] = mcp.AadProfile } + if mcp.APIServerAuthorizedIPRanges != nil { + objectMap["apiServerAuthorizedIPRanges"] = mcp.APIServerAuthorizedIPRanges + } return json.Marshal(objectMap) } @@ -1411,6 +1417,52 @@ type ManagedClusterServicePrincipalProfile struct { Secret *string `json:"secret,omitempty"` } +// ManagedClustersResetAADProfileFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type ManagedClustersResetAADProfileFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *ManagedClustersResetAADProfileFuture) Result(client ManagedClustersClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersResetAADProfileFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerservice.ManagedClustersResetAADProfileFuture") + return + } + ar.Response = future.Response() + return +} + +// ManagedClustersResetServicePrincipalProfileFuture an abstraction for monitoring and retrieving the +// results of a long-running operation. +type ManagedClustersResetServicePrincipalProfileFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *ManagedClustersResetServicePrincipalProfileFuture) Result(client ManagedClustersClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersResetServicePrincipalProfileFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerservice.ManagedClustersResetServicePrincipalProfileFuture") + return + } + ar.Response = future.Response() + return +} + // ManagedClustersUpdateTagsFuture an abstraction for monitoring and retrieving the results of a // long-running operation. 
type ManagedClustersUpdateTagsFuture struct { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid/models.go deleted file mode 100644 index 0e931f37207f..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid/models.go +++ /dev/null @@ -1,1208 +0,0 @@ -package eventgrid - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "encoding/json" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "net/http" -) - -// The package's fully qualified name. -const fqdn = "github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid" - -// EndpointType enumerates the values for endpoint type. -type EndpointType string - -const ( - // EndpointTypeEventHub ... - EndpointTypeEventHub EndpointType = "EventHub" - // EndpointTypeEventSubscriptionDestination ... - EndpointTypeEventSubscriptionDestination EndpointType = "EventSubscriptionDestination" - // EndpointTypeWebHook ... 
- EndpointTypeWebHook EndpointType = "WebHook" -) - -// PossibleEndpointTypeValues returns an array of possible values for the EndpointType const type. -func PossibleEndpointTypeValues() []EndpointType { - return []EndpointType{EndpointTypeEventHub, EndpointTypeEventSubscriptionDestination, EndpointTypeWebHook} -} - -// EventSubscriptionProvisioningState enumerates the values for event subscription provisioning state. -type EventSubscriptionProvisioningState string - -const ( - // Canceled ... - Canceled EventSubscriptionProvisioningState = "Canceled" - // Creating ... - Creating EventSubscriptionProvisioningState = "Creating" - // Deleting ... - Deleting EventSubscriptionProvisioningState = "Deleting" - // Failed ... - Failed EventSubscriptionProvisioningState = "Failed" - // Succeeded ... - Succeeded EventSubscriptionProvisioningState = "Succeeded" - // Updating ... - Updating EventSubscriptionProvisioningState = "Updating" -) - -// PossibleEventSubscriptionProvisioningStateValues returns an array of possible values for the EventSubscriptionProvisioningState const type. -func PossibleEventSubscriptionProvisioningStateValues() []EventSubscriptionProvisioningState { - return []EventSubscriptionProvisioningState{Canceled, Creating, Deleting, Failed, Succeeded, Updating} -} - -// ResourceRegionType enumerates the values for resource region type. -type ResourceRegionType string - -const ( - // GlobalResource ... - GlobalResource ResourceRegionType = "GlobalResource" - // RegionalResource ... - RegionalResource ResourceRegionType = "RegionalResource" -) - -// PossibleResourceRegionTypeValues returns an array of possible values for the ResourceRegionType const type. -func PossibleResourceRegionTypeValues() []ResourceRegionType { - return []ResourceRegionType{GlobalResource, RegionalResource} -} - -// TopicProvisioningState enumerates the values for topic provisioning state. -type TopicProvisioningState string - -const ( - // TopicProvisioningStateCanceled ... 
- TopicProvisioningStateCanceled TopicProvisioningState = "Canceled" - // TopicProvisioningStateCreating ... - TopicProvisioningStateCreating TopicProvisioningState = "Creating" - // TopicProvisioningStateDeleting ... - TopicProvisioningStateDeleting TopicProvisioningState = "Deleting" - // TopicProvisioningStateFailed ... - TopicProvisioningStateFailed TopicProvisioningState = "Failed" - // TopicProvisioningStateSucceeded ... - TopicProvisioningStateSucceeded TopicProvisioningState = "Succeeded" - // TopicProvisioningStateUpdating ... - TopicProvisioningStateUpdating TopicProvisioningState = "Updating" -) - -// PossibleTopicProvisioningStateValues returns an array of possible values for the TopicProvisioningState const type. -func PossibleTopicProvisioningStateValues() []TopicProvisioningState { - return []TopicProvisioningState{TopicProvisioningStateCanceled, TopicProvisioningStateCreating, TopicProvisioningStateDeleting, TopicProvisioningStateFailed, TopicProvisioningStateSucceeded, TopicProvisioningStateUpdating} -} - -// TopicTypeProvisioningState enumerates the values for topic type provisioning state. -type TopicTypeProvisioningState string - -const ( - // TopicTypeProvisioningStateCanceled ... - TopicTypeProvisioningStateCanceled TopicTypeProvisioningState = "Canceled" - // TopicTypeProvisioningStateCreating ... - TopicTypeProvisioningStateCreating TopicTypeProvisioningState = "Creating" - // TopicTypeProvisioningStateDeleting ... - TopicTypeProvisioningStateDeleting TopicTypeProvisioningState = "Deleting" - // TopicTypeProvisioningStateFailed ... - TopicTypeProvisioningStateFailed TopicTypeProvisioningState = "Failed" - // TopicTypeProvisioningStateSucceeded ... - TopicTypeProvisioningStateSucceeded TopicTypeProvisioningState = "Succeeded" - // TopicTypeProvisioningStateUpdating ... 
- TopicTypeProvisioningStateUpdating TopicTypeProvisioningState = "Updating" -) - -// PossibleTopicTypeProvisioningStateValues returns an array of possible values for the TopicTypeProvisioningState const type. -func PossibleTopicTypeProvisioningStateValues() []TopicTypeProvisioningState { - return []TopicTypeProvisioningState{TopicTypeProvisioningStateCanceled, TopicTypeProvisioningStateCreating, TopicTypeProvisioningStateDeleting, TopicTypeProvisioningStateFailed, TopicTypeProvisioningStateSucceeded, TopicTypeProvisioningStateUpdating} -} - -// EventHubEventSubscriptionDestination information about the event hub destination for an event -// subscription -type EventHubEventSubscriptionDestination struct { - // EventHubEventSubscriptionDestinationProperties - Event Hub Properties of the event subscription destination - *EventHubEventSubscriptionDestinationProperties `json:"properties,omitempty"` - // EndpointType - Possible values include: 'EndpointTypeEventSubscriptionDestination', 'EndpointTypeWebHook', 'EndpointTypeEventHub' - EndpointType EndpointType `json:"endpointType,omitempty"` -} - -// MarshalJSON is the custom marshaler for EventHubEventSubscriptionDestination. -func (ehesd EventHubEventSubscriptionDestination) MarshalJSON() ([]byte, error) { - ehesd.EndpointType = EndpointTypeEventHub - objectMap := make(map[string]interface{}) - if ehesd.EventHubEventSubscriptionDestinationProperties != nil { - objectMap["properties"] = ehesd.EventHubEventSubscriptionDestinationProperties - } - if ehesd.EndpointType != "" { - objectMap["endpointType"] = ehesd.EndpointType - } - return json.Marshal(objectMap) -} - -// AsWebHookEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventHubEventSubscriptionDestination. 
-func (ehesd EventHubEventSubscriptionDestination) AsWebHookEventSubscriptionDestination() (*WebHookEventSubscriptionDestination, bool) { - return nil, false -} - -// AsEventHubEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventHubEventSubscriptionDestination. -func (ehesd EventHubEventSubscriptionDestination) AsEventHubEventSubscriptionDestination() (*EventHubEventSubscriptionDestination, bool) { - return &ehesd, true -} - -// AsEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventHubEventSubscriptionDestination. -func (ehesd EventHubEventSubscriptionDestination) AsEventSubscriptionDestination() (*EventSubscriptionDestination, bool) { - return nil, false -} - -// AsBasicEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventHubEventSubscriptionDestination. -func (ehesd EventHubEventSubscriptionDestination) AsBasicEventSubscriptionDestination() (BasicEventSubscriptionDestination, bool) { - return &ehesd, true -} - -// UnmarshalJSON is the custom unmarshaler for EventHubEventSubscriptionDestination struct. 
-func (ehesd *EventHubEventSubscriptionDestination) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var eventHubEventSubscriptionDestinationProperties EventHubEventSubscriptionDestinationProperties - err = json.Unmarshal(*v, &eventHubEventSubscriptionDestinationProperties) - if err != nil { - return err - } - ehesd.EventHubEventSubscriptionDestinationProperties = &eventHubEventSubscriptionDestinationProperties - } - case "endpointType": - if v != nil { - var endpointType EndpointType - err = json.Unmarshal(*v, &endpointType) - if err != nil { - return err - } - ehesd.EndpointType = endpointType - } - } - } - - return nil -} - -// EventHubEventSubscriptionDestinationProperties the properties for a event hub destination. -type EventHubEventSubscriptionDestinationProperties struct { - // ResourceID - The Azure Resource Id that represents the endpoint of an Event Hub destination of an event subscription. - ResourceID *string `json:"resourceId,omitempty"` -} - -// EventSubscription event Subscription -type EventSubscription struct { - autorest.Response `json:"-"` - // EventSubscriptionProperties - Properties of the event subscription - *EventSubscriptionProperties `json:"properties,omitempty"` - // ID - Fully qualified identifier of the resource - ID *string `json:"id,omitempty"` - // Name - Name of the resource - Name *string `json:"name,omitempty"` - // Type - Type of the resource - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for EventSubscription. 
-func (es EventSubscription) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if es.EventSubscriptionProperties != nil { - objectMap["properties"] = es.EventSubscriptionProperties - } - if es.ID != nil { - objectMap["id"] = es.ID - } - if es.Name != nil { - objectMap["name"] = es.Name - } - if es.Type != nil { - objectMap["type"] = es.Type - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for EventSubscription struct. -func (es *EventSubscription) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var eventSubscriptionProperties EventSubscriptionProperties - err = json.Unmarshal(*v, &eventSubscriptionProperties) - if err != nil { - return err - } - es.EventSubscriptionProperties = &eventSubscriptionProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - es.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - es.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - es.Type = &typeVar - } - } - } - - return nil -} - -// BasicEventSubscriptionDestination information about the destination for an event subscription -type BasicEventSubscriptionDestination interface { - AsWebHookEventSubscriptionDestination() (*WebHookEventSubscriptionDestination, bool) - AsEventHubEventSubscriptionDestination() (*EventHubEventSubscriptionDestination, bool) - AsEventSubscriptionDestination() (*EventSubscriptionDestination, bool) -} - -// EventSubscriptionDestination information about the destination for an event subscription -type EventSubscriptionDestination struct { - // EndpointType - Possible values include: 
'EndpointTypeEventSubscriptionDestination', 'EndpointTypeWebHook', 'EndpointTypeEventHub' - EndpointType EndpointType `json:"endpointType,omitempty"` -} - -func unmarshalBasicEventSubscriptionDestination(body []byte) (BasicEventSubscriptionDestination, error) { - var m map[string]interface{} - err := json.Unmarshal(body, &m) - if err != nil { - return nil, err - } - - switch m["endpointType"] { - case string(EndpointTypeWebHook): - var whesd WebHookEventSubscriptionDestination - err := json.Unmarshal(body, &whesd) - return whesd, err - case string(EndpointTypeEventHub): - var ehesd EventHubEventSubscriptionDestination - err := json.Unmarshal(body, &ehesd) - return ehesd, err - default: - var esd EventSubscriptionDestination - err := json.Unmarshal(body, &esd) - return esd, err - } -} -func unmarshalBasicEventSubscriptionDestinationArray(body []byte) ([]BasicEventSubscriptionDestination, error) { - var rawMessages []*json.RawMessage - err := json.Unmarshal(body, &rawMessages) - if err != nil { - return nil, err - } - - esdArray := make([]BasicEventSubscriptionDestination, len(rawMessages)) - - for index, rawMessage := range rawMessages { - esd, err := unmarshalBasicEventSubscriptionDestination(*rawMessage) - if err != nil { - return nil, err - } - esdArray[index] = esd - } - return esdArray, nil -} - -// MarshalJSON is the custom marshaler for EventSubscriptionDestination. -func (esd EventSubscriptionDestination) MarshalJSON() ([]byte, error) { - esd.EndpointType = EndpointTypeEventSubscriptionDestination - objectMap := make(map[string]interface{}) - if esd.EndpointType != "" { - objectMap["endpointType"] = esd.EndpointType - } - return json.Marshal(objectMap) -} - -// AsWebHookEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventSubscriptionDestination. 
-func (esd EventSubscriptionDestination) AsWebHookEventSubscriptionDestination() (*WebHookEventSubscriptionDestination, bool) { - return nil, false -} - -// AsEventHubEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventSubscriptionDestination. -func (esd EventSubscriptionDestination) AsEventHubEventSubscriptionDestination() (*EventHubEventSubscriptionDestination, bool) { - return nil, false -} - -// AsEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventSubscriptionDestination. -func (esd EventSubscriptionDestination) AsEventSubscriptionDestination() (*EventSubscriptionDestination, bool) { - return &esd, true -} - -// AsBasicEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventSubscriptionDestination. -func (esd EventSubscriptionDestination) AsBasicEventSubscriptionDestination() (BasicEventSubscriptionDestination, bool) { - return &esd, true -} - -// EventSubscriptionFilter filter for the Event Subscription -type EventSubscriptionFilter struct { - // SubjectBeginsWith - An optional string to filter events for an event subscription based on a resource path prefix. - // The format of this depends on the publisher of the events. - // Wildcard characters are not supported in this path. - SubjectBeginsWith *string `json:"subjectBeginsWith,omitempty"` - // SubjectEndsWith - An optional string to filter events for an event subscription based on a resource path suffix. - // Wildcard characters are not supported in this path. - SubjectEndsWith *string `json:"subjectEndsWith,omitempty"` - // IncludedEventTypes - A list of applicable event types that need to be part of the event subscription. - // If it is desired to subscribe to all event types, the string "all" needs to be specified as an element in this list. 
- IncludedEventTypes *[]string `json:"includedEventTypes,omitempty"` - // IsSubjectCaseSensitive - Specifies if the SubjectBeginsWith and SubjectEndsWith properties of the filter - // should be compared in a case sensitive manner. - IsSubjectCaseSensitive *bool `json:"isSubjectCaseSensitive,omitempty"` -} - -// EventSubscriptionFullURL full endpoint url of an event subscription -type EventSubscriptionFullURL struct { - autorest.Response `json:"-"` - // EndpointURL - The URL that represents the endpoint of the destination of an event subscription. - EndpointURL *string `json:"endpointUrl,omitempty"` -} - -// EventSubscriptionProperties properties of the Event Subscription -type EventSubscriptionProperties struct { - // Topic - Name of the topic of the event subscription. - Topic *string `json:"topic,omitempty"` - // ProvisioningState - Provisioning state of the event subscription. Possible values include: 'Creating', 'Updating', 'Deleting', 'Succeeded', 'Canceled', 'Failed' - ProvisioningState EventSubscriptionProvisioningState `json:"provisioningState,omitempty"` - // Destination - Information about the destination where events have to be delivered for the event subscription. - Destination BasicEventSubscriptionDestination `json:"destination,omitempty"` - // Filter - Information about the filter for the event subscription. - Filter *EventSubscriptionFilter `json:"filter,omitempty"` - // Labels - List of user defined labels. - Labels *[]string `json:"labels,omitempty"` -} - -// UnmarshalJSON is the custom unmarshaler for EventSubscriptionProperties struct. 
-func (esp *EventSubscriptionProperties) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "topic": - if v != nil { - var topic string - err = json.Unmarshal(*v, &topic) - if err != nil { - return err - } - esp.Topic = &topic - } - case "provisioningState": - if v != nil { - var provisioningState EventSubscriptionProvisioningState - err = json.Unmarshal(*v, &provisioningState) - if err != nil { - return err - } - esp.ProvisioningState = provisioningState - } - case "destination": - if v != nil { - destination, err := unmarshalBasicEventSubscriptionDestination(*v) - if err != nil { - return err - } - esp.Destination = destination - } - case "filter": - if v != nil { - var filter EventSubscriptionFilter - err = json.Unmarshal(*v, &filter) - if err != nil { - return err - } - esp.Filter = &filter - } - case "labels": - if v != nil { - var labels []string - err = json.Unmarshal(*v, &labels) - if err != nil { - return err - } - esp.Labels = &labels - } - } - } - - return nil -} - -// EventSubscriptionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type EventSubscriptionsCreateOrUpdateFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. 
-func (future *EventSubscriptionsCreateOrUpdateFuture) Result(client EventSubscriptionsClient) (es EventSubscription, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "eventgrid.EventSubscriptionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("eventgrid.EventSubscriptionsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if es.Response.Response, err = future.GetResult(sender); err == nil && es.Response.Response.StatusCode != http.StatusNoContent { - es, err = client.CreateOrUpdateResponder(es.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "eventgrid.EventSubscriptionsCreateOrUpdateFuture", "Result", es.Response.Response, "Failure responding to request") - } - } - return -} - -// EventSubscriptionsDeleteFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type EventSubscriptionsDeleteFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. 
-func (future *EventSubscriptionsDeleteFuture) Result(client EventSubscriptionsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "eventgrid.EventSubscriptionsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("eventgrid.EventSubscriptionsDeleteFuture") - return - } - ar.Response = future.Response() - return -} - -// EventSubscriptionsListResult result of the List EventSubscriptions operation -type EventSubscriptionsListResult struct { - autorest.Response `json:"-"` - // Value - A collection of EventSubscriptions - Value *[]EventSubscription `json:"value,omitempty"` -} - -// EventSubscriptionsUpdateFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type EventSubscriptionsUpdateFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. 
-func (future *EventSubscriptionsUpdateFuture) Result(client EventSubscriptionsClient) (es EventSubscription, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "eventgrid.EventSubscriptionsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("eventgrid.EventSubscriptionsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if es.Response.Response, err = future.GetResult(sender); err == nil && es.Response.Response.StatusCode != http.StatusNoContent { - es, err = client.UpdateResponder(es.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "eventgrid.EventSubscriptionsUpdateFuture", "Result", es.Response.Response, "Failure responding to request") - } - } - return -} - -// EventSubscriptionUpdateParameters properties of the Event Subscription update -type EventSubscriptionUpdateParameters struct { - // Destination - Information about the destination where events have to be delivered for the event subscription. - Destination BasicEventSubscriptionDestination `json:"destination,omitempty"` - // Filter - Information about the filter for the event subscription. - Filter *EventSubscriptionFilter `json:"filter,omitempty"` - // Labels - List of user defined labels. - Labels *[]string `json:"labels,omitempty"` -} - -// UnmarshalJSON is the custom unmarshaler for EventSubscriptionUpdateParameters struct. 
-func (esup *EventSubscriptionUpdateParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "destination": - if v != nil { - destination, err := unmarshalBasicEventSubscriptionDestination(*v) - if err != nil { - return err - } - esup.Destination = destination - } - case "filter": - if v != nil { - var filter EventSubscriptionFilter - err = json.Unmarshal(*v, &filter) - if err != nil { - return err - } - esup.Filter = &filter - } - case "labels": - if v != nil { - var labels []string - err = json.Unmarshal(*v, &labels) - if err != nil { - return err - } - esup.Labels = &labels - } - } - } - - return nil -} - -// EventType event Type for a subject under a topic -type EventType struct { - // EventTypeProperties - Properties of the event type. - *EventTypeProperties `json:"properties,omitempty"` - // ID - Fully qualified identifier of the resource - ID *string `json:"id,omitempty"` - // Name - Name of the resource - Name *string `json:"name,omitempty"` - // Type - Type of the resource - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for EventType. -func (et EventType) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if et.EventTypeProperties != nil { - objectMap["properties"] = et.EventTypeProperties - } - if et.ID != nil { - objectMap["id"] = et.ID - } - if et.Name != nil { - objectMap["name"] = et.Name - } - if et.Type != nil { - objectMap["type"] = et.Type - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for EventType struct. 
-func (et *EventType) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var eventTypeProperties EventTypeProperties - err = json.Unmarshal(*v, &eventTypeProperties) - if err != nil { - return err - } - et.EventTypeProperties = &eventTypeProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - et.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - et.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - et.Type = &typeVar - } - } - } - - return nil -} - -// EventTypeProperties properties of the event type -type EventTypeProperties struct { - // DisplayName - Display name of the event type. - DisplayName *string `json:"displayName,omitempty"` - // Description - Description of the event type. - Description *string `json:"description,omitempty"` - // SchemaURL - Url of the schema for this event type. 
- SchemaURL *string `json:"schemaUrl,omitempty"` -} - -// EventTypesListResult result of the List Event Types operation -type EventTypesListResult struct { - autorest.Response `json:"-"` - // Value - A collection of event types - Value *[]EventType `json:"value,omitempty"` -} - -// Operation represents an operation returned by the GetOperations request -type Operation struct { - // Name - Name of the operation - Name *string `json:"name,omitempty"` - // Display - Display name of the operation - Display *OperationInfo `json:"display,omitempty"` - // Origin - Origin of the operation - Origin *string `json:"origin,omitempty"` - // Properties - Properties of the operation - Properties interface{} `json:"properties,omitempty"` -} - -// OperationInfo information about an operation -type OperationInfo struct { - // Provider - Name of the provider - Provider *string `json:"provider,omitempty"` - // Resource - Name of the resource type - Resource *string `json:"resource,omitempty"` - // Operation - Name of the operation - Operation *string `json:"operation,omitempty"` - // Description - Description of the operation - Description *string `json:"description,omitempty"` -} - -// OperationsListResult result of the List Operations operation -type OperationsListResult struct { - autorest.Response `json:"-"` - // Value - A collection of operations - Value *[]Operation `json:"value,omitempty"` -} - -// Resource definition of a Resource -type Resource struct { - // ID - Fully qualified identifier of the resource - ID *string `json:"id,omitempty"` - // Name - Name of the resource - Name *string `json:"name,omitempty"` - // Type - Type of the resource - Type *string `json:"type,omitempty"` -} - -// Topic eventGrid Topic -type Topic struct { - autorest.Response `json:"-"` - // TopicProperties - Properties of the topic - *TopicProperties `json:"properties,omitempty"` - // Location - Location of the resource - Location *string `json:"location,omitempty"` - // Tags - Tags of the resource 
- Tags map[string]*string `json:"tags"` - // ID - Fully qualified identifier of the resource - ID *string `json:"id,omitempty"` - // Name - Name of the resource - Name *string `json:"name,omitempty"` - // Type - Type of the resource - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for Topic. -func (t Topic) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if t.TopicProperties != nil { - objectMap["properties"] = t.TopicProperties - } - if t.Location != nil { - objectMap["location"] = t.Location - } - if t.Tags != nil { - objectMap["tags"] = t.Tags - } - if t.ID != nil { - objectMap["id"] = t.ID - } - if t.Name != nil { - objectMap["name"] = t.Name - } - if t.Type != nil { - objectMap["type"] = t.Type - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for Topic struct. -func (t *Topic) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var topicProperties TopicProperties - err = json.Unmarshal(*v, &topicProperties) - if err != nil { - return err - } - t.TopicProperties = &topicProperties - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - t.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - t.Tags = tags - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - t.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - t.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - t.Type = &typeVar - } - } 
- } - - return nil -} - -// TopicProperties properties of the Topic -type TopicProperties struct { - // ProvisioningState - Provisioning state of the topic. Possible values include: 'TopicProvisioningStateCreating', 'TopicProvisioningStateUpdating', 'TopicProvisioningStateDeleting', 'TopicProvisioningStateSucceeded', 'TopicProvisioningStateCanceled', 'TopicProvisioningStateFailed' - ProvisioningState TopicProvisioningState `json:"provisioningState,omitempty"` - // Endpoint - Endpoint for the topic. - Endpoint *string `json:"endpoint,omitempty"` -} - -// TopicRegenerateKeyRequest topic regenerate share access key request -type TopicRegenerateKeyRequest struct { - // KeyName - Key name to regenerate key1 or key2 - KeyName *string `json:"keyName,omitempty"` -} - -// TopicsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type TopicsCreateOrUpdateFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. 
-func (future *TopicsCreateOrUpdateFuture) Result(client TopicsClient) (t Topic, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "eventgrid.TopicsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("eventgrid.TopicsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if t.Response.Response, err = future.GetResult(sender); err == nil && t.Response.Response.StatusCode != http.StatusNoContent { - t, err = client.CreateOrUpdateResponder(t.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "eventgrid.TopicsCreateOrUpdateFuture", "Result", t.Response.Response, "Failure responding to request") - } - } - return -} - -// TopicsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. -type TopicsDeleteFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *TopicsDeleteFuture) Result(client TopicsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "eventgrid.TopicsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("eventgrid.TopicsDeleteFuture") - return - } - ar.Response = future.Response() - return -} - -// TopicSharedAccessKeys shared access keys of the Topic -type TopicSharedAccessKeys struct { - autorest.Response `json:"-"` - // Key1 - Shared access key1 for the topic. - Key1 *string `json:"key1,omitempty"` - // Key2 - Shared access key2 for the topic. 
- Key2 *string `json:"key2,omitempty"` -} - -// TopicsListResult result of the List Topics operation -type TopicsListResult struct { - autorest.Response `json:"-"` - // Value - A collection of Topics - Value *[]Topic `json:"value,omitempty"` -} - -// TopicsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. -type TopicsUpdateFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *TopicsUpdateFuture) Result(client TopicsClient) (t Topic, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "eventgrid.TopicsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("eventgrid.TopicsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if t.Response.Response, err = future.GetResult(sender); err == nil && t.Response.Response.StatusCode != http.StatusNoContent { - t, err = client.UpdateResponder(t.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "eventgrid.TopicsUpdateFuture", "Result", t.Response.Response, "Failure responding to request") - } - } - return -} - -// TopicTypeInfo properties of a topic type info. -type TopicTypeInfo struct { - autorest.Response `json:"-"` - // TopicTypeProperties - Properties of the topic type info - *TopicTypeProperties `json:"properties,omitempty"` - // ID - Fully qualified identifier of the resource - ID *string `json:"id,omitempty"` - // Name - Name of the resource - Name *string `json:"name,omitempty"` - // Type - Type of the resource - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for TopicTypeInfo. 
-func (tti TopicTypeInfo) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if tti.TopicTypeProperties != nil { - objectMap["properties"] = tti.TopicTypeProperties - } - if tti.ID != nil { - objectMap["id"] = tti.ID - } - if tti.Name != nil { - objectMap["name"] = tti.Name - } - if tti.Type != nil { - objectMap["type"] = tti.Type - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for TopicTypeInfo struct. -func (tti *TopicTypeInfo) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var topicTypeProperties TopicTypeProperties - err = json.Unmarshal(*v, &topicTypeProperties) - if err != nil { - return err - } - tti.TopicTypeProperties = &topicTypeProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - tti.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - tti.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - tti.Type = &typeVar - } - } - } - - return nil -} - -// TopicTypeProperties properties of a topic type. -type TopicTypeProperties struct { - // Provider - Namespace of the provider of the topic type. - Provider *string `json:"provider,omitempty"` - // DisplayName - Display Name for the topic type. - DisplayName *string `json:"displayName,omitempty"` - // Description - Description of the topic type. - Description *string `json:"description,omitempty"` - // ResourceRegionType - Region type of the resource. 
Possible values include: 'RegionalResource', 'GlobalResource' - ResourceRegionType ResourceRegionType `json:"resourceRegionType,omitempty"` - // ProvisioningState - Provisioning state of the topic type. Possible values include: 'TopicTypeProvisioningStateCreating', 'TopicTypeProvisioningStateUpdating', 'TopicTypeProvisioningStateDeleting', 'TopicTypeProvisioningStateSucceeded', 'TopicTypeProvisioningStateCanceled', 'TopicTypeProvisioningStateFailed' - ProvisioningState TopicTypeProvisioningState `json:"provisioningState,omitempty"` - // SupportedLocations - List of locations supported by this topic type. - SupportedLocations *[]string `json:"supportedLocations,omitempty"` -} - -// TopicTypesListResult result of the List Topic Types operation -type TopicTypesListResult struct { - autorest.Response `json:"-"` - // Value - A collection of topic types - Value *[]TopicTypeInfo `json:"value,omitempty"` -} - -// TopicUpdateParameters properties of the Topic update -type TopicUpdateParameters struct { - // Tags - Tags of the resource - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for TopicUpdateParameters. -func (tup TopicUpdateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if tup.Tags != nil { - objectMap["tags"] = tup.Tags - } - return json.Marshal(objectMap) -} - -// TrackedResource definition of a Tracked Resource -type TrackedResource struct { - // Location - Location of the resource - Location *string `json:"location,omitempty"` - // Tags - Tags of the resource - Tags map[string]*string `json:"tags"` - // ID - Fully qualified identifier of the resource - ID *string `json:"id,omitempty"` - // Name - Name of the resource - Name *string `json:"name,omitempty"` - // Type - Type of the resource - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for TrackedResource. 
-func (tr TrackedResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if tr.Location != nil { - objectMap["location"] = tr.Location - } - if tr.Tags != nil { - objectMap["tags"] = tr.Tags - } - if tr.ID != nil { - objectMap["id"] = tr.ID - } - if tr.Name != nil { - objectMap["name"] = tr.Name - } - if tr.Type != nil { - objectMap["type"] = tr.Type - } - return json.Marshal(objectMap) -} - -// WebHookEventSubscriptionDestination information about the webhook destination for an event subscription -type WebHookEventSubscriptionDestination struct { - // WebHookEventSubscriptionDestinationProperties - WebHook Properties of the event subscription destination - *WebHookEventSubscriptionDestinationProperties `json:"properties,omitempty"` - // EndpointType - Possible values include: 'EndpointTypeEventSubscriptionDestination', 'EndpointTypeWebHook', 'EndpointTypeEventHub' - EndpointType EndpointType `json:"endpointType,omitempty"` -} - -// MarshalJSON is the custom marshaler for WebHookEventSubscriptionDestination. -func (whesd WebHookEventSubscriptionDestination) MarshalJSON() ([]byte, error) { - whesd.EndpointType = EndpointTypeWebHook - objectMap := make(map[string]interface{}) - if whesd.WebHookEventSubscriptionDestinationProperties != nil { - objectMap["properties"] = whesd.WebHookEventSubscriptionDestinationProperties - } - if whesd.EndpointType != "" { - objectMap["endpointType"] = whesd.EndpointType - } - return json.Marshal(objectMap) -} - -// AsWebHookEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for WebHookEventSubscriptionDestination. -func (whesd WebHookEventSubscriptionDestination) AsWebHookEventSubscriptionDestination() (*WebHookEventSubscriptionDestination, bool) { - return &whesd, true -} - -// AsEventHubEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for WebHookEventSubscriptionDestination. 
-func (whesd WebHookEventSubscriptionDestination) AsEventHubEventSubscriptionDestination() (*EventHubEventSubscriptionDestination, bool) { - return nil, false -} - -// AsEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for WebHookEventSubscriptionDestination. -func (whesd WebHookEventSubscriptionDestination) AsEventSubscriptionDestination() (*EventSubscriptionDestination, bool) { - return nil, false -} - -// AsBasicEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for WebHookEventSubscriptionDestination. -func (whesd WebHookEventSubscriptionDestination) AsBasicEventSubscriptionDestination() (BasicEventSubscriptionDestination, bool) { - return &whesd, true -} - -// UnmarshalJSON is the custom unmarshaler for WebHookEventSubscriptionDestination struct. -func (whesd *WebHookEventSubscriptionDestination) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var webHookEventSubscriptionDestinationProperties WebHookEventSubscriptionDestinationProperties - err = json.Unmarshal(*v, &webHookEventSubscriptionDestinationProperties) - if err != nil { - return err - } - whesd.WebHookEventSubscriptionDestinationProperties = &webHookEventSubscriptionDestinationProperties - } - case "endpointType": - if v != nil { - var endpointType EndpointType - err = json.Unmarshal(*v, &endpointType) - if err != nil { - return err - } - whesd.EndpointType = endpointType - } - } - } - - return nil -} - -// WebHookEventSubscriptionDestinationProperties information about the webhook destination properties for -// an event subscription. -type WebHookEventSubscriptionDestinationProperties struct { - // EndpointURL - The URL that represents the endpoint of the destination of an event subscription. 
- EndpointURL *string `json:"endpointUrl,omitempty"` - // EndpointBaseURL - The base URL that represents the endpoint of the destination of an event subscription. - EndpointBaseURL *string `json:"endpointBaseUrl,omitempty"` -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/models.go index d27076d6f4b9..4cadd1c21c58 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/models.go @@ -703,6 +703,8 @@ type CaptureDescription struct { SizeLimitInBytes *int32 `json:"sizeLimitInBytes,omitempty"` // Destination - Properties of Destination where capture will be stored. (Storage Account, Blob Names) Destination *Destination `json:"destination,omitempty"` + // SkipEmptyArchives - A value that indicates whether to Skip Empty Archives + SkipEmptyArchives *bool `json:"skipEmptyArchives,omitempty"` } // CheckNameAvailabilityParameter parameter supplied to check Namespace name availability operation diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/accountfilters.go b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/accountfilters.go new file mode 100644 index 000000000000..b3f842a5f23e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/accountfilters.go @@ -0,0 +1,487 @@ +package media + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// AccountFiltersClient is the client for the AccountFilters methods of the Media service. +type AccountFiltersClient struct { + BaseClient +} + +// NewAccountFiltersClient creates an instance of the AccountFiltersClient client. +func NewAccountFiltersClient(subscriptionID string) AccountFiltersClient { + return NewAccountFiltersClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewAccountFiltersClientWithBaseURI creates an instance of the AccountFiltersClient client. +func NewAccountFiltersClientWithBaseURI(baseURI string, subscriptionID string) AccountFiltersClient { + return AccountFiltersClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates an Account Filter in the Media Services account. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. 
+// filterName - the Account Filter name +// parameters - the request parameters +func (client AccountFiltersClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, filterName string, parameters AccountFilter) (result AccountFilter, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountFiltersClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.FilterProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.FilterProperties.FirstQuality", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.FilterProperties.FirstQuality.Bitrate", Name: validation.Null, Rule: true, Chain: nil}}}, + }}}}}); err != nil { + return result, validation.NewError("media.AccountFiltersClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, filterName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "media.AccountFiltersClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.AccountFiltersClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.AccountFiltersClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client AccountFiltersClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, filterName string, parameters AccountFilter) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "filterName": autorest.Encode("path", filterName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/accountFilters/{filterName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client AccountFiltersClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client AccountFiltersClient) CreateOrUpdateResponder(resp *http.Response) (result AccountFilter, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes an Account Filter in the Media Services account. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// filterName - the Account Filter name +func (client AccountFiltersClient) Delete(ctx context.Context, resourceGroupName string, accountName string, filterName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountFiltersClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, filterName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.AccountFiltersClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "media.AccountFiltersClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.AccountFiltersClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
func (client AccountFiltersClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, filterName string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"accountName":       autorest.Encode("path", accountName),
		"filterName":        autorest.Encode("path", filterName),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2018-07-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsDelete(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/accountFilters/{filterName}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client AccountFiltersClient) DeleteSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
}

// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client AccountFiltersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		// 204 covers "already deleted" — delete is effectively idempotent.
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
		autorest.ByClosing())
	result.Response = resp
	return
}

// Get get the details of an Account Filter in the Media Services account.
// Parameters:
// resourceGroupName - the name of the resource group within the Azure subscription.
// accountName - the Media Services account name.
// filterName - the Account Filter name
func (client AccountFiltersClient) Get(ctx context.Context, resourceGroupName string, accountName string, filterName string) (result AccountFilter, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/AccountFiltersClient.Get")
		defer func() {
			sc := -1
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	req, err := client.GetPreparer(ctx, resourceGroupName, accountName, filterName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "media.AccountFiltersClient", "Get", nil, "Failure preparing request")
		return
	}

	resp, err := client.GetSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "media.AccountFiltersClient", "Get", resp, "Failure sending request")
		return
	}

	result, err = client.GetResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "media.AccountFiltersClient", "Get", resp, "Failure responding to request")
	}

	return
}

// GetPreparer prepares the Get request.
func (client AccountFiltersClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, filterName string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"accountName":       autorest.Encode("path", accountName),
		"filterName":        autorest.Encode("path", filterName),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2018-07-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/accountFilters/{filterName}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client AccountFiltersClient) GetSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
}

// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client AccountFiltersClient) GetResponder(resp *http.Response) (result AccountFilter, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		// NOTE(review): 404 is treated as a success status here, presumably so
		// callers can detect "not found" via the status code instead of an error
		// — generated from the service spec; confirm before relying on it.
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// List list Account Filters in the Media Services account.
// Parameters:
// resourceGroupName - the name of the resource group within the Azure subscription.
// accountName - the Media Services account name.
func (client AccountFiltersClient) List(ctx context.Context, resourceGroupName string, accountName string) (result AccountFilterCollectionPage, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/AccountFiltersClient.List")
		defer func() {
			sc := -1
			if result.afc.Response.Response != nil {
				sc = result.afc.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	// fn is the pager callback the returned page uses to fetch subsequent pages.
	result.fn = client.listNextResults
	req, err := client.ListPreparer(ctx, resourceGroupName, accountName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "media.AccountFiltersClient", "List", nil, "Failure preparing request")
		return
	}

	resp, err := client.ListSender(req)
	if err != nil {
		result.afc.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "media.AccountFiltersClient", "List", resp, "Failure sending request")
		return
	}

	result.afc, err = client.ListResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "media.AccountFiltersClient", "List", resp, "Failure responding to request")
	}

	return
}

// ListPreparer prepares the List request.
func (client AccountFiltersClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"accountName":       autorest.Encode("path", accountName),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2018-07-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/accountFilters", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client AccountFiltersClient) ListSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
}

// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client AccountFiltersClient) ListResponder(resp *http.Response) (result AccountFilterCollection, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// listNextResults retrieves the next set of results, if any.
func (client AccountFiltersClient) listNextResults(ctx context.Context, lastResults AccountFilterCollection) (result AccountFilterCollection, err error) {
	req, err := lastResults.accountFilterCollectionPreparer(ctx)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "media.AccountFiltersClient", "listNextResults", nil, "Failure preparing next results request")
	}
	// A nil request means the previous page carried no next-page link;
	// return the zero collection to signal the end of iteration.
	if req == nil {
		return
	}
	resp, err := client.ListSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "media.AccountFiltersClient", "listNextResults", resp, "Failure sending next results request")
	}
	result, err = client.ListResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "media.AccountFiltersClient", "listNextResults", resp, "Failure responding to next results request")
	}
	return
}

// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client AccountFiltersClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string) (result AccountFilterCollectionIterator, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/AccountFiltersClient.List")
		defer func() {
			sc := -1
			if result.Response().Response.Response != nil {
				sc = result.page.Response().Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	result.page, err = client.List(ctx, resourceGroupName, accountName)
	return
}

// Update updates an existing Account Filter in the Media Services account.
// Parameters:
// resourceGroupName - the name of the resource group within the Azure subscription.
// accountName - the Media Services account name.
// filterName - the Account Filter name
// parameters - the request parameters
func (client AccountFiltersClient) Update(ctx context.Context, resourceGroupName string, accountName string, filterName string, parameters AccountFilter) (result AccountFilter, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/AccountFiltersClient.Update")
		defer func() {
			sc := -1
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, filterName, parameters)
	if err != nil {
		err = autorest.NewErrorWithError(err, "media.AccountFiltersClient", "Update", nil, "Failure preparing request")
		return
	}

	resp, err := client.UpdateSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "media.AccountFiltersClient", "Update", resp, "Failure sending request")
		return
	}

	result, err = client.UpdateResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "media.AccountFiltersClient", "Update", resp, "Failure responding to request")
	}

	return
}

// UpdatePreparer prepares the Update request.
func (client AccountFiltersClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, filterName string, parameters AccountFilter) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"accountName":       autorest.Encode("path", accountName),
		"filterName":        autorest.Encode("path", filterName),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2018-07-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	// Update is a PATCH (partial update), unlike CreateOrUpdate's PUT.
	preparer := autorest.CreatePreparer(
		autorest.AsContentType("application/json; charset=utf-8"),
		autorest.AsPatch(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/accountFilters/{filterName}", pathParameters),
		autorest.WithJSON(parameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client AccountFiltersClient) UpdateSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
}

// UpdateResponder handles the response to the Update request. The method always
// closes the http.Response Body.
+func (client AccountFiltersClient) UpdateResponder(resp *http.Response) (result AccountFilter, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/assetfilters.go b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/assetfilters.go new file mode 100644 index 000000000000..a3cc34537bae --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/assetfilters.go @@ -0,0 +1,497 @@ +package media + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// AssetFiltersClient is the client for the AssetFilters methods of the Media service. 
type AssetFiltersClient struct {
	BaseClient
}

// NewAssetFiltersClient creates an instance of the AssetFiltersClient client.
func NewAssetFiltersClient(subscriptionID string) AssetFiltersClient {
	return NewAssetFiltersClientWithBaseURI(DefaultBaseURI, subscriptionID)
}

// NewAssetFiltersClientWithBaseURI creates an instance of the AssetFiltersClient client.
func NewAssetFiltersClientWithBaseURI(baseURI string, subscriptionID string) AssetFiltersClient {
	return AssetFiltersClient{NewWithBaseURI(baseURI, subscriptionID)}
}

// CreateOrUpdate creates or updates an Asset Filter associated with the specified Asset.
// Parameters:
// resourceGroupName - the name of the resource group within the Azure subscription.
// accountName - the Media Services account name.
// assetName - the Asset name.
// filterName - the Asset Filter name
// parameters - the request parameters
func (client AssetFiltersClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, assetName string, filterName string, parameters AssetFilter) (result AssetFilter, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/AssetFiltersClient.CreateOrUpdate")
		defer func() {
			sc := -1
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	// Client-side validation from the service spec: when FirstQuality is set,
	// its Bitrate field is required.
	if err := validation.Validate([]validation.Validation{
		{TargetValue: parameters,
			Constraints: []validation.Constraint{{Target: "parameters.FilterProperties", Name: validation.Null, Rule: false,
				Chain: []validation.Constraint{{Target: "parameters.FilterProperties.FirstQuality", Name: validation.Null, Rule: false,
					Chain: []validation.Constraint{{Target: "parameters.FilterProperties.FirstQuality.Bitrate", Name: validation.Null, Rule: true, Chain: nil}}},
				}}}}}); err != nil {
		return result, validation.NewError("media.AssetFiltersClient", "CreateOrUpdate", err.Error())
	}

	req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, assetName, filterName, parameters)
	if err != nil {
		err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "CreateOrUpdate", nil, "Failure preparing request")
		return
	}

	resp, err := client.CreateOrUpdateSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "CreateOrUpdate", resp, "Failure sending request")
		return
	}

	result, err = client.CreateOrUpdateResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "CreateOrUpdate", resp, "Failure responding to request")
	}

	return
}

// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client AssetFiltersClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, assetName string, filterName string, parameters AssetFilter) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"accountName":       autorest.Encode("path", accountName),
		"assetName":         autorest.Encode("path", assetName),
		"filterName":        autorest.Encode("path", filterName),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2018-07-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsContentType("application/json; charset=utf-8"),
		autorest.AsPut(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}/assetFilters/{filterName}", pathParameters),
		autorest.WithJSON(parameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client AssetFiltersClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
}

// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client AssetFiltersClient) CreateOrUpdateResponder(resp *http.Response) (result AssetFilter, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// Delete deletes an Asset Filter associated with the specified Asset.
// Parameters:
// resourceGroupName - the name of the resource group within the Azure subscription.
// accountName - the Media Services account name.
// assetName - the Asset name.
// filterName - the Asset Filter name
func (client AssetFiltersClient) Delete(ctx context.Context, resourceGroupName string, accountName string, assetName string, filterName string) (result autorest.Response, err error) {
	if tracing.IsEnabled() {
		// Emit a tracing span when the SDK-wide tracing hook is enabled.
		ctx = tracing.StartSpan(ctx, fqdn+"/AssetFiltersClient.Delete")
		defer func() {
			sc := -1
			if result.Response != nil {
				sc = result.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, assetName, filterName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "Delete", nil, "Failure preparing request")
		return
	}

	resp, err := client.DeleteSender(req)
	if err != nil {
		result.Response = resp
		err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "Delete", resp, "Failure sending request")
		return
	}

	result, err = client.DeleteResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "Delete", resp, "Failure responding to request")
	}

	return
}

// DeletePreparer prepares the Delete request.
func (client AssetFiltersClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, assetName string, filterName string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"accountName":       autorest.Encode("path", accountName),
		"assetName":         autorest.Encode("path", assetName),
		"filterName":        autorest.Encode("path", filterName),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2018-07-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsDelete(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}/assetFilters/{filterName}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client AssetFiltersClient) DeleteSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
}

// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client AssetFiltersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		// 204 covers "already deleted" — delete is effectively idempotent.
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
		autorest.ByClosing())
	result.Response = resp
	return
}

// Get get the details of an Asset Filter associated with the specified Asset.
// Parameters:
// resourceGroupName - the name of the resource group within the Azure subscription.
// accountName - the Media Services account name.
// assetName - the Asset name.
// filterName - the Asset Filter name
func (client AssetFiltersClient) Get(ctx context.Context, resourceGroupName string, accountName string, assetName string, filterName string) (result AssetFilter, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/AssetFiltersClient.Get")
		defer func() {
			sc := -1
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	req, err := client.GetPreparer(ctx, resourceGroupName, accountName, assetName, filterName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "Get", nil, "Failure preparing request")
		return
	}

	resp, err := client.GetSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "Get", resp, "Failure sending request")
		return
	}

	result, err = client.GetResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "Get", resp, "Failure responding to request")
	}

	return
}

// GetPreparer prepares the Get request.
func (client AssetFiltersClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, assetName string, filterName string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"accountName":       autorest.Encode("path", accountName),
		"assetName":         autorest.Encode("path", assetName),
		"filterName":        autorest.Encode("path", filterName),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2018-07-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}/assetFilters/{filterName}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client AssetFiltersClient) GetSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
}

// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client AssetFiltersClient) GetResponder(resp *http.Response) (result AssetFilter, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		// NOTE(review): 404 is treated as a success status here, presumably so
		// callers can detect "not found" via the status code instead of an error
		// — generated from the service spec; confirm before relying on it.
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// List list Asset Filters associated with the specified Asset.
// Parameters:
// resourceGroupName - the name of the resource group within the Azure subscription.
// accountName - the Media Services account name.
// assetName - the Asset name.
func (client AssetFiltersClient) List(ctx context.Context, resourceGroupName string, accountName string, assetName string) (result AssetFilterCollectionPage, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/AssetFiltersClient.List")
		defer func() {
			sc := -1
			if result.afc.Response.Response != nil {
				sc = result.afc.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	// fn is the pager callback the returned page uses to fetch subsequent pages.
	result.fn = client.listNextResults
	req, err := client.ListPreparer(ctx, resourceGroupName, accountName, assetName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "List", nil, "Failure preparing request")
		return
	}

	resp, err := client.ListSender(req)
	if err != nil {
		result.afc.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "List", resp, "Failure sending request")
		return
	}

	result.afc, err = client.ListResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "List", resp, "Failure responding to request")
	}

	return
}

// ListPreparer prepares the List request.
func (client AssetFiltersClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, assetName string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"accountName":       autorest.Encode("path", accountName),
		"assetName":         autorest.Encode("path", assetName),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2018-07-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}/assetFilters", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client AssetFiltersClient) ListSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
}

// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client AssetFiltersClient) ListResponder(resp *http.Response) (result AssetFilterCollection, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// listNextResults retrieves the next set of results, if any.
func (client AssetFiltersClient) listNextResults(ctx context.Context, lastResults AssetFilterCollection) (result AssetFilterCollection, err error) {
	req, err := lastResults.assetFilterCollectionPreparer(ctx)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "media.AssetFiltersClient", "listNextResults", nil, "Failure preparing next results request")
	}
	// A nil request means the previous page carried no next-page link;
	// return the zero collection to signal the end of iteration.
	if req == nil {
		return
	}
	resp, err := client.ListSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "media.AssetFiltersClient", "listNextResults", resp, "Failure sending next results request")
	}
	result, err = client.ListResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "listNextResults", resp, "Failure responding to next results request")
	}
	return
}

// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client AssetFiltersClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, assetName string) (result AssetFilterCollectionIterator, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/AssetFiltersClient.List")
		defer func() {
			sc := -1
			if result.Response().Response.Response != nil {
				sc = result.page.Response().Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	result.page, err = client.List(ctx, resourceGroupName, accountName, assetName)
	return
}

// Update updates an existing Asset Filter associated with the specified Asset.
// Parameters:
// resourceGroupName - the name of the resource group within the Azure subscription.
// accountName - the Media Services account name.
// assetName - the Asset name.
// filterName - the Asset Filter name
// parameters - the request parameters
func (client AssetFiltersClient) Update(ctx context.Context, resourceGroupName string, accountName string, assetName string, filterName string, parameters AssetFilter) (result AssetFilter, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/AssetFiltersClient.Update")
		defer func() {
			sc := -1
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, assetName, filterName, parameters)
	if err != nil {
		err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "Update", nil, "Failure preparing request")
		return
	}

	resp, err := client.UpdateSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "Update", resp, "Failure sending request")
		return
	}

	result, err = client.UpdateResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "media.AssetFiltersClient", "Update", resp, "Failure responding to request")
	}

	return
}

// UpdatePreparer prepares the Update request.
func (client AssetFiltersClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, assetName string, filterName string, parameters AssetFilter) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"accountName":       autorest.Encode("path", accountName),
		"assetName":         autorest.Encode("path", assetName),
		"filterName":        autorest.Encode("path", filterName),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2018-07-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	// Update is a PATCH (partial update), unlike CreateOrUpdate's PUT.
	preparer := autorest.CreatePreparer(
		autorest.AsContentType("application/json; charset=utf-8"),
		autorest.AsPatch(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}/assetFilters/{filterName}", pathParameters),
		autorest.WithJSON(parameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client AssetFiltersClient) UpdateSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
}

// UpdateResponder handles the response to the Update request. The method always
// closes the http.Response Body.
+func (client AssetFiltersClient) UpdateResponder(resp *http.Response) (result AssetFilter, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/assets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/assets.go new file mode 100644 index 000000000000..3d950441f722 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/assets.go @@ -0,0 +1,732 @@ +package media + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// AssetsClient is the client for the Assets methods of the Media service. +type AssetsClient struct { + BaseClient +} + +// NewAssetsClient creates an instance of the AssetsClient client. 
+func NewAssetsClient(subscriptionID string) AssetsClient { + return NewAssetsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewAssetsClientWithBaseURI creates an instance of the AssetsClient client. +func NewAssetsClientWithBaseURI(baseURI string, subscriptionID string) AssetsClient { + return AssetsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates an Asset in the Media Services account +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// assetName - the Asset name. +// parameters - the request parameters +func (client AssetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, assetName string, parameters Asset) (result Asset, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AssetsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, assetName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "media.AssetsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.AssetsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.AssetsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client AssetsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, assetName string, parameters Asset) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "assetName": autorest.Encode("path", assetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client AssetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client AssetsClient) CreateOrUpdateResponder(resp *http.Response) (result Asset, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes an Asset in the Media Services account +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// assetName - the Asset name. +func (client AssetsClient) Delete(ctx context.Context, resourceGroupName string, accountName string, assetName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AssetsClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, assetName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.AssetsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "media.AssetsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.AssetsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client AssetsClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, assetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "assetName": autorest.Encode("path", assetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client AssetsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client AssetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get get the details of an Asset in the Media Services account +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// assetName - the Asset name. 
+func (client AssetsClient) Get(ctx context.Context, resourceGroupName string, accountName string, assetName string) (result Asset, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AssetsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, accountName, assetName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.AssetsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.AssetsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.AssetsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client AssetsClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, assetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "assetName": autorest.Encode("path", assetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client AssetsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client AssetsClient) GetResponder(resp *http.Response) (result Asset, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetEncryptionKey gets the Asset storage encryption keys used to decrypt content created by version 2 of the Media +// Services API +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. 
+// accountName - the Media Services account name. +// assetName - the Asset name. +func (client AssetsClient) GetEncryptionKey(ctx context.Context, resourceGroupName string, accountName string, assetName string) (result StorageEncryptedAssetDecryptionData, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AssetsClient.GetEncryptionKey") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetEncryptionKeyPreparer(ctx, resourceGroupName, accountName, assetName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.AssetsClient", "GetEncryptionKey", nil, "Failure preparing request") + return + } + + resp, err := client.GetEncryptionKeySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.AssetsClient", "GetEncryptionKey", resp, "Failure sending request") + return + } + + result, err = client.GetEncryptionKeyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.AssetsClient", "GetEncryptionKey", resp, "Failure responding to request") + } + + return +} + +// GetEncryptionKeyPreparer prepares the GetEncryptionKey request. 
+func (client AssetsClient) GetEncryptionKeyPreparer(ctx context.Context, resourceGroupName string, accountName string, assetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "assetName": autorest.Encode("path", assetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}/getEncryptionKey", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetEncryptionKeySender sends the GetEncryptionKey request. The method will close the +// http.Response Body if it receives an error. +func (client AssetsClient) GetEncryptionKeySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetEncryptionKeyResponder handles the response to the GetEncryptionKey request. The method always +// closes the http.Response Body. 
+func (client AssetsClient) GetEncryptionKeyResponder(resp *http.Response) (result StorageEncryptedAssetDecryptionData, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List list Assets in the Media Services account with optional filtering and ordering +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// filter - restricts the set of items returned. +// top - specifies a non-negative integer n that limits the number of items returned from a collection. The +// service returns the number of available items up to but not greater than the specified value n. +// orderby - specifies the the key by which the result collection should be ordered. +func (client AssetsClient) List(ctx context.Context, resourceGroupName string, accountName string, filter string, top *int32, orderby string) (result AssetCollectionPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AssetsClient.List") + defer func() { + sc := -1 + if result.ac.Response.Response != nil { + sc = result.ac.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, accountName, filter, top, orderby) + if err != nil { + err = autorest.NewErrorWithError(err, "media.AssetsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.ac.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.AssetsClient", "List", resp, "Failure sending request") + return + } + + result.ac, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, 
"media.AssetsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client AssetsClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, filter string, top *int32, orderby string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(orderby) > 0 { + queryParameters["$orderby"] = autorest.Encode("query", orderby) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client AssetsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client AssetsClient) ListResponder(resp *http.Response) (result AssetCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client AssetsClient) listNextResults(ctx context.Context, lastResults AssetCollection) (result AssetCollection, err error) { + req, err := lastResults.assetCollectionPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "media.AssetsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "media.AssetsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.AssetsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client AssetsClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, filter string, top *int32, orderby string) (result AssetCollectionIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AssetsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, accountName, filter, top, orderby) + return +} + +// ListContainerSas lists storage container URLs with shared access signatures (SAS) for uploading and downloading +// Asset content. The signatures are derived from the storage account keys. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// assetName - the Asset name. +// parameters - the request parameters +func (client AssetsClient) ListContainerSas(ctx context.Context, resourceGroupName string, accountName string, assetName string, parameters ListContainerSasInput) (result AssetContainerSas, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AssetsClient.ListContainerSas") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListContainerSasPreparer(ctx, resourceGroupName, accountName, assetName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "media.AssetsClient", "ListContainerSas", nil, "Failure preparing request") + return + } + + resp, err := client.ListContainerSasSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.AssetsClient", "ListContainerSas", resp, "Failure sending request") + return + } + + result, err = 
client.ListContainerSasResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.AssetsClient", "ListContainerSas", resp, "Failure responding to request") + } + + return +} + +// ListContainerSasPreparer prepares the ListContainerSas request. +func (client AssetsClient) ListContainerSasPreparer(ctx context.Context, resourceGroupName string, accountName string, assetName string, parameters ListContainerSasInput) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "assetName": autorest.Encode("path", assetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}/listContainerSas", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListContainerSasSender sends the ListContainerSas request. The method will close the +// http.Response Body if it receives an error. +func (client AssetsClient) ListContainerSasSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListContainerSasResponder handles the response to the ListContainerSas request. The method always +// closes the http.Response Body. 
+func (client AssetsClient) ListContainerSasResponder(resp *http.Response) (result AssetContainerSas, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListStreamingLocators lists Streaming Locators which are associated with this asset. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// assetName - the Asset name. +func (client AssetsClient) ListStreamingLocators(ctx context.Context, resourceGroupName string, accountName string, assetName string) (result ListStreamingLocatorsResponse, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AssetsClient.ListStreamingLocators") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListStreamingLocatorsPreparer(ctx, resourceGroupName, accountName, assetName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.AssetsClient", "ListStreamingLocators", nil, "Failure preparing request") + return + } + + resp, err := client.ListStreamingLocatorsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.AssetsClient", "ListStreamingLocators", resp, "Failure sending request") + return + } + + result, err = client.ListStreamingLocatorsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.AssetsClient", "ListStreamingLocators", resp, "Failure responding to request") + } + + return +} + +// ListStreamingLocatorsPreparer prepares the ListStreamingLocators request. 
+func (client AssetsClient) ListStreamingLocatorsPreparer(ctx context.Context, resourceGroupName string, accountName string, assetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "assetName": autorest.Encode("path", assetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}/listStreamingLocators", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListStreamingLocatorsSender sends the ListStreamingLocators request. The method will close the +// http.Response Body if it receives an error. +func (client AssetsClient) ListStreamingLocatorsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListStreamingLocatorsResponder handles the response to the ListStreamingLocators request. The method always +// closes the http.Response Body. 
+func (client AssetsClient) ListStreamingLocatorsResponder(resp *http.Response) (result ListStreamingLocatorsResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update updates an existing Asset in the Media Services account +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// assetName - the Asset name. +// parameters - the request parameters +func (client AssetsClient) Update(ctx context.Context, resourceGroupName string, accountName string, assetName string, parameters Asset) (result Asset, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AssetsClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, assetName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "media.AssetsClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.AssetsClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.AssetsClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client AssetsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, assetName string, parameters Asset) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "assetName": autorest.Encode("path", assetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client AssetsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client AssetsClient) UpdateResponder(resp *http.Response) (result Asset, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/client.go new file mode 100644 index 000000000000..500616759e15 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/client.go @@ -0,0 +1,51 @@ +// Package media implements the Azure ARM Media service API version 2018-07-01. +// +// +package media + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // DefaultBaseURI is the default URI used for the service Media + DefaultBaseURI = "https://management.azure.com" +) + +// BaseClient is the base client for Media. +type BaseClient struct { + autorest.Client + BaseURI string + SubscriptionID string +} + +// New creates an instance of the BaseClient client. 
+func New(subscriptionID string) BaseClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the BaseClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { + return BaseClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + SubscriptionID: subscriptionID, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/contentkeypolicies.go b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/contentkeypolicies.go new file mode 100644 index 000000000000..ff94e0ecf00c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/contentkeypolicies.go @@ -0,0 +1,577 @@ +package media + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// ContentKeyPoliciesClient is the client for the ContentKeyPolicies methods of the Media service. 
+type ContentKeyPoliciesClient struct { + BaseClient +} + +// NewContentKeyPoliciesClient creates an instance of the ContentKeyPoliciesClient client. +func NewContentKeyPoliciesClient(subscriptionID string) ContentKeyPoliciesClient { + return NewContentKeyPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewContentKeyPoliciesClientWithBaseURI creates an instance of the ContentKeyPoliciesClient client. +func NewContentKeyPoliciesClientWithBaseURI(baseURI string, subscriptionID string) ContentKeyPoliciesClient { + return ContentKeyPoliciesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create or update a Content Key Policy in the Media Services account +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// contentKeyPolicyName - the Content Key Policy name. +// parameters - the request parameters +func (client ContentKeyPoliciesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, contentKeyPolicyName string, parameters ContentKeyPolicy) (result ContentKeyPolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ContentKeyPoliciesClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.ContentKeyPolicyProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ContentKeyPolicyProperties.Options", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { + return result, validation.NewError("media.ContentKeyPoliciesClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, 
contentKeyPolicyName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "media.ContentKeyPoliciesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.ContentKeyPoliciesClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.ContentKeyPoliciesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client ContentKeyPoliciesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, contentKeyPolicyName string, parameters ContentKeyPolicy) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "contentKeyPolicyName": autorest.Encode("path", contentKeyPolicyName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/contentKeyPolicies/{contentKeyPolicyName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. 
The method will close the +// http.Response Body if it receives an error. +func (client ContentKeyPoliciesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ContentKeyPoliciesClient) CreateOrUpdateResponder(resp *http.Response) (result ContentKeyPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a Content Key Policy in the Media Services account +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// contentKeyPolicyName - the Content Key Policy name. 
+func (client ContentKeyPoliciesClient) Delete(ctx context.Context, resourceGroupName string, accountName string, contentKeyPolicyName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ContentKeyPoliciesClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, contentKeyPolicyName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.ContentKeyPoliciesClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "media.ContentKeyPoliciesClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.ContentKeyPoliciesClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client ContentKeyPoliciesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, contentKeyPolicyName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "contentKeyPolicyName": autorest.Encode("path", contentKeyPolicyName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/contentKeyPolicies/{contentKeyPolicyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ContentKeyPoliciesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ContentKeyPoliciesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get get the details of a Content Key Policy in the Media Services account +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. 
+// accountName - the Media Services account name. +// contentKeyPolicyName - the Content Key Policy name. +func (client ContentKeyPoliciesClient) Get(ctx context.Context, resourceGroupName string, accountName string, contentKeyPolicyName string) (result ContentKeyPolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ContentKeyPoliciesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, accountName, contentKeyPolicyName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.ContentKeyPoliciesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.ContentKeyPoliciesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.ContentKeyPoliciesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client ContentKeyPoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, contentKeyPolicyName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "contentKeyPolicyName": autorest.Encode("path", contentKeyPolicyName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/contentKeyPolicies/{contentKeyPolicyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ContentKeyPoliciesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client ContentKeyPoliciesClient) GetResponder(resp *http.Response) (result ContentKeyPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetPolicyPropertiesWithSecrets get a Content Key Policy including secret values +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// contentKeyPolicyName - the Content Key Policy name. +func (client ContentKeyPoliciesClient) GetPolicyPropertiesWithSecrets(ctx context.Context, resourceGroupName string, accountName string, contentKeyPolicyName string) (result ContentKeyPolicyProperties, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ContentKeyPoliciesClient.GetPolicyPropertiesWithSecrets") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPolicyPropertiesWithSecretsPreparer(ctx, resourceGroupName, accountName, contentKeyPolicyName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.ContentKeyPoliciesClient", "GetPolicyPropertiesWithSecrets", nil, "Failure preparing request") + return + } + + resp, err := client.GetPolicyPropertiesWithSecretsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.ContentKeyPoliciesClient", "GetPolicyPropertiesWithSecrets", resp, "Failure sending request") + return + } + + result, err = client.GetPolicyPropertiesWithSecretsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.ContentKeyPoliciesClient", "GetPolicyPropertiesWithSecrets", resp, "Failure responding to request") + } + + return 
+} + +// GetPolicyPropertiesWithSecretsPreparer prepares the GetPolicyPropertiesWithSecrets request. +func (client ContentKeyPoliciesClient) GetPolicyPropertiesWithSecretsPreparer(ctx context.Context, resourceGroupName string, accountName string, contentKeyPolicyName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "contentKeyPolicyName": autorest.Encode("path", contentKeyPolicyName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/contentKeyPolicies/{contentKeyPolicyName}/getPolicyPropertiesWithSecrets", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetPolicyPropertiesWithSecretsSender sends the GetPolicyPropertiesWithSecrets request. The method will close the +// http.Response Body if it receives an error. +func (client ContentKeyPoliciesClient) GetPolicyPropertiesWithSecretsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetPolicyPropertiesWithSecretsResponder handles the response to the GetPolicyPropertiesWithSecrets request. The method always +// closes the http.Response Body. 
+func (client ContentKeyPoliciesClient) GetPolicyPropertiesWithSecretsResponder(resp *http.Response) (result ContentKeyPolicyProperties, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists the Content Key Policies in the account +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// filter - restricts the set of items returned. +// top - specifies a non-negative integer n that limits the number of items returned from a collection. The +// service returns the number of available items up to but not greater than the specified value n. +// orderby - specifies the key by which the result collection should be ordered. +func (client ContentKeyPoliciesClient) List(ctx context.Context, resourceGroupName string, accountName string, filter string, top *int32, orderby string) (result ContentKeyPolicyCollectionPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ContentKeyPoliciesClient.List") + defer func() { + sc := -1 + if result.ckpc.Response.Response != nil { + sc = result.ckpc.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, accountName, filter, top, orderby) + if err != nil { + err = autorest.NewErrorWithError(err, "media.ContentKeyPoliciesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.ckpc.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.ContentKeyPoliciesClient", "List", resp, "Failure sending request") + return + } + + result.ckpc, err = 
client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.ContentKeyPoliciesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ContentKeyPoliciesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, filter string, top *int32, orderby string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(orderby) > 0 { + queryParameters["$orderby"] = autorest.Encode("query", orderby) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/contentKeyPolicies", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ContentKeyPoliciesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client ContentKeyPoliciesClient) ListResponder(resp *http.Response) (result ContentKeyPolicyCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client ContentKeyPoliciesClient) listNextResults(ctx context.Context, lastResults ContentKeyPolicyCollection) (result ContentKeyPolicyCollection, err error) { + req, err := lastResults.contentKeyPolicyCollectionPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "media.ContentKeyPoliciesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "media.ContentKeyPoliciesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.ContentKeyPoliciesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client ContentKeyPoliciesClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, filter string, top *int32, orderby string) (result ContentKeyPolicyCollectionIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ContentKeyPoliciesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, accountName, filter, top, orderby) + return +} + +// Update updates an existing Content Key Policy in the Media Services account +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// contentKeyPolicyName - the Content Key Policy name. +// parameters - the request parameters +func (client ContentKeyPoliciesClient) Update(ctx context.Context, resourceGroupName string, accountName string, contentKeyPolicyName string, parameters ContentKeyPolicy) (result ContentKeyPolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ContentKeyPoliciesClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, contentKeyPolicyName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "media.ContentKeyPoliciesClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.ContentKeyPoliciesClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = 
autorest.NewErrorWithError(err, "media.ContentKeyPoliciesClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client ContentKeyPoliciesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, contentKeyPolicyName string, parameters ContentKeyPolicy) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "contentKeyPolicyName": autorest.Encode("path", contentKeyPolicyName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/contentKeyPolicies/{contentKeyPolicyName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client ContentKeyPoliciesClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client ContentKeyPoliciesClient) UpdateResponder(resp *http.Response) (result ContentKeyPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/jobs.go b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/jobs.go new file mode 100644 index 000000000000..557402103736 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/jobs.go @@ -0,0 +1,586 @@ +package media + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// JobsClient is the client for the Jobs methods of the Media service. +type JobsClient struct { + BaseClient +} + +// NewJobsClient creates an instance of the JobsClient client. 
+func NewJobsClient(subscriptionID string) JobsClient { + return NewJobsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewJobsClientWithBaseURI creates an instance of the JobsClient client. +func NewJobsClientWithBaseURI(baseURI string, subscriptionID string) JobsClient { + return JobsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CancelJob cancel a Job. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// transformName - the Transform name. +// jobName - the Job name. +func (client JobsClient) CancelJob(ctx context.Context, resourceGroupName string, accountName string, transformName string, jobName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobsClient.CancelJob") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CancelJobPreparer(ctx, resourceGroupName, accountName, transformName, jobName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.JobsClient", "CancelJob", nil, "Failure preparing request") + return + } + + resp, err := client.CancelJobSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "media.JobsClient", "CancelJob", resp, "Failure sending request") + return + } + + result, err = client.CancelJobResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.JobsClient", "CancelJob", resp, "Failure responding to request") + } + + return +} + +// CancelJobPreparer prepares the CancelJob request. 
+func (client JobsClient) CancelJobPreparer(ctx context.Context, resourceGroupName string, accountName string, transformName string, jobName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "jobName": autorest.Encode("path", jobName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "transformName": autorest.Encode("path", transformName), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms/{transformName}/jobs/{jobName}/cancelJob", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CancelJobSender sends the CancelJob request. The method will close the +// http.Response Body if it receives an error. +func (client JobsClient) CancelJobSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CancelJobResponder handles the response to the CancelJob request. The method always +// closes the http.Response Body. +func (client JobsClient) CancelJobResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Create creates a Job. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. 
+// transformName - the Transform name. +// jobName - the Job name. +// parameters - the request parameters +func (client JobsClient) Create(ctx context.Context, resourceGroupName string, accountName string, transformName string, jobName string, parameters Job) (result Job, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobsClient.Create") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.JobProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.JobProperties.Input", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.JobProperties.Outputs", Name: validation.Null, Rule: true, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("media.JobsClient", "Create", err.Error()) + } + + req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, transformName, jobName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "media.JobsClient", "Create", nil, "Failure preparing request") + return + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.JobsClient", "Create", resp, "Failure sending request") + return + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.JobsClient", "Create", resp, "Failure responding to request") + } + + return +} + +// CreatePreparer prepares the Create request. 
+func (client JobsClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, transformName string, jobName string, parameters Job) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "jobName": autorest.Encode("path", jobName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "transformName": autorest.Encode("path", transformName), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms/{transformName}/jobs/{jobName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client JobsClient) CreateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client JobsClient) CreateResponder(resp *http.Response) (result Job, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a Job. 
+// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// transformName - the Transform name. +// jobName - the Job name. +func (client JobsClient) Delete(ctx context.Context, resourceGroupName string, accountName string, transformName string, jobName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobsClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, transformName, jobName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.JobsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "media.JobsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.JobsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client JobsClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, transformName string, jobName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "jobName": autorest.Encode("path", jobName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "transformName": autorest.Encode("path", transformName), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms/{transformName}/jobs/{jobName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client JobsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client JobsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a Job. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// transformName - the Transform name. 
+// jobName - the Job name. +func (client JobsClient) Get(ctx context.Context, resourceGroupName string, accountName string, transformName string, jobName string) (result Job, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, accountName, transformName, jobName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.JobsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.JobsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.JobsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client JobsClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, transformName string, jobName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "jobName": autorest.Encode("path", jobName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "transformName": autorest.Encode("path", transformName), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms/{transformName}/jobs/{jobName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client JobsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client JobsClient) GetResponder(resp *http.Response) (result Job, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all of the Jobs for the Transform. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. 
+// accountName - the Media Services account name. +// transformName - the Transform name. +// filter - restricts the set of items returned. +// orderby - specifies the key by which the result collection should be ordered. +func (client JobsClient) List(ctx context.Context, resourceGroupName string, accountName string, transformName string, filter string, orderby string) (result JobCollectionPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobsClient.List") + defer func() { + sc := -1 + if result.jc.Response.Response != nil { + sc = result.jc.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, accountName, transformName, filter, orderby) + if err != nil { + err = autorest.NewErrorWithError(err, "media.JobsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.jc.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.JobsClient", "List", resp, "Failure sending request") + return + } + + result.jc, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.JobsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client JobsClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, transformName string, filter string, orderby string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "transformName": autorest.Encode("path", transformName), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if len(orderby) > 0 { + queryParameters["$orderby"] = autorest.Encode("query", orderby) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms/{transformName}/jobs", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client JobsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client JobsClient) ListResponder(resp *http.Response) (result JobCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client JobsClient) listNextResults(ctx context.Context, lastResults JobCollection) (result JobCollection, err error) { + req, err := lastResults.jobCollectionPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "media.JobsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "media.JobsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.JobsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client JobsClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, transformName string, filter string, orderby string) (result JobCollectionIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, accountName, transformName, filter, orderby) + return +} + +// Update update is only supported for description and priority. 
Updating Priority will take effect when the Job state +// is Queued or Scheduled and depending on the timing the priority update may be ignored. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// transformName - the Transform name. +// jobName - the Job name. +// parameters - the request parameters +func (client JobsClient) Update(ctx context.Context, resourceGroupName string, accountName string, transformName string, jobName string, parameters Job) (result Job, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobsClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, transformName, jobName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "media.JobsClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.JobsClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.JobsClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client JobsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, transformName string, jobName string, parameters Job) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "jobName": autorest.Encode("path", jobName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "transformName": autorest.Encode("path", transformName), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms/{transformName}/jobs/{jobName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client JobsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client JobsClient) UpdateResponder(resp *http.Response) (result Job, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/liveevents.go b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/liveevents.go new file mode 100644 index 000000000000..9c8a58ad7d34 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/liveevents.go @@ -0,0 +1,778 @@ +package media + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// LiveEventsClient is the client for the LiveEvents methods of the Media service. +type LiveEventsClient struct { + BaseClient +} + +// NewLiveEventsClient creates an instance of the LiveEventsClient client. 
+func NewLiveEventsClient(subscriptionID string) LiveEventsClient { + return NewLiveEventsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewLiveEventsClientWithBaseURI creates an instance of the LiveEventsClient client. +func NewLiveEventsClientWithBaseURI(baseURI string, subscriptionID string) LiveEventsClient { + return LiveEventsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create creates a Live Event. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// liveEventName - the name of the Live Event. +// parameters - live Event properties needed for creation. +// autoStart - the flag indicates if the resource should be automatically started on creation. +func (client LiveEventsClient) Create(ctx context.Context, resourceGroupName string, accountName string, liveEventName string, parameters LiveEvent, autoStart *bool) (result LiveEventsCreateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LiveEventsClient.Create") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: liveEventName, + Constraints: []validation.Constraint{{Target: "liveEventName", Name: validation.MaxLength, Rule: 32, Chain: nil}, + {Target: "liveEventName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "liveEventName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.LiveEventProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.LiveEventProperties.Input", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { + return result, validation.NewError("media.LiveEventsClient", "Create", 
err.Error()) + } + + req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, liveEventName, parameters, autoStart) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveEventsClient", "Create", nil, "Failure preparing request") + return + } + + result, err = client.CreateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveEventsClient", "Create", result.Response(), "Failure sending request") + return + } + + return +} + +// CreatePreparer prepares the Create request. +func (client LiveEventsClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, liveEventName string, parameters LiveEvent, autoStart *bool) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "liveEventName": autorest.Encode("path", liveEventName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if autoStart != nil { + queryParameters["autoStart"] = autorest.Encode("query", *autoStart) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/liveEvents/{liveEventName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. 
+func (client LiveEventsClient) CreateSender(req *http.Request) (future LiveEventsCreateFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client LiveEventsClient) CreateResponder(resp *http.Response) (result LiveEvent, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a Live Event. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// liveEventName - the name of the Live Event. 
+func (client LiveEventsClient) Delete(ctx context.Context, resourceGroupName string, accountName string, liveEventName string) (result LiveEventsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LiveEventsClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: liveEventName, + Constraints: []validation.Constraint{{Target: "liveEventName", Name: validation.MaxLength, Rule: 32, Chain: nil}, + {Target: "liveEventName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "liveEventName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("media.LiveEventsClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, liveEventName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveEventsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveEventsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client LiveEventsClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, liveEventName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "liveEventName": autorest.Encode("path", liveEventName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/liveEvents/{liveEventName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client LiveEventsClient) DeleteSender(req *http.Request) (future LiveEventsDeleteFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client LiveEventsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a Live Event. 
+// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// liveEventName - the name of the Live Event. +func (client LiveEventsClient) Get(ctx context.Context, resourceGroupName string, accountName string, liveEventName string) (result LiveEvent, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LiveEventsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: liveEventName, + Constraints: []validation.Constraint{{Target: "liveEventName", Name: validation.MaxLength, Rule: 32, Chain: nil}, + {Target: "liveEventName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "liveEventName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("media.LiveEventsClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, accountName, liveEventName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveEventsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.LiveEventsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveEventsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client LiveEventsClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, liveEventName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "liveEventName": autorest.Encode("path", liveEventName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/liveEvents/{liveEventName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client LiveEventsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client LiveEventsClient) GetResponder(resp *http.Response) (result LiveEvent, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists the Live Events in the account. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. 
+func (client LiveEventsClient) List(ctx context.Context, resourceGroupName string, accountName string) (result LiveEventListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LiveEventsClient.List") + defer func() { + sc := -1 + if result.lelr.Response.Response != nil { + sc = result.lelr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveEventsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.lelr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.LiveEventsClient", "List", resp, "Failure sending request") + return + } + + result.lelr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveEventsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client LiveEventsClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/liveEvents", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client LiveEventsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client LiveEventsClient) ListResponder(resp *http.Response) (result LiveEventListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client LiveEventsClient) listNextResults(ctx context.Context, lastResults LiveEventListResult) (result LiveEventListResult, err error) { + req, err := lastResults.liveEventListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "media.LiveEventsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "media.LiveEventsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveEventsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client LiveEventsClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string) (result LiveEventListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LiveEventsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, accountName) + return +} + +// Reset resets an existing Live Event. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// liveEventName - the name of the Live Event. 
+func (client LiveEventsClient) Reset(ctx context.Context, resourceGroupName string, accountName string, liveEventName string) (result LiveEventsResetFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LiveEventsClient.Reset") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: liveEventName, + Constraints: []validation.Constraint{{Target: "liveEventName", Name: validation.MaxLength, Rule: 32, Chain: nil}, + {Target: "liveEventName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "liveEventName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("media.LiveEventsClient", "Reset", err.Error()) + } + + req, err := client.ResetPreparer(ctx, resourceGroupName, accountName, liveEventName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveEventsClient", "Reset", nil, "Failure preparing request") + return + } + + result, err = client.ResetSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveEventsClient", "Reset", result.Response(), "Failure sending request") + return + } + + return +} + +// ResetPreparer prepares the Reset request. 
+func (client LiveEventsClient) ResetPreparer(ctx context.Context, resourceGroupName string, accountName string, liveEventName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "liveEventName": autorest.Encode("path", liveEventName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/liveEvents/{liveEventName}/reset", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ResetSender sends the Reset request. The method will close the +// http.Response Body if it receives an error. +func (client LiveEventsClient) ResetSender(req *http.Request) (future LiveEventsResetFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// ResetResponder handles the response to the Reset request. The method always +// closes the http.Response Body. +func (client LiveEventsClient) ResetResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Start starts an existing Live Event. 
+// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// liveEventName - the name of the Live Event. +func (client LiveEventsClient) Start(ctx context.Context, resourceGroupName string, accountName string, liveEventName string) (result LiveEventsStartFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LiveEventsClient.Start") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: liveEventName, + Constraints: []validation.Constraint{{Target: "liveEventName", Name: validation.MaxLength, Rule: 32, Chain: nil}, + {Target: "liveEventName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "liveEventName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("media.LiveEventsClient", "Start", err.Error()) + } + + req, err := client.StartPreparer(ctx, resourceGroupName, accountName, liveEventName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveEventsClient", "Start", nil, "Failure preparing request") + return + } + + result, err = client.StartSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveEventsClient", "Start", result.Response(), "Failure sending request") + return + } + + return +} + +// StartPreparer prepares the Start request. 
+func (client LiveEventsClient) StartPreparer(ctx context.Context, resourceGroupName string, accountName string, liveEventName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "liveEventName": autorest.Encode("path", liveEventName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/liveEvents/{liveEventName}/start", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// StartSender sends the Start request. The method will close the +// http.Response Body if it receives an error. +func (client LiveEventsClient) StartSender(req *http.Request) (future LiveEventsStartFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// StartResponder handles the response to the Start request. The method always +// closes the http.Response Body. +func (client LiveEventsClient) StartResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Stop stops an existing Live Event. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. 
+// accountName - the Media Services account name. +// liveEventName - the name of the Live Event. +// parameters - liveEvent stop parameters +func (client LiveEventsClient) Stop(ctx context.Context, resourceGroupName string, accountName string, liveEventName string, parameters LiveEventActionInput) (result LiveEventsStopFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LiveEventsClient.Stop") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: liveEventName, + Constraints: []validation.Constraint{{Target: "liveEventName", Name: validation.MaxLength, Rule: 32, Chain: nil}, + {Target: "liveEventName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "liveEventName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("media.LiveEventsClient", "Stop", err.Error()) + } + + req, err := client.StopPreparer(ctx, resourceGroupName, accountName, liveEventName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveEventsClient", "Stop", nil, "Failure preparing request") + return + } + + result, err = client.StopSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveEventsClient", "Stop", result.Response(), "Failure sending request") + return + } + + return +} + +// StopPreparer prepares the Stop request. 
+func (client LiveEventsClient) StopPreparer(ctx context.Context, resourceGroupName string, accountName string, liveEventName string, parameters LiveEventActionInput) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "liveEventName": autorest.Encode("path", liveEventName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/liveEvents/{liveEventName}/stop", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// StopSender sends the Stop request. The method will close the +// http.Response Body if it receives an error. +func (client LiveEventsClient) StopSender(req *http.Request) (future LiveEventsStopFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// StopResponder handles the response to the Stop request. The method always +// closes the http.Response Body. 
+func (client LiveEventsClient) StopResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Update updates a existing Live Event. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// liveEventName - the name of the Live Event. +// parameters - live Event properties needed for creation. +func (client LiveEventsClient) Update(ctx context.Context, resourceGroupName string, accountName string, liveEventName string, parameters LiveEvent) (result LiveEventsUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LiveEventsClient.Update") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: liveEventName, + Constraints: []validation.Constraint{{Target: "liveEventName", Name: validation.MaxLength, Rule: 32, Chain: nil}, + {Target: "liveEventName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "liveEventName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("media.LiveEventsClient", "Update", err.Error()) + } + + req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, liveEventName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveEventsClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveEventsClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// 
UpdatePreparer prepares the Update request. +func (client LiveEventsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, liveEventName string, parameters LiveEvent) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "liveEventName": autorest.Encode("path", liveEventName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/liveEvents/{liveEventName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client LiveEventsClient) UpdateSender(req *http.Request) (future LiveEventsUpdateFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client LiveEventsClient) UpdateResponder(resp *http.Response) (result LiveEvent, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/liveoutputs.go b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/liveoutputs.go new file mode 100644 index 000000000000..0ba2a6060f19 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/liveoutputs.go @@ -0,0 +1,453 @@ +package media + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// LiveOutputsClient is the client for the LiveOutputs methods of the Media service. 
+type LiveOutputsClient struct { + BaseClient +} + +// NewLiveOutputsClient creates an instance of the LiveOutputsClient client. +func NewLiveOutputsClient(subscriptionID string) LiveOutputsClient { + return NewLiveOutputsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewLiveOutputsClientWithBaseURI creates an instance of the LiveOutputsClient client. +func NewLiveOutputsClientWithBaseURI(baseURI string, subscriptionID string) LiveOutputsClient { + return LiveOutputsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create creates a Live Output. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// liveEventName - the name of the Live Event. +// liveOutputName - the name of the Live Output. +// parameters - live Output properties needed for creation. +func (client LiveOutputsClient) Create(ctx context.Context, resourceGroupName string, accountName string, liveEventName string, liveOutputName string, parameters LiveOutput) (result LiveOutputsCreateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LiveOutputsClient.Create") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: liveEventName, + Constraints: []validation.Constraint{{Target: "liveEventName", Name: validation.MaxLength, Rule: 32, Chain: nil}, + {Target: "liveEventName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "liveEventName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}, + {TargetValue: liveOutputName, + Constraints: []validation.Constraint{{Target: "liveOutputName", Name: validation.MaxLength, Rule: 256, Chain: nil}, + {Target: "liveOutputName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "liveOutputName", Name: 
validation.Pattern, Rule: `^([a-zA-Z0-9])+(-*[a-zA-Z0-9])*$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.LiveOutputProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.LiveOutputProperties.AssetName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.LiveOutputProperties.ArchiveWindowLength", Name: validation.Null, Rule: true, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("media.LiveOutputsClient", "Create", err.Error()) + } + + req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, liveEventName, liveOutputName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveOutputsClient", "Create", nil, "Failure preparing request") + return + } + + result, err = client.CreateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveOutputsClient", "Create", result.Response(), "Failure sending request") + return + } + + return +} + +// CreatePreparer prepares the Create request. 
+func (client LiveOutputsClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, liveEventName string, liveOutputName string, parameters LiveOutput) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "liveEventName": autorest.Encode("path", liveEventName), + "liveOutputName": autorest.Encode("path", liveOutputName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/liveEvents/{liveEventName}/liveOutputs/{liveOutputName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client LiveOutputsClient) CreateSender(req *http.Request) (future LiveOutputsCreateFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. 
+func (client LiveOutputsClient) CreateResponder(resp *http.Response) (result LiveOutput, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a Live Output. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// liveEventName - the name of the Live Event. +// liveOutputName - the name of the Live Output. +func (client LiveOutputsClient) Delete(ctx context.Context, resourceGroupName string, accountName string, liveEventName string, liveOutputName string) (result LiveOutputsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LiveOutputsClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: liveEventName, + Constraints: []validation.Constraint{{Target: "liveEventName", Name: validation.MaxLength, Rule: 32, Chain: nil}, + {Target: "liveEventName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "liveEventName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}, + {TargetValue: liveOutputName, + Constraints: []validation.Constraint{{Target: "liveOutputName", Name: validation.MaxLength, Rule: 256, Chain: nil}, + {Target: "liveOutputName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "liveOutputName", Name: validation.Pattern, Rule: `^([a-zA-Z0-9])+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("media.LiveOutputsClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, 
liveEventName, liveOutputName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveOutputsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveOutputsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client LiveOutputsClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, liveEventName string, liveOutputName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "liveEventName": autorest.Encode("path", liveEventName), + "liveOutputName": autorest.Encode("path", liveOutputName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/liveEvents/{liveEventName}/liveOutputs/{liveOutputName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client LiveOutputsClient) DeleteSender(req *http.Request) (future LiveOutputsDeleteFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client LiveOutputsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a Live Output. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// liveEventName - the name of the Live Event. +// liveOutputName - the name of the Live Output. 
+func (client LiveOutputsClient) Get(ctx context.Context, resourceGroupName string, accountName string, liveEventName string, liveOutputName string) (result LiveOutput, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LiveOutputsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: liveEventName, + Constraints: []validation.Constraint{{Target: "liveEventName", Name: validation.MaxLength, Rule: 32, Chain: nil}, + {Target: "liveEventName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "liveEventName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}, + {TargetValue: liveOutputName, + Constraints: []validation.Constraint{{Target: "liveOutputName", Name: validation.MaxLength, Rule: 256, Chain: nil}, + {Target: "liveOutputName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "liveOutputName", Name: validation.Pattern, Rule: `^([a-zA-Z0-9])+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("media.LiveOutputsClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, accountName, liveEventName, liveOutputName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveOutputsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.LiveOutputsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveOutputsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client LiveOutputsClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, liveEventName string, liveOutputName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "liveEventName": autorest.Encode("path", liveEventName), + "liveOutputName": autorest.Encode("path", liveOutputName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/liveEvents/{liveEventName}/liveOutputs/{liveOutputName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client LiveOutputsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client LiveOutputsClient) GetResponder(resp *http.Response) (result LiveOutput, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists the Live Outputs in the Live Event. 
+// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// liveEventName - the name of the Live Event. +func (client LiveOutputsClient) List(ctx context.Context, resourceGroupName string, accountName string, liveEventName string) (result LiveOutputListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LiveOutputsClient.List") + defer func() { + sc := -1 + if result.lolr.Response.Response != nil { + sc = result.lolr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: liveEventName, + Constraints: []validation.Constraint{{Target: "liveEventName", Name: validation.MaxLength, Rule: 32, Chain: nil}, + {Target: "liveEventName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "liveEventName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("media.LiveOutputsClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, accountName, liveEventName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveOutputsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.lolr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.LiveOutputsClient", "List", resp, "Failure sending request") + return + } + + result.lolr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveOutputsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client LiveOutputsClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, liveEventName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "liveEventName": autorest.Encode("path", liveEventName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/liveEvents/{liveEventName}/liveOutputs", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client LiveOutputsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client LiveOutputsClient) ListResponder(resp *http.Response) (result LiveOutputListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client LiveOutputsClient) listNextResults(ctx context.Context, lastResults LiveOutputListResult) (result LiveOutputListResult, err error) { + req, err := lastResults.liveOutputListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "media.LiveOutputsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "media.LiveOutputsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveOutputsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client LiveOutputsClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, liveEventName string) (result LiveOutputListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LiveOutputsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, accountName, liveEventName) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/locations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/locations.go new file mode 100644 index 000000000000..ff32beb8cda8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/locations.go @@ -0,0 +1,119 @@ +package media + +// Copyright (c) Microsoft and contributors. 
All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// LocationsClient is the client for the Locations methods of the Media service. +type LocationsClient struct { + BaseClient +} + +// NewLocationsClient creates an instance of the LocationsClient client. +func NewLocationsClient(subscriptionID string) LocationsClient { + return NewLocationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewLocationsClientWithBaseURI creates an instance of the LocationsClient client. +func NewLocationsClientWithBaseURI(baseURI string, subscriptionID string) LocationsClient { + return LocationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckNameAvailability checks whether the Media Service resource name is available. 
+// Parameters: +// locationName - the name of the location +// parameters - the request parameters +func (client LocationsClient) CheckNameAvailability(ctx context.Context, locationName string, parameters CheckNameAvailabilityInput) (result EntityNameAvailabilityCheckOutput, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LocationsClient.CheckNameAvailability") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CheckNameAvailabilityPreparer(ctx, locationName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LocationsClient", "CheckNameAvailability", nil, "Failure preparing request") + return + } + + resp, err := client.CheckNameAvailabilitySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.LocationsClient", "CheckNameAvailability", resp, "Failure sending request") + return + } + + result, err = client.CheckNameAvailabilityResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LocationsClient", "CheckNameAvailability", resp, "Failure responding to request") + } + + return +} + +// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request. 
+func (client LocationsClient) CheckNameAvailabilityPreparer(ctx context.Context, locationName string, parameters CheckNameAvailabilityInput) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "locationName": autorest.Encode("path", locationName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Media/locations/{locationName}/checkNameAvailability", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the +// http.Response Body if it receives an error. +func (client LocationsClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always +// closes the http.Response Body. 
+func (client LocationsClient) CheckNameAvailabilityResponder(resp *http.Response) (result EntityNameAvailabilityCheckOutput, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/mediaservices.go b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/mediaservices.go new file mode 100644 index 000000000000..5512129ebfbb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/mediaservices.go @@ -0,0 +1,731 @@ +package media + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// MediaservicesClient is the client for the Mediaservices methods of the Media service. +type MediaservicesClient struct { + BaseClient +} + +// NewMediaservicesClient creates an instance of the MediaservicesClient client. 
+func NewMediaservicesClient(subscriptionID string) MediaservicesClient { + return NewMediaservicesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewMediaservicesClientWithBaseURI creates an instance of the MediaservicesClient client. +func NewMediaservicesClientWithBaseURI(baseURI string, subscriptionID string) MediaservicesClient { + return MediaservicesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a Media Services account +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// parameters - the request parameters +func (client MediaservicesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, parameters Service) (result Service, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/MediaservicesClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client MediaservicesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, parameters Service) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client MediaservicesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client MediaservicesClient) CreateOrUpdateResponder(resp *http.Response) (result Service, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a Media Services account +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. 
+// accountName - the Media Services account name. +func (client MediaservicesClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/MediaservicesClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client MediaservicesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client MediaservicesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client MediaservicesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get get the details of a Media Services account +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. 
+func (client MediaservicesClient) Get(ctx context.Context, resourceGroupName string, accountName string) (result Service, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/MediaservicesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client MediaservicesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. 
The method will close the +// http.Response Body if it receives an error. +func (client MediaservicesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client MediaservicesClient) GetResponder(resp *http.Response) (result Service, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetBySubscription get the details of a Media Services account +// Parameters: +// accountName - the Media Services account name. +func (client MediaservicesClient) GetBySubscription(ctx context.Context, accountName string) (result SubscriptionMediaService, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/MediaservicesClient.GetBySubscription") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetBySubscriptionPreparer(ctx, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "GetBySubscription", nil, "Failure preparing request") + return + } + + resp, err := client.GetBySubscriptionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "GetBySubscription", resp, "Failure sending request") + return + } + + result, err = client.GetBySubscriptionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "GetBySubscription", resp, "Failure responding to request") + } + + return +} + +// GetBySubscriptionPreparer prepares the 
GetBySubscription request. +func (client MediaservicesClient) GetBySubscriptionPreparer(ctx context.Context, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Media/mediaservices/{accountName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetBySubscriptionSender sends the GetBySubscription request. The method will close the +// http.Response Body if it receives an error. +func (client MediaservicesClient) GetBySubscriptionSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetBySubscriptionResponder handles the response to the GetBySubscription request. The method always +// closes the http.Response Body. +func (client MediaservicesClient) GetBySubscriptionResponder(resp *http.Response) (result SubscriptionMediaService, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List list Media Services accounts in the resource group +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. 
+func (client MediaservicesClient) List(ctx context.Context, resourceGroupName string) (result ServiceCollectionPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/MediaservicesClient.List") + defer func() { + sc := -1 + if result.sc.Response.Response != nil { + sc = result.sc.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.sc.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "List", resp, "Failure sending request") + return + } + + result.sc, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client MediaservicesClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. 
The method will close the +// http.Response Body if it receives an error. +func (client MediaservicesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client MediaservicesClient) ListResponder(resp *http.Response) (result ServiceCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client MediaservicesClient) listNextResults(ctx context.Context, lastResults ServiceCollection) (result ServiceCollection, err error) { + req, err := lastResults.serviceCollectionPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "media.MediaservicesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "media.MediaservicesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client MediaservicesClient) ListComplete(ctx context.Context, resourceGroupName string) (result ServiceCollectionIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/MediaservicesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName) + return +} + +// ListBySubscription list Media Services accounts in the subscription. +func (client MediaservicesClient) ListBySubscription(ctx context.Context) (result SubscriptionMediaServiceCollectionPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/MediaservicesClient.ListBySubscription") + defer func() { + sc := -1 + if result.smsc.Response.Response != nil { + sc = result.smsc.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listBySubscriptionNextResults + req, err := client.ListBySubscriptionPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "ListBySubscription", nil, "Failure preparing request") + return + } + + resp, err := client.ListBySubscriptionSender(req) + if err != nil { + result.smsc.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "ListBySubscription", resp, "Failure sending request") + return + } + + result.smsc, err = client.ListBySubscriptionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "ListBySubscription", resp, "Failure responding to request") + } + + return +} + +// ListBySubscriptionPreparer prepares the ListBySubscription request. 
+func (client MediaservicesClient) ListBySubscriptionPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Media/mediaservices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListBySubscriptionSender sends the ListBySubscription request. The method will close the +// http.Response Body if it receives an error. +func (client MediaservicesClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always +// closes the http.Response Body. +func (client MediaservicesClient) ListBySubscriptionResponder(resp *http.Response) (result SubscriptionMediaServiceCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listBySubscriptionNextResults retrieves the next set of results, if any. 
+func (client MediaservicesClient) listBySubscriptionNextResults(ctx context.Context, lastResults SubscriptionMediaServiceCollection) (result SubscriptionMediaServiceCollection, err error) { + req, err := lastResults.subscriptionMediaServiceCollectionPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "media.MediaservicesClient", "listBySubscriptionNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListBySubscriptionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "media.MediaservicesClient", "listBySubscriptionNextResults", resp, "Failure sending next results request") + } + result, err = client.ListBySubscriptionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "listBySubscriptionNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListBySubscriptionComplete enumerates all values, automatically crossing page boundaries as required. +func (client MediaservicesClient) ListBySubscriptionComplete(ctx context.Context) (result SubscriptionMediaServiceCollectionIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/MediaservicesClient.ListBySubscription") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListBySubscription(ctx) + return +} + +// SyncStorageKeys synchronizes storage account keys for a storage account associated with the Media Service account. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. 
+// parameters - the request parameters +func (client MediaservicesClient) SyncStorageKeys(ctx context.Context, resourceGroupName string, accountName string, parameters SyncStorageKeysInput) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/MediaservicesClient.SyncStorageKeys") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.SyncStorageKeysPreparer(ctx, resourceGroupName, accountName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "SyncStorageKeys", nil, "Failure preparing request") + return + } + + resp, err := client.SyncStorageKeysSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "SyncStorageKeys", resp, "Failure sending request") + return + } + + result, err = client.SyncStorageKeysResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "SyncStorageKeys", resp, "Failure responding to request") + } + + return +} + +// SyncStorageKeysPreparer prepares the SyncStorageKeys request. 
+func (client MediaservicesClient) SyncStorageKeysPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters SyncStorageKeysInput) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"accountName":       autorest.Encode("path", accountName),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2018-07-01" // service API version pinned by this generated package
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsContentType("application/json; charset=utf-8"),
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/syncStorageKeys", pathParameters),
+		autorest.WithJSON(parameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// SyncStorageKeysSender sends the SyncStorageKeys request. The method will close the
+// http.Response Body if it receives an error.
+func (client MediaservicesClient) SyncStorageKeysSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req,
+		azure.DoRetryWithRegistration(client.Client))
+}
+
+// SyncStorageKeysResponder handles the response to the SyncStorageKeys request. The method always
+// closes the http.Response Body.
+func (client MediaservicesClient) SyncStorageKeysResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByClosing()) // body is closed here; only the *http.Response wrapper is returned
+	result.Response = resp
+	return
+}
+
+// Update updates an existing Media Services account
+// Parameters:
+// resourceGroupName - the name of the resource group within the Azure subscription.
+// accountName - the Media Services account name.
+// parameters - the request parameters
+func (client MediaservicesClient) Update(ctx context.Context, resourceGroupName string, accountName string, parameters Service) (result Service, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/MediaservicesClient.Update")
+		defer func() {
+			sc := -1 // -1 recorded in the span when no HTTP response was received
+			if result.Response.Response != nil {
+				sc = result.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, parameters)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "Update", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.UpdateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "Update", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.UpdateResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "media.MediaservicesClient", "Update", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client MediaservicesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, parameters Service) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"accountName":       autorest.Encode("path", accountName),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2018-07-01" // service API version pinned by this generated package
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsContentType("application/json; charset=utf-8"),
+		autorest.AsPatch(), // account update is an HTTP PATCH on the account resource URL
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}", pathParameters),
+		autorest.WithJSON(parameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client MediaservicesClient) UpdateSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req,
+		azure.DoRetryWithRegistration(client.Client))
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client MediaservicesClient) UpdateResponder(resp *http.Response) (result Service, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result), // decodes the JSON body into the Service result
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/models.go
new file mode 100644
index 000000000000..d2f175fc8f8e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/models.go
@@ -0,0 +1,10876 @@
+package media
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"context"
+	"encoding/json"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/date"
+	"github.com/Azure/go-autorest/autorest/to"
+	"github.com/Azure/go-autorest/tracing"
+	"github.com/satori/go.uuid"
+	"net/http"
+)
+
+// The package's fully qualified name.
+const fqdn = "github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media" // used as the tracing span-name prefix by the generated client methods
+
+// AacAudioProfile enumerates the values for aac audio profile.
+type AacAudioProfile string // string-backed enum; full set in PossibleAacAudioProfileValues()
+
+const (
+	// AacLc Specifies that the output audio is to be encoded into AAC Low Complexity profile (AAC-LC).
+	AacLc AacAudioProfile = "AacLc"
+	// HeAacV1 Specifies that the output audio is to be encoded into HE-AAC v1 profile.
+	HeAacV1 AacAudioProfile = "HeAacV1"
+	// HeAacV2 Specifies that the output audio is to be encoded into HE-AAC v2 profile.
+	HeAacV2 AacAudioProfile = "HeAacV2"
+)
+
+// PossibleAacAudioProfileValues returns an array of possible values for the AacAudioProfile const type.
+func PossibleAacAudioProfileValues() []AacAudioProfile {
+	return []AacAudioProfile{AacLc, HeAacV1, HeAacV2}
+}
+
+// AssetContainerPermission enumerates the values for asset container permission.
+type AssetContainerPermission string
+
+const (
+	// Read The SAS URL will allow read access to the container.
+	Read AssetContainerPermission = "Read"
+	// ReadWrite The SAS URL will allow read and write access to the container.
+	ReadWrite AssetContainerPermission = "ReadWrite"
+	// ReadWriteDelete The SAS URL will allow read, write and delete access to the container.
+	ReadWriteDelete AssetContainerPermission = "ReadWriteDelete"
+)
+
+// PossibleAssetContainerPermissionValues returns an array of possible values for the AssetContainerPermission const type.
+func PossibleAssetContainerPermissionValues() []AssetContainerPermission {
+	return []AssetContainerPermission{Read, ReadWrite, ReadWriteDelete}
+}
+
+// AssetStorageEncryptionFormat enumerates the values for asset storage encryption format.
+type AssetStorageEncryptionFormat string
+
+const (
+	// MediaStorageClientEncryption The Asset is encrypted with Media Services client-side encryption.
+	MediaStorageClientEncryption AssetStorageEncryptionFormat = "MediaStorageClientEncryption"
+	// None The Asset does not use client-side storage encryption (this is the only allowed value for new
+	// Assets).
+	None AssetStorageEncryptionFormat = "None"
+)
+
+// PossibleAssetStorageEncryptionFormatValues returns an array of possible values for the AssetStorageEncryptionFormat const type.
+func PossibleAssetStorageEncryptionFormatValues() []AssetStorageEncryptionFormat {
+	return []AssetStorageEncryptionFormat{MediaStorageClientEncryption, None}
+}
+
+// ContentKeyPolicyFairPlayRentalAndLeaseKeyType enumerates the values for content key policy fair play rental
+// and lease key type.
+type ContentKeyPolicyFairPlayRentalAndLeaseKeyType string
+
+const (
+	// PersistentLimited Content key can be persisted and the valid duration is limited by the Rental Duration
+	// value
+	PersistentLimited ContentKeyPolicyFairPlayRentalAndLeaseKeyType = "PersistentLimited"
+	// PersistentUnlimited Content key can be persisted with an unlimited duration
+	PersistentUnlimited ContentKeyPolicyFairPlayRentalAndLeaseKeyType = "PersistentUnlimited"
+	// Undefined Key duration is not specified.
+	Undefined ContentKeyPolicyFairPlayRentalAndLeaseKeyType = "Undefined"
+	// Unknown Represents a ContentKeyPolicyFairPlayRentalAndLeaseKeyType that is unavailable in current API
+	// version.
+	Unknown ContentKeyPolicyFairPlayRentalAndLeaseKeyType = "Unknown"
+)
+
+// PossibleContentKeyPolicyFairPlayRentalAndLeaseKeyTypeValues returns an array of possible values for the ContentKeyPolicyFairPlayRentalAndLeaseKeyType const type.
+func PossibleContentKeyPolicyFairPlayRentalAndLeaseKeyTypeValues() []ContentKeyPolicyFairPlayRentalAndLeaseKeyType {
+	return []ContentKeyPolicyFairPlayRentalAndLeaseKeyType{PersistentLimited, PersistentUnlimited, Undefined, Unknown}
+}
+
+// ContentKeyPolicyPlayReadyContentType enumerates the values for content key policy play ready content type.
+type ContentKeyPolicyPlayReadyContentType string // string-backed enum; full set in PossibleContentKeyPolicyPlayReadyContentTypeValues()
+
+const (
+	// ContentKeyPolicyPlayReadyContentTypeUltraVioletDownload Ultraviolet download content type.
+	ContentKeyPolicyPlayReadyContentTypeUltraVioletDownload ContentKeyPolicyPlayReadyContentType = "UltraVioletDownload"
+	// ContentKeyPolicyPlayReadyContentTypeUltraVioletStreaming Ultraviolet streaming content type.
+	ContentKeyPolicyPlayReadyContentTypeUltraVioletStreaming ContentKeyPolicyPlayReadyContentType = "UltraVioletStreaming"
+	// ContentKeyPolicyPlayReadyContentTypeUnknown Represents a ContentKeyPolicyPlayReadyContentType that is
+	// unavailable in current API version.
+	ContentKeyPolicyPlayReadyContentTypeUnknown ContentKeyPolicyPlayReadyContentType = "Unknown"
+	// ContentKeyPolicyPlayReadyContentTypeUnspecified Unspecified content type.
+	ContentKeyPolicyPlayReadyContentTypeUnspecified ContentKeyPolicyPlayReadyContentType = "Unspecified"
+)
+
+// PossibleContentKeyPolicyPlayReadyContentTypeValues returns an array of possible values for the ContentKeyPolicyPlayReadyContentType const type.
+func PossibleContentKeyPolicyPlayReadyContentTypeValues() []ContentKeyPolicyPlayReadyContentType {
+	return []ContentKeyPolicyPlayReadyContentType{ContentKeyPolicyPlayReadyContentTypeUltraVioletDownload, ContentKeyPolicyPlayReadyContentTypeUltraVioletStreaming, ContentKeyPolicyPlayReadyContentTypeUnknown, ContentKeyPolicyPlayReadyContentTypeUnspecified}
+}
+
+// ContentKeyPolicyPlayReadyLicenseType enumerates the values for content key policy play ready license type.
+type ContentKeyPolicyPlayReadyLicenseType string
+
+const (
+	// ContentKeyPolicyPlayReadyLicenseTypeNonPersistent Non persistent license.
+	ContentKeyPolicyPlayReadyLicenseTypeNonPersistent ContentKeyPolicyPlayReadyLicenseType = "NonPersistent"
+	// ContentKeyPolicyPlayReadyLicenseTypePersistent Persistent license. Allows offline playback.
+	ContentKeyPolicyPlayReadyLicenseTypePersistent ContentKeyPolicyPlayReadyLicenseType = "Persistent"
+	// ContentKeyPolicyPlayReadyLicenseTypeUnknown Represents a ContentKeyPolicyPlayReadyLicenseType that is
+	// unavailable in current API version.
+	ContentKeyPolicyPlayReadyLicenseTypeUnknown ContentKeyPolicyPlayReadyLicenseType = "Unknown"
+)
+
+// PossibleContentKeyPolicyPlayReadyLicenseTypeValues returns an array of possible values for the ContentKeyPolicyPlayReadyLicenseType const type.
+func PossibleContentKeyPolicyPlayReadyLicenseTypeValues() []ContentKeyPolicyPlayReadyLicenseType {
+	return []ContentKeyPolicyPlayReadyLicenseType{ContentKeyPolicyPlayReadyLicenseTypeNonPersistent, ContentKeyPolicyPlayReadyLicenseTypePersistent, ContentKeyPolicyPlayReadyLicenseTypeUnknown}
+}
+
+// ContentKeyPolicyPlayReadyUnknownOutputPassingOption enumerates the values for content key policy play ready
+// unknown output passing option.
+type ContentKeyPolicyPlayReadyUnknownOutputPassingOption string
+
+const (
+	// ContentKeyPolicyPlayReadyUnknownOutputPassingOptionAllowed Passing the video portion of protected
+	// content to an Unknown Output is allowed.
+	ContentKeyPolicyPlayReadyUnknownOutputPassingOptionAllowed ContentKeyPolicyPlayReadyUnknownOutputPassingOption = "Allowed"
+	// ContentKeyPolicyPlayReadyUnknownOutputPassingOptionAllowedWithVideoConstriction Passing the video
+	// portion of protected content to an Unknown Output is allowed but with constrained resolution.
+	ContentKeyPolicyPlayReadyUnknownOutputPassingOptionAllowedWithVideoConstriction ContentKeyPolicyPlayReadyUnknownOutputPassingOption = "AllowedWithVideoConstriction"
+	// ContentKeyPolicyPlayReadyUnknownOutputPassingOptionNotAllowed Passing the video portion of protected
+	// content to an Unknown Output is not allowed.
+	ContentKeyPolicyPlayReadyUnknownOutputPassingOptionNotAllowed ContentKeyPolicyPlayReadyUnknownOutputPassingOption = "NotAllowed"
+	// ContentKeyPolicyPlayReadyUnknownOutputPassingOptionUnknown Represents a
+	// ContentKeyPolicyPlayReadyUnknownOutputPassingOption that is unavailable in current API version.
+	ContentKeyPolicyPlayReadyUnknownOutputPassingOptionUnknown ContentKeyPolicyPlayReadyUnknownOutputPassingOption = "Unknown"
+)
+
+// PossibleContentKeyPolicyPlayReadyUnknownOutputPassingOptionValues returns an array of possible values for the ContentKeyPolicyPlayReadyUnknownOutputPassingOption const type.
+func PossibleContentKeyPolicyPlayReadyUnknownOutputPassingOptionValues() []ContentKeyPolicyPlayReadyUnknownOutputPassingOption {
+	return []ContentKeyPolicyPlayReadyUnknownOutputPassingOption{ContentKeyPolicyPlayReadyUnknownOutputPassingOptionAllowed, ContentKeyPolicyPlayReadyUnknownOutputPassingOptionAllowedWithVideoConstriction, ContentKeyPolicyPlayReadyUnknownOutputPassingOptionNotAllowed, ContentKeyPolicyPlayReadyUnknownOutputPassingOptionUnknown}
+}
+
+// ContentKeyPolicyRestrictionTokenType enumerates the values for content key policy restriction token type.
+type ContentKeyPolicyRestrictionTokenType string
+
+const (
+	// ContentKeyPolicyRestrictionTokenTypeJwt JSON Web Token.
+	ContentKeyPolicyRestrictionTokenTypeJwt ContentKeyPolicyRestrictionTokenType = "Jwt"
+	// ContentKeyPolicyRestrictionTokenTypeSwt Simple Web Token.
+	ContentKeyPolicyRestrictionTokenTypeSwt ContentKeyPolicyRestrictionTokenType = "Swt"
+	// ContentKeyPolicyRestrictionTokenTypeUnknown Represents a ContentKeyPolicyRestrictionTokenType that is
+	// unavailable in current API version.
+	ContentKeyPolicyRestrictionTokenTypeUnknown ContentKeyPolicyRestrictionTokenType = "Unknown"
+)
+
+// PossibleContentKeyPolicyRestrictionTokenTypeValues returns an array of possible values for the ContentKeyPolicyRestrictionTokenType const type.
+func PossibleContentKeyPolicyRestrictionTokenTypeValues() []ContentKeyPolicyRestrictionTokenType {
+	return []ContentKeyPolicyRestrictionTokenType{ContentKeyPolicyRestrictionTokenTypeJwt, ContentKeyPolicyRestrictionTokenTypeSwt, ContentKeyPolicyRestrictionTokenTypeUnknown}
+}
+
+// DeinterlaceMode enumerates the values for deinterlace mode.
+type DeinterlaceMode string // string-backed enum; full set in PossibleDeinterlaceModeValues()
+
+const (
+	// AutoPixelAdaptive Apply automatic pixel adaptive de-interlacing on each frame in the input video.
+	AutoPixelAdaptive DeinterlaceMode = "AutoPixelAdaptive"
+	// Off Disables de-interlacing of the source video.
+	Off DeinterlaceMode = "Off"
+)
+
+// PossibleDeinterlaceModeValues returns an array of possible values for the DeinterlaceMode const type.
+func PossibleDeinterlaceModeValues() []DeinterlaceMode {
+	return []DeinterlaceMode{AutoPixelAdaptive, Off}
+}
+
+// DeinterlaceParity enumerates the values for deinterlace parity.
+type DeinterlaceParity string
+
+const (
+	// Auto Automatically detect the order of fields
+	Auto DeinterlaceParity = "Auto"
+	// BottomFieldFirst Apply bottom field first processing of input video.
+	BottomFieldFirst DeinterlaceParity = "BottomFieldFirst"
+	// TopFieldFirst Apply top field first processing of input video.
+	TopFieldFirst DeinterlaceParity = "TopFieldFirst"
+)
+
+// PossibleDeinterlaceParityValues returns an array of possible values for the DeinterlaceParity const type.
+func PossibleDeinterlaceParityValues() []DeinterlaceParity {
+	return []DeinterlaceParity{Auto, BottomFieldFirst, TopFieldFirst}
+}
+
+// EncoderNamedPreset enumerates the values for encoder named preset.
+type EncoderNamedPreset string
+
+const (
+	// AACGoodQualityAudio Produces a single MP4 file containing only stereo audio encoded at 192 kbps.
+	AACGoodQualityAudio EncoderNamedPreset = "AACGoodQualityAudio"
+	// AdaptiveStreaming Produces a set of GOP aligned MP4 files with H.264 video and stereo AAC audio.
+	// Auto-generates a bitrate ladder based on the input resolution and bitrate. The auto-generated preset
+	// will never exceed the input resolution and bitrate. For example, if the input is 720p at 3 Mbps, output
+	// will remain 720p at best, and will start at rates lower than 3 Mbps. The output will have video and
+	// audio in separate MP4 files, which is optimal for adaptive streaming.
+	AdaptiveStreaming EncoderNamedPreset = "AdaptiveStreaming"
+	// H264MultipleBitrate1080p Produces a set of 8 GOP-aligned MP4 files, ranging from 6000 kbps to 400 kbps,
+	// and stereo AAC audio. Resolution starts at 1080p and goes down to 360p.
+	H264MultipleBitrate1080p EncoderNamedPreset = "H264MultipleBitrate1080p"
+	// H264MultipleBitrate720p Produces a set of 6 GOP-aligned MP4 files, ranging from 3400 kbps to 400 kbps,
+	// and stereo AAC audio. Resolution starts at 720p and goes down to 360p.
+	H264MultipleBitrate720p EncoderNamedPreset = "H264MultipleBitrate720p"
+	// H264MultipleBitrateSD Produces a set of 5 GOP-aligned MP4 files, ranging from 1600kbps to 400 kbps, and
+	// stereo AAC audio. Resolution starts at 480p and goes down to 360p.
+	H264MultipleBitrateSD EncoderNamedPreset = "H264MultipleBitrateSD"
+	// H264SingleBitrate1080p Produces an MP4 file where the video is encoded with H.264 codec at 6750 kbps and
+	// a picture height of 1080 pixels, and the stereo audio is encoded with AAC-LC codec at 64 kbps.
+	H264SingleBitrate1080p EncoderNamedPreset = "H264SingleBitrate1080p"
+	// H264SingleBitrate720p Produces an MP4 file where the video is encoded with H.264 codec at 4500 kbps and
+	// a picture height of 720 pixels, and the stereo audio is encoded with AAC-LC codec at 64 kbps.
+	H264SingleBitrate720p EncoderNamedPreset = "H264SingleBitrate720p"
+	// H264SingleBitrateSD Produces an MP4 file where the video is encoded with H.264 codec at 2200 kbps and a
+	// picture height of 480 pixels, and the stereo audio is encoded with AAC-LC codec at 64 kbps.
+	H264SingleBitrateSD EncoderNamedPreset = "H264SingleBitrateSD"
+)
+
+// PossibleEncoderNamedPresetValues returns an array of possible values for the EncoderNamedPreset const type.
+func PossibleEncoderNamedPresetValues() []EncoderNamedPreset {
+	return []EncoderNamedPreset{AACGoodQualityAudio, AdaptiveStreaming, H264MultipleBitrate1080p, H264MultipleBitrate720p, H264MultipleBitrateSD, H264SingleBitrate1080p, H264SingleBitrate720p, H264SingleBitrateSD}
+}
+
+// EncryptionScheme enumerates the values for encryption scheme.
+type EncryptionScheme string
+
+const (
+	// EncryptionSchemeCommonEncryptionCbcs CommonEncryptionCbcs scheme
+	EncryptionSchemeCommonEncryptionCbcs EncryptionScheme = "CommonEncryptionCbcs"
+	// EncryptionSchemeCommonEncryptionCenc CommonEncryptionCenc scheme
+	EncryptionSchemeCommonEncryptionCenc EncryptionScheme = "CommonEncryptionCenc"
+	// EncryptionSchemeEnvelopeEncryption EnvelopeEncryption scheme
+	EncryptionSchemeEnvelopeEncryption EncryptionScheme = "EnvelopeEncryption"
+	// EncryptionSchemeNoEncryption NoEncryption scheme
+	EncryptionSchemeNoEncryption EncryptionScheme = "NoEncryption"
+)
+
+// PossibleEncryptionSchemeValues returns an array of possible values for the EncryptionScheme const type.
+func PossibleEncryptionSchemeValues() []EncryptionScheme {
+	return []EncryptionScheme{EncryptionSchemeCommonEncryptionCbcs, EncryptionSchemeCommonEncryptionCenc, EncryptionSchemeEnvelopeEncryption, EncryptionSchemeNoEncryption}
+}
+
+// EntropyMode enumerates the values for entropy mode.
+type EntropyMode string
+
+const (
+	// Cabac Context Adaptive Binary Arithmetic Coder (CABAC) entropy encoding.
+	Cabac EntropyMode = "Cabac"
+	// Cavlc Context Adaptive Variable Length Coder (CAVLC) entropy encoding.
+	Cavlc EntropyMode = "Cavlc"
+)
+
+// PossibleEntropyModeValues returns an array of possible values for the EntropyMode const type.
+func PossibleEntropyModeValues() []EntropyMode {
+	return []EntropyMode{Cabac, Cavlc}
+}
+
+// FilterTrackPropertyCompareOperation enumerates the values for filter track property compare operation.
+type FilterTrackPropertyCompareOperation string // string-backed enum; full set in PossibleFilterTrackPropertyCompareOperationValues()
+
+const (
+	// Equal The equal operation.
+	Equal FilterTrackPropertyCompareOperation = "Equal"
+	// NotEqual The not equal operation.
+	NotEqual FilterTrackPropertyCompareOperation = "NotEqual"
+)
+
+// PossibleFilterTrackPropertyCompareOperationValues returns an array of possible values for the FilterTrackPropertyCompareOperation const type.
+func PossibleFilterTrackPropertyCompareOperationValues() []FilterTrackPropertyCompareOperation {
+	return []FilterTrackPropertyCompareOperation{Equal, NotEqual}
+}
+
+// FilterTrackPropertyType enumerates the values for filter track property type.
+type FilterTrackPropertyType string
+
+const (
+	// FilterTrackPropertyTypeBitrate The bitrate.
+	FilterTrackPropertyTypeBitrate FilterTrackPropertyType = "Bitrate"
+	// FilterTrackPropertyTypeFourCC The fourCC.
+	FilterTrackPropertyTypeFourCC FilterTrackPropertyType = "FourCC"
+	// FilterTrackPropertyTypeLanguage The language.
+	FilterTrackPropertyTypeLanguage FilterTrackPropertyType = "Language"
+	// FilterTrackPropertyTypeName The name.
+	FilterTrackPropertyTypeName FilterTrackPropertyType = "Name"
+	// FilterTrackPropertyTypeType The type.
+	FilterTrackPropertyTypeType FilterTrackPropertyType = "Type"
+	// FilterTrackPropertyTypeUnknown The unknown track property type.
+	FilterTrackPropertyTypeUnknown FilterTrackPropertyType = "Unknown"
+)
+
+// PossibleFilterTrackPropertyTypeValues returns an array of possible values for the FilterTrackPropertyType const type.
+func PossibleFilterTrackPropertyTypeValues() []FilterTrackPropertyType {
+	return []FilterTrackPropertyType{FilterTrackPropertyTypeBitrate, FilterTrackPropertyTypeFourCC, FilterTrackPropertyTypeLanguage, FilterTrackPropertyTypeName, FilterTrackPropertyTypeType, FilterTrackPropertyTypeUnknown}
+}
+
+// H264Complexity enumerates the values for h264 complexity.
+type H264Complexity string
+
+const (
+	// Balanced Tells the encoder to use settings that achieve a balance between speed and quality.
+	Balanced H264Complexity = "Balanced"
+	// Quality Tells the encoder to use settings that are optimized to produce higher quality output at the
+	// expense of slower overall encode time.
+	Quality H264Complexity = "Quality"
+	// Speed Tells the encoder to use settings that are optimized for faster encoding. Quality is sacrificed to
+	// decrease encoding time.
+	Speed H264Complexity = "Speed"
+)
+
+// PossibleH264ComplexityValues returns an array of possible values for the H264Complexity const type.
+func PossibleH264ComplexityValues() []H264Complexity {
+	return []H264Complexity{Balanced, Quality, Speed}
+}
+
+// H264VideoProfile enumerates the values for h264 video profile.
+type H264VideoProfile string
+
+const (
+	// H264VideoProfileAuto Tells the encoder to automatically determine the appropriate H.264 profile.
+	H264VideoProfileAuto H264VideoProfile = "Auto"
+	// H264VideoProfileBaseline Baseline profile
+	H264VideoProfileBaseline H264VideoProfile = "Baseline"
+	// H264VideoProfileHigh High profile.
+	H264VideoProfileHigh H264VideoProfile = "High"
+	// H264VideoProfileHigh422 High 4:2:2 profile.
+	H264VideoProfileHigh422 H264VideoProfile = "High422"
+	// H264VideoProfileHigh444 High 4:4:4 predictive profile.
+	H264VideoProfileHigh444 H264VideoProfile = "High444"
+	// H264VideoProfileMain Main profile
+	H264VideoProfileMain H264VideoProfile = "Main"
+)
+
+// PossibleH264VideoProfileValues returns an array of possible values for the H264VideoProfile const type.
+func PossibleH264VideoProfileValues() []H264VideoProfile {
+	return []H264VideoProfile{H264VideoProfileAuto, H264VideoProfileBaseline, H264VideoProfileHigh, H264VideoProfileHigh422, H264VideoProfileHigh444, H264VideoProfileMain}
+}
+
+// InsightsType enumerates the values for insights type.
+type InsightsType string
+
+const (
+	// AllInsights Generate both audio and video insights. Fails if either audio or video Insights fail.
+	AllInsights InsightsType = "AllInsights"
+	// AudioInsightsOnly Generate audio only insights. Ignore video even if present. Fails if no audio is
+	// present.
+	AudioInsightsOnly InsightsType = "AudioInsightsOnly"
+	// VideoInsightsOnly Generate video only insights. Ignore audio if present. Fails if no video is present.
+	VideoInsightsOnly InsightsType = "VideoInsightsOnly"
+)
+
+// PossibleInsightsTypeValues returns an array of possible values for the InsightsType const type.
+func PossibleInsightsTypeValues() []InsightsType {
+	return []InsightsType{AllInsights, AudioInsightsOnly, VideoInsightsOnly}
+}
+
+// JobErrorCategory enumerates the values for job error category.
+type JobErrorCategory string
+
+const (
+	// JobErrorCategoryConfiguration The error is configuration related.
+	JobErrorCategoryConfiguration JobErrorCategory = "Configuration"
+	// JobErrorCategoryContent The error is related to data in the input files.
+	JobErrorCategoryContent JobErrorCategory = "Content"
+	// JobErrorCategoryDownload The error is download related.
+	JobErrorCategoryDownload JobErrorCategory = "Download"
+	// JobErrorCategoryService The error is service related.
+	JobErrorCategoryService JobErrorCategory = "Service"
+	// JobErrorCategoryUpload The error is upload related.
+	JobErrorCategoryUpload JobErrorCategory = "Upload"
+)
+
+// PossibleJobErrorCategoryValues returns an array of possible values for the JobErrorCategory const type.
+func PossibleJobErrorCategoryValues() []JobErrorCategory {
+	return []JobErrorCategory{JobErrorCategoryConfiguration, JobErrorCategoryContent, JobErrorCategoryDownload, JobErrorCategoryService, JobErrorCategoryUpload}
+}
+
+// JobErrorCode enumerates the values for job error code.
+type JobErrorCode string // string-backed enum; full set in PossibleJobErrorCodeValues()
+
+const (
+	// ConfigurationUnsupported There was a problem with the combination of input files and the configuration
+	// settings applied, fix the configuration settings and retry with the same input, or change input to match
+	// the configuration.
+	ConfigurationUnsupported JobErrorCode = "ConfigurationUnsupported"
+	// ContentMalformed There was a problem with the input content (for example: zero byte files, or
+	// corrupt/non-decodable files), check the input files.
+	ContentMalformed JobErrorCode = "ContentMalformed"
+	// ContentUnsupported There was a problem with the format of the input (not valid media file, or an
+	// unsupported file/codec), check the validity of the input files.
+	ContentUnsupported JobErrorCode = "ContentUnsupported"
+	// DownloadNotAccessible While trying to download the input files, the files were not accessible, please
+	// check the availability of the source.
+	DownloadNotAccessible JobErrorCode = "DownloadNotAccessible"
+	// DownloadTransientError While trying to download the input files, there was an issue during transfer
+	// (storage service, network errors), see details and check your source.
+	DownloadTransientError JobErrorCode = "DownloadTransientError"
+	// ServiceError Fatal service error, please contact support.
+	ServiceError JobErrorCode = "ServiceError"
+	// ServiceTransientError Transient error, please retry, if retry is unsuccessful, please contact support.
+	ServiceTransientError JobErrorCode = "ServiceTransientError"
+	// UploadNotAccessible While trying to upload the output files, the destination was not reachable, please
+	// check the availability of the destination.
+	UploadNotAccessible JobErrorCode = "UploadNotAccessible"
+	// UploadTransientError While trying to upload the output files, there was an issue during transfer
+	// (storage service, network errors), see details and check your destination.
+	UploadTransientError JobErrorCode = "UploadTransientError"
+)
+
+// PossibleJobErrorCodeValues returns an array of possible values for the JobErrorCode const type.
+func PossibleJobErrorCodeValues() []JobErrorCode {
+	return []JobErrorCode{ConfigurationUnsupported, ContentMalformed, ContentUnsupported, DownloadNotAccessible, DownloadTransientError, ServiceError, ServiceTransientError, UploadNotAccessible, UploadTransientError}
+}
+
+// JobRetry enumerates the values for job retry.
+type JobRetry string
+
+const (
+	// DoNotRetry Issue needs to be investigated and then the job resubmitted with corrections or retried once
+	// the underlying issue has been corrected.
+	DoNotRetry JobRetry = "DoNotRetry"
+	// MayRetry Issue may be resolved after waiting for a period of time and resubmitting the same Job.
+	MayRetry JobRetry = "MayRetry"
+)
+
+// PossibleJobRetryValues returns an array of possible values for the JobRetry const type.
+func PossibleJobRetryValues() []JobRetry {
+	return []JobRetry{DoNotRetry, MayRetry}
+}
+
+// JobState enumerates the values for job state.
+type JobState string
+
+const (
+	// Canceled The job was canceled. This is a final state for the job.
+	Canceled JobState = "Canceled"
+	// Canceling The job is in the process of being canceled. This is a transient state for the job.
+	Canceling JobState = "Canceling"
+	// Error The job has encountered an error. This is a final state for the job.
+	Error JobState = "Error"
+	// Finished The job is finished. This is a final state for the job.
+	Finished JobState = "Finished"
+	// Processing The job is processing. This is a transient state for the job.
+	Processing JobState = "Processing"
+	// Queued The job is in a queued state, waiting for resources to become available. This is a transient
+	// state.
+	Queued JobState = "Queued"
+	// Scheduled The job is being scheduled to run on an available resource. This is a transient state, between
+	// queued and processing states.
+	Scheduled JobState = "Scheduled"
+)
+
+// PossibleJobStateValues returns an array of possible values for the JobState const type.
+func PossibleJobStateValues() []JobState {
+	return []JobState{Canceled, Canceling, Error, Finished, Processing, Queued, Scheduled}
+}
+
+// LiveEventEncodingType enumerates the values for live event encoding type.
+type LiveEventEncodingType string
+
+const (
+	// LiveEventEncodingTypeBasic ...
+	LiveEventEncodingTypeBasic LiveEventEncodingType = "Basic"
+	// LiveEventEncodingTypeNone ...
+	LiveEventEncodingTypeNone LiveEventEncodingType = "None"
+	// LiveEventEncodingTypeStandard ...
+	LiveEventEncodingTypeStandard LiveEventEncodingType = "Standard"
+)
+
+// PossibleLiveEventEncodingTypeValues returns an array of possible values for the LiveEventEncodingType const type.
+func PossibleLiveEventEncodingTypeValues() []LiveEventEncodingType {
+	return []LiveEventEncodingType{LiveEventEncodingTypeBasic, LiveEventEncodingTypeNone, LiveEventEncodingTypeStandard}
+}
+
+// LiveEventInputProtocol enumerates the values for live event input protocol.
+type LiveEventInputProtocol string
+
+const (
+	// FragmentedMP4 ...
+	FragmentedMP4 LiveEventInputProtocol = "FragmentedMP4"
+	// RTMP ...
+	RTMP LiveEventInputProtocol = "RTMP"
+)
+
+// PossibleLiveEventInputProtocolValues returns an array of possible values for the LiveEventInputProtocol const type.
+func PossibleLiveEventInputProtocolValues() []LiveEventInputProtocol {
+	return []LiveEventInputProtocol{FragmentedMP4, RTMP}
+}
+
+// LiveEventResourceState enumerates the values for live event resource state.
+type LiveEventResourceState string
+
+const (
+	// Deleting ...
+	Deleting LiveEventResourceState = "Deleting"
+	// Running ...
+	Running LiveEventResourceState = "Running"
+	// Starting ...
+	Starting LiveEventResourceState = "Starting"
+	// Stopped ...
+	Stopped LiveEventResourceState = "Stopped"
+	// Stopping ...
+	Stopping LiveEventResourceState = "Stopping"
+)
+
+// PossibleLiveEventResourceStateValues returns an array of possible values for the LiveEventResourceState const type.
+func PossibleLiveEventResourceStateValues() []LiveEventResourceState {
+	return []LiveEventResourceState{Deleting, Running, Starting, Stopped, Stopping}
+}
+
+// LiveOutputResourceState enumerates the values for live output resource state.
+type LiveOutputResourceState string
+
+const (
+	// LiveOutputResourceStateCreating ...
+	LiveOutputResourceStateCreating LiveOutputResourceState = "Creating"
+	// LiveOutputResourceStateDeleting ...
+	LiveOutputResourceStateDeleting LiveOutputResourceState = "Deleting"
+	// LiveOutputResourceStateRunning ...
+	LiveOutputResourceStateRunning LiveOutputResourceState = "Running"
+)
+
+// PossibleLiveOutputResourceStateValues returns an array of possible values for the LiveOutputResourceState const type.
+func PossibleLiveOutputResourceStateValues() []LiveOutputResourceState {
+	return []LiveOutputResourceState{LiveOutputResourceStateCreating, LiveOutputResourceStateDeleting, LiveOutputResourceStateRunning}
+}
+
+// MetricAggregationType enumerates the values for metric aggregation type.
+type MetricAggregationType string
+
+const (
+	// Average The average.
+	Average MetricAggregationType = "Average"
+	// Count The count of a number of items, usually requests.
+	Count MetricAggregationType = "Count"
+	// Total The sum.
+	Total MetricAggregationType = "Total"
+)
+
+// PossibleMetricAggregationTypeValues returns an array of possible values for the MetricAggregationType const type.
+func PossibleMetricAggregationTypeValues() []MetricAggregationType {
+	return []MetricAggregationType{Average, Count, Total}
+}
+
+// MetricUnit enumerates the values for metric unit.
+type MetricUnit string
+
+const (
+	// MetricUnitBytes The number of bytes.
+	MetricUnitBytes MetricUnit = "Bytes"
+	// MetricUnitCount The count.
+	MetricUnitCount MetricUnit = "Count"
+	// MetricUnitMilliseconds The number of milliseconds.
+	MetricUnitMilliseconds MetricUnit = "Milliseconds"
+)
+
+// PossibleMetricUnitValues returns an array of possible values for the MetricUnit const type.
+func PossibleMetricUnitValues() []MetricUnit {
+	return []MetricUnit{MetricUnitBytes, MetricUnitCount, MetricUnitMilliseconds}
+}
+
+// OdataType enumerates the values for odata type.
+type OdataType string
+
+const (
+	// OdataTypeContentKeyPolicyPlayReadyContentKeyLocation ...
+	OdataTypeContentKeyPolicyPlayReadyContentKeyLocation OdataType = "ContentKeyPolicyPlayReadyContentKeyLocation"
+	// OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader ...
+	OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader OdataType = "#Microsoft.Media.ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader"
+	// OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier ...
+	OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier OdataType = "#Microsoft.Media.ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier"
+)
+
+// PossibleOdataTypeValues returns an array of possible values for the OdataType const type.
+func PossibleOdataTypeValues() []OdataType {
+	return []OdataType{OdataTypeContentKeyPolicyPlayReadyContentKeyLocation, OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader, OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier}
+}
+
+// OdataTypeBasicCodec enumerates the values for odata type basic codec.
+type OdataTypeBasicCodec string + +const ( + // OdataTypeCodec ... + OdataTypeCodec OdataTypeBasicCodec = "Codec" + // OdataTypeMicrosoftMediaAacAudio ... + OdataTypeMicrosoftMediaAacAudio OdataTypeBasicCodec = "#Microsoft.Media.AacAudio" + // OdataTypeMicrosoftMediaAudio ... + OdataTypeMicrosoftMediaAudio OdataTypeBasicCodec = "#Microsoft.Media.Audio" + // OdataTypeMicrosoftMediaCopyAudio ... + OdataTypeMicrosoftMediaCopyAudio OdataTypeBasicCodec = "#Microsoft.Media.CopyAudio" + // OdataTypeMicrosoftMediaCopyVideo ... + OdataTypeMicrosoftMediaCopyVideo OdataTypeBasicCodec = "#Microsoft.Media.CopyVideo" + // OdataTypeMicrosoftMediaH264Video ... + OdataTypeMicrosoftMediaH264Video OdataTypeBasicCodec = "#Microsoft.Media.H264Video" + // OdataTypeMicrosoftMediaImage ... + OdataTypeMicrosoftMediaImage OdataTypeBasicCodec = "#Microsoft.Media.Image" + // OdataTypeMicrosoftMediaJpgImage ... + OdataTypeMicrosoftMediaJpgImage OdataTypeBasicCodec = "#Microsoft.Media.JpgImage" + // OdataTypeMicrosoftMediaPngImage ... + OdataTypeMicrosoftMediaPngImage OdataTypeBasicCodec = "#Microsoft.Media.PngImage" + // OdataTypeMicrosoftMediaVideo ... + OdataTypeMicrosoftMediaVideo OdataTypeBasicCodec = "#Microsoft.Media.Video" +) + +// PossibleOdataTypeBasicCodecValues returns an array of possible values for the OdataTypeBasicCodec const type. +func PossibleOdataTypeBasicCodecValues() []OdataTypeBasicCodec { + return []OdataTypeBasicCodec{OdataTypeCodec, OdataTypeMicrosoftMediaAacAudio, OdataTypeMicrosoftMediaAudio, OdataTypeMicrosoftMediaCopyAudio, OdataTypeMicrosoftMediaCopyVideo, OdataTypeMicrosoftMediaH264Video, OdataTypeMicrosoftMediaImage, OdataTypeMicrosoftMediaJpgImage, OdataTypeMicrosoftMediaPngImage, OdataTypeMicrosoftMediaVideo} +} + +// OdataTypeBasicContentKeyPolicyConfiguration enumerates the values for odata type basic content key policy +// configuration. +type OdataTypeBasicContentKeyPolicyConfiguration string + +const ( + // OdataTypeContentKeyPolicyConfiguration ... 
+ OdataTypeContentKeyPolicyConfiguration OdataTypeBasicContentKeyPolicyConfiguration = "ContentKeyPolicyConfiguration" + // OdataTypeMicrosoftMediaContentKeyPolicyClearKeyConfiguration ... + OdataTypeMicrosoftMediaContentKeyPolicyClearKeyConfiguration OdataTypeBasicContentKeyPolicyConfiguration = "#Microsoft.Media.ContentKeyPolicyClearKeyConfiguration" + // OdataTypeMicrosoftMediaContentKeyPolicyFairPlayConfiguration ... + OdataTypeMicrosoftMediaContentKeyPolicyFairPlayConfiguration OdataTypeBasicContentKeyPolicyConfiguration = "#Microsoft.Media.ContentKeyPolicyFairPlayConfiguration" + // OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyConfiguration ... + OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyConfiguration OdataTypeBasicContentKeyPolicyConfiguration = "#Microsoft.Media.ContentKeyPolicyPlayReadyConfiguration" + // OdataTypeMicrosoftMediaContentKeyPolicyUnknownConfiguration ... + OdataTypeMicrosoftMediaContentKeyPolicyUnknownConfiguration OdataTypeBasicContentKeyPolicyConfiguration = "#Microsoft.Media.ContentKeyPolicyUnknownConfiguration" + // OdataTypeMicrosoftMediaContentKeyPolicyWidevineConfiguration ... + OdataTypeMicrosoftMediaContentKeyPolicyWidevineConfiguration OdataTypeBasicContentKeyPolicyConfiguration = "#Microsoft.Media.ContentKeyPolicyWidevineConfiguration" +) + +// PossibleOdataTypeBasicContentKeyPolicyConfigurationValues returns an array of possible values for the OdataTypeBasicContentKeyPolicyConfiguration const type. 
+func PossibleOdataTypeBasicContentKeyPolicyConfigurationValues() []OdataTypeBasicContentKeyPolicyConfiguration { + return []OdataTypeBasicContentKeyPolicyConfiguration{OdataTypeContentKeyPolicyConfiguration, OdataTypeMicrosoftMediaContentKeyPolicyClearKeyConfiguration, OdataTypeMicrosoftMediaContentKeyPolicyFairPlayConfiguration, OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyConfiguration, OdataTypeMicrosoftMediaContentKeyPolicyUnknownConfiguration, OdataTypeMicrosoftMediaContentKeyPolicyWidevineConfiguration} +} + +// OdataTypeBasicContentKeyPolicyRestriction enumerates the values for odata type basic content key policy +// restriction. +type OdataTypeBasicContentKeyPolicyRestriction string + +const ( + // OdataTypeContentKeyPolicyRestriction ... + OdataTypeContentKeyPolicyRestriction OdataTypeBasicContentKeyPolicyRestriction = "ContentKeyPolicyRestriction" + // OdataTypeMicrosoftMediaContentKeyPolicyOpenRestriction ... + OdataTypeMicrosoftMediaContentKeyPolicyOpenRestriction OdataTypeBasicContentKeyPolicyRestriction = "#Microsoft.Media.ContentKeyPolicyOpenRestriction" + // OdataTypeMicrosoftMediaContentKeyPolicyTokenRestriction ... + OdataTypeMicrosoftMediaContentKeyPolicyTokenRestriction OdataTypeBasicContentKeyPolicyRestriction = "#Microsoft.Media.ContentKeyPolicyTokenRestriction" + // OdataTypeMicrosoftMediaContentKeyPolicyUnknownRestriction ... + OdataTypeMicrosoftMediaContentKeyPolicyUnknownRestriction OdataTypeBasicContentKeyPolicyRestriction = "#Microsoft.Media.ContentKeyPolicyUnknownRestriction" +) + +// PossibleOdataTypeBasicContentKeyPolicyRestrictionValues returns an array of possible values for the OdataTypeBasicContentKeyPolicyRestriction const type. 
+func PossibleOdataTypeBasicContentKeyPolicyRestrictionValues() []OdataTypeBasicContentKeyPolicyRestriction { + return []OdataTypeBasicContentKeyPolicyRestriction{OdataTypeContentKeyPolicyRestriction, OdataTypeMicrosoftMediaContentKeyPolicyOpenRestriction, OdataTypeMicrosoftMediaContentKeyPolicyTokenRestriction, OdataTypeMicrosoftMediaContentKeyPolicyUnknownRestriction} +} + +// OdataTypeBasicContentKeyPolicyRestrictionTokenKey enumerates the values for odata type basic content key +// policy restriction token key. +type OdataTypeBasicContentKeyPolicyRestrictionTokenKey string + +const ( + // OdataTypeContentKeyPolicyRestrictionTokenKey ... + OdataTypeContentKeyPolicyRestrictionTokenKey OdataTypeBasicContentKeyPolicyRestrictionTokenKey = "ContentKeyPolicyRestrictionTokenKey" + // OdataTypeMicrosoftMediaContentKeyPolicyRsaTokenKey ... + OdataTypeMicrosoftMediaContentKeyPolicyRsaTokenKey OdataTypeBasicContentKeyPolicyRestrictionTokenKey = "#Microsoft.Media.ContentKeyPolicyRsaTokenKey" + // OdataTypeMicrosoftMediaContentKeyPolicySymmetricTokenKey ... + OdataTypeMicrosoftMediaContentKeyPolicySymmetricTokenKey OdataTypeBasicContentKeyPolicyRestrictionTokenKey = "#Microsoft.Media.ContentKeyPolicySymmetricTokenKey" + // OdataTypeMicrosoftMediaContentKeyPolicyX509CertificateTokenKey ... + OdataTypeMicrosoftMediaContentKeyPolicyX509CertificateTokenKey OdataTypeBasicContentKeyPolicyRestrictionTokenKey = "#Microsoft.Media.ContentKeyPolicyX509CertificateTokenKey" +) + +// PossibleOdataTypeBasicContentKeyPolicyRestrictionTokenKeyValues returns an array of possible values for the OdataTypeBasicContentKeyPolicyRestrictionTokenKey const type. 
+func PossibleOdataTypeBasicContentKeyPolicyRestrictionTokenKeyValues() []OdataTypeBasicContentKeyPolicyRestrictionTokenKey { + return []OdataTypeBasicContentKeyPolicyRestrictionTokenKey{OdataTypeContentKeyPolicyRestrictionTokenKey, OdataTypeMicrosoftMediaContentKeyPolicyRsaTokenKey, OdataTypeMicrosoftMediaContentKeyPolicySymmetricTokenKey, OdataTypeMicrosoftMediaContentKeyPolicyX509CertificateTokenKey} +} + +// OdataTypeBasicFormat enumerates the values for odata type basic format. +type OdataTypeBasicFormat string + +const ( + // OdataTypeFormat ... + OdataTypeFormat OdataTypeBasicFormat = "Format" + // OdataTypeMicrosoftMediaImageFormat ... + OdataTypeMicrosoftMediaImageFormat OdataTypeBasicFormat = "#Microsoft.Media.ImageFormat" + // OdataTypeMicrosoftMediaJpgFormat ... + OdataTypeMicrosoftMediaJpgFormat OdataTypeBasicFormat = "#Microsoft.Media.JpgFormat" + // OdataTypeMicrosoftMediaMp4Format ... + OdataTypeMicrosoftMediaMp4Format OdataTypeBasicFormat = "#Microsoft.Media.Mp4Format" + // OdataTypeMicrosoftMediaMultiBitrateFormat ... + OdataTypeMicrosoftMediaMultiBitrateFormat OdataTypeBasicFormat = "#Microsoft.Media.MultiBitrateFormat" + // OdataTypeMicrosoftMediaPngFormat ... + OdataTypeMicrosoftMediaPngFormat OdataTypeBasicFormat = "#Microsoft.Media.PngFormat" + // OdataTypeMicrosoftMediaTransportStreamFormat ... + OdataTypeMicrosoftMediaTransportStreamFormat OdataTypeBasicFormat = "#Microsoft.Media.TransportStreamFormat" +) + +// PossibleOdataTypeBasicFormatValues returns an array of possible values for the OdataTypeBasicFormat const type. 
+func PossibleOdataTypeBasicFormatValues() []OdataTypeBasicFormat { + return []OdataTypeBasicFormat{OdataTypeFormat, OdataTypeMicrosoftMediaImageFormat, OdataTypeMicrosoftMediaJpgFormat, OdataTypeMicrosoftMediaMp4Format, OdataTypeMicrosoftMediaMultiBitrateFormat, OdataTypeMicrosoftMediaPngFormat, OdataTypeMicrosoftMediaTransportStreamFormat} +} + +// OdataTypeBasicJobInput enumerates the values for odata type basic job input. +type OdataTypeBasicJobInput string + +const ( + // OdataTypeJobInput ... + OdataTypeJobInput OdataTypeBasicJobInput = "JobInput" + // OdataTypeMicrosoftMediaJobInputAsset ... + OdataTypeMicrosoftMediaJobInputAsset OdataTypeBasicJobInput = "#Microsoft.Media.JobInputAsset" + // OdataTypeMicrosoftMediaJobInputClip ... + OdataTypeMicrosoftMediaJobInputClip OdataTypeBasicJobInput = "#Microsoft.Media.JobInputClip" + // OdataTypeMicrosoftMediaJobInputHTTP ... + OdataTypeMicrosoftMediaJobInputHTTP OdataTypeBasicJobInput = "#Microsoft.Media.JobInputHttp" + // OdataTypeMicrosoftMediaJobInputs ... + OdataTypeMicrosoftMediaJobInputs OdataTypeBasicJobInput = "#Microsoft.Media.JobInputs" +) + +// PossibleOdataTypeBasicJobInputValues returns an array of possible values for the OdataTypeBasicJobInput const type. +func PossibleOdataTypeBasicJobInputValues() []OdataTypeBasicJobInput { + return []OdataTypeBasicJobInput{OdataTypeJobInput, OdataTypeMicrosoftMediaJobInputAsset, OdataTypeMicrosoftMediaJobInputClip, OdataTypeMicrosoftMediaJobInputHTTP, OdataTypeMicrosoftMediaJobInputs} +} + +// OdataTypeBasicJobOutput enumerates the values for odata type basic job output. +type OdataTypeBasicJobOutput string + +const ( + // OdataTypeJobOutput ... + OdataTypeJobOutput OdataTypeBasicJobOutput = "JobOutput" + // OdataTypeMicrosoftMediaJobOutputAsset ... 
+ OdataTypeMicrosoftMediaJobOutputAsset OdataTypeBasicJobOutput = "#Microsoft.Media.JobOutputAsset" +) + +// PossibleOdataTypeBasicJobOutputValues returns an array of possible values for the OdataTypeBasicJobOutput const type. +func PossibleOdataTypeBasicJobOutputValues() []OdataTypeBasicJobOutput { + return []OdataTypeBasicJobOutput{OdataTypeJobOutput, OdataTypeMicrosoftMediaJobOutputAsset} +} + +// OdataTypeBasicLayer enumerates the values for odata type basic layer. +type OdataTypeBasicLayer string + +const ( + // OdataTypeLayer ... + OdataTypeLayer OdataTypeBasicLayer = "Layer" + // OdataTypeMicrosoftMediaH264Layer ... + OdataTypeMicrosoftMediaH264Layer OdataTypeBasicLayer = "#Microsoft.Media.H264Layer" + // OdataTypeMicrosoftMediaJpgLayer ... + OdataTypeMicrosoftMediaJpgLayer OdataTypeBasicLayer = "#Microsoft.Media.JpgLayer" + // OdataTypeMicrosoftMediaPngLayer ... + OdataTypeMicrosoftMediaPngLayer OdataTypeBasicLayer = "#Microsoft.Media.PngLayer" + // OdataTypeMicrosoftMediaVideoLayer ... + OdataTypeMicrosoftMediaVideoLayer OdataTypeBasicLayer = "#Microsoft.Media.VideoLayer" +) + +// PossibleOdataTypeBasicLayerValues returns an array of possible values for the OdataTypeBasicLayer const type. +func PossibleOdataTypeBasicLayerValues() []OdataTypeBasicLayer { + return []OdataTypeBasicLayer{OdataTypeLayer, OdataTypeMicrosoftMediaH264Layer, OdataTypeMicrosoftMediaJpgLayer, OdataTypeMicrosoftMediaPngLayer, OdataTypeMicrosoftMediaVideoLayer} +} + +// OdataTypeBasicOverlay enumerates the values for odata type basic overlay. +type OdataTypeBasicOverlay string + +const ( + // OdataTypeMicrosoftMediaAudioOverlay ... + OdataTypeMicrosoftMediaAudioOverlay OdataTypeBasicOverlay = "#Microsoft.Media.AudioOverlay" + // OdataTypeMicrosoftMediaVideoOverlay ... + OdataTypeMicrosoftMediaVideoOverlay OdataTypeBasicOverlay = "#Microsoft.Media.VideoOverlay" + // OdataTypeOverlay ... 
+ OdataTypeOverlay OdataTypeBasicOverlay = "Overlay" +) + +// PossibleOdataTypeBasicOverlayValues returns an array of possible values for the OdataTypeBasicOverlay const type. +func PossibleOdataTypeBasicOverlayValues() []OdataTypeBasicOverlay { + return []OdataTypeBasicOverlay{OdataTypeMicrosoftMediaAudioOverlay, OdataTypeMicrosoftMediaVideoOverlay, OdataTypeOverlay} +} + +// OdataTypeBasicPreset enumerates the values for odata type basic preset. +type OdataTypeBasicPreset string + +const ( + // OdataTypeMicrosoftMediaAudioAnalyzerPreset ... + OdataTypeMicrosoftMediaAudioAnalyzerPreset OdataTypeBasicPreset = "#Microsoft.Media.AudioAnalyzerPreset" + // OdataTypeMicrosoftMediaBuiltInStandardEncoderPreset ... + OdataTypeMicrosoftMediaBuiltInStandardEncoderPreset OdataTypeBasicPreset = "#Microsoft.Media.BuiltInStandardEncoderPreset" + // OdataTypeMicrosoftMediaStandardEncoderPreset ... + OdataTypeMicrosoftMediaStandardEncoderPreset OdataTypeBasicPreset = "#Microsoft.Media.StandardEncoderPreset" + // OdataTypeMicrosoftMediaVideoAnalyzerPreset ... + OdataTypeMicrosoftMediaVideoAnalyzerPreset OdataTypeBasicPreset = "#Microsoft.Media.VideoAnalyzerPreset" + // OdataTypePreset ... + OdataTypePreset OdataTypeBasicPreset = "Preset" +) + +// PossibleOdataTypeBasicPresetValues returns an array of possible values for the OdataTypeBasicPreset const type. +func PossibleOdataTypeBasicPresetValues() []OdataTypeBasicPreset { + return []OdataTypeBasicPreset{OdataTypeMicrosoftMediaAudioAnalyzerPreset, OdataTypeMicrosoftMediaBuiltInStandardEncoderPreset, OdataTypeMicrosoftMediaStandardEncoderPreset, OdataTypeMicrosoftMediaVideoAnalyzerPreset, OdataTypePreset} +} + +// OnErrorType enumerates the values for on error type. +type OnErrorType string + +const ( + // ContinueJob Tells the service that if this TransformOutput fails, then allow any other TransformOutput + // to continue. 
+ ContinueJob OnErrorType = "ContinueJob" + // StopProcessingJob Tells the service that if this TransformOutput fails, then any other incomplete + // TransformOutputs can be stopped. + StopProcessingJob OnErrorType = "StopProcessingJob" +) + +// PossibleOnErrorTypeValues returns an array of possible values for the OnErrorType const type. +func PossibleOnErrorTypeValues() []OnErrorType { + return []OnErrorType{ContinueJob, StopProcessingJob} +} + +// Priority enumerates the values for priority. +type Priority string + +const ( + // High Used for TransformOutputs that should take precedence over others. + High Priority = "High" + // Low Used for TransformOutputs that can be generated after Normal and High priority TransformOutputs. + Low Priority = "Low" + // Normal Used for TransformOutputs that can be generated at Normal priority. + Normal Priority = "Normal" +) + +// PossiblePriorityValues returns an array of possible values for the Priority const type. +func PossiblePriorityValues() []Priority { + return []Priority{High, Low, Normal} +} + +// Rotation enumerates the values for rotation. +type Rotation string + +const ( + // RotationAuto Automatically detect and rotate as needed. + RotationAuto Rotation = "Auto" + // RotationNone Do not rotate the video. If the output format supports it, any metadata about rotation is + // kept intact. + RotationNone Rotation = "None" + // RotationRotate0 Do not rotate the video but remove any metadata about the rotation. + RotationRotate0 Rotation = "Rotate0" + // RotationRotate180 Rotate 180 degrees clockwise. + RotationRotate180 Rotation = "Rotate180" + // RotationRotate270 Rotate 270 degrees clockwise. + RotationRotate270 Rotation = "Rotate270" + // RotationRotate90 Rotate 90 degrees clockwise. + RotationRotate90 Rotation = "Rotate90" +) + +// PossibleRotationValues returns an array of possible values for the Rotation const type. 
+func PossibleRotationValues() []Rotation { + return []Rotation{RotationAuto, RotationNone, RotationRotate0, RotationRotate180, RotationRotate270, RotationRotate90} +} + +// StorageAccountType enumerates the values for storage account type. +type StorageAccountType string + +const ( + // Primary The primary storage account for the Media Services account. + Primary StorageAccountType = "Primary" + // Secondary A secondary storage account for the Media Services account. + Secondary StorageAccountType = "Secondary" +) + +// PossibleStorageAccountTypeValues returns an array of possible values for the StorageAccountType const type. +func PossibleStorageAccountTypeValues() []StorageAccountType { + return []StorageAccountType{Primary, Secondary} +} + +// StreamingEndpointResourceState enumerates the values for streaming endpoint resource state. +type StreamingEndpointResourceState string + +const ( + // StreamingEndpointResourceStateDeleting ... + StreamingEndpointResourceStateDeleting StreamingEndpointResourceState = "Deleting" + // StreamingEndpointResourceStateRunning ... + StreamingEndpointResourceStateRunning StreamingEndpointResourceState = "Running" + // StreamingEndpointResourceStateScaling ... + StreamingEndpointResourceStateScaling StreamingEndpointResourceState = "Scaling" + // StreamingEndpointResourceStateStarting ... + StreamingEndpointResourceStateStarting StreamingEndpointResourceState = "Starting" + // StreamingEndpointResourceStateStopped ... + StreamingEndpointResourceStateStopped StreamingEndpointResourceState = "Stopped" + // StreamingEndpointResourceStateStopping ... + StreamingEndpointResourceStateStopping StreamingEndpointResourceState = "Stopping" +) + +// PossibleStreamingEndpointResourceStateValues returns an array of possible values for the StreamingEndpointResourceState const type. 
+func PossibleStreamingEndpointResourceStateValues() []StreamingEndpointResourceState { + return []StreamingEndpointResourceState{StreamingEndpointResourceStateDeleting, StreamingEndpointResourceStateRunning, StreamingEndpointResourceStateScaling, StreamingEndpointResourceStateStarting, StreamingEndpointResourceStateStopped, StreamingEndpointResourceStateStopping} +} + +// StreamingLocatorContentKeyType enumerates the values for streaming locator content key type. +type StreamingLocatorContentKeyType string + +const ( + // StreamingLocatorContentKeyTypeCommonEncryptionCbcs Common Encryption using CBCS + StreamingLocatorContentKeyTypeCommonEncryptionCbcs StreamingLocatorContentKeyType = "CommonEncryptionCbcs" + // StreamingLocatorContentKeyTypeCommonEncryptionCenc Common Encryption using CENC + StreamingLocatorContentKeyTypeCommonEncryptionCenc StreamingLocatorContentKeyType = "CommonEncryptionCenc" + // StreamingLocatorContentKeyTypeEnvelopeEncryption Envelope Encryption + StreamingLocatorContentKeyTypeEnvelopeEncryption StreamingLocatorContentKeyType = "EnvelopeEncryption" +) + +// PossibleStreamingLocatorContentKeyTypeValues returns an array of possible values for the StreamingLocatorContentKeyType const type. +func PossibleStreamingLocatorContentKeyTypeValues() []StreamingLocatorContentKeyType { + return []StreamingLocatorContentKeyType{StreamingLocatorContentKeyTypeCommonEncryptionCbcs, StreamingLocatorContentKeyTypeCommonEncryptionCenc, StreamingLocatorContentKeyTypeEnvelopeEncryption} +} + +// StreamingPolicyStreamingProtocol enumerates the values for streaming policy streaming protocol. 
+type StreamingPolicyStreamingProtocol string + +const ( + // StreamingPolicyStreamingProtocolDash DASH protocol + StreamingPolicyStreamingProtocolDash StreamingPolicyStreamingProtocol = "Dash" + // StreamingPolicyStreamingProtocolDownload Download protocol + StreamingPolicyStreamingProtocolDownload StreamingPolicyStreamingProtocol = "Download" + // StreamingPolicyStreamingProtocolHls HLS protocol + StreamingPolicyStreamingProtocolHls StreamingPolicyStreamingProtocol = "Hls" + // StreamingPolicyStreamingProtocolSmoothStreaming SmoothStreaming protocol + StreamingPolicyStreamingProtocolSmoothStreaming StreamingPolicyStreamingProtocol = "SmoothStreaming" +) + +// PossibleStreamingPolicyStreamingProtocolValues returns an array of possible values for the StreamingPolicyStreamingProtocol const type. +func PossibleStreamingPolicyStreamingProtocolValues() []StreamingPolicyStreamingProtocol { + return []StreamingPolicyStreamingProtocol{StreamingPolicyStreamingProtocolDash, StreamingPolicyStreamingProtocolDownload, StreamingPolicyStreamingProtocolHls, StreamingPolicyStreamingProtocolSmoothStreaming} +} + +// StreamOptionsFlag enumerates the values for stream options flag. +type StreamOptionsFlag string + +const ( + // Default ... + Default StreamOptionsFlag = "Default" + // LowLatency ... + LowLatency StreamOptionsFlag = "LowLatency" +) + +// PossibleStreamOptionsFlagValues returns an array of possible values for the StreamOptionsFlag const type. +func PossibleStreamOptionsFlagValues() []StreamOptionsFlag { + return []StreamOptionsFlag{Default, LowLatency} +} + +// StretchMode enumerates the values for stretch mode. +type StretchMode string + +const ( + // StretchModeAutoFit Pad the output (with either letterbox or pillar box) to honor the output resolution, + // while ensuring that the active video region in the output has the same aspect ratio as the input. 
For + // example, if the input is 1920x1080 and the encoding preset asks for 1280x1280, then the output will be + // at 1280x1280, which contains an inner rectangle of 1280x720 at aspect ratio of 16:9, and pillar box + // regions 280 pixels wide at the left and right. + StretchModeAutoFit StretchMode = "AutoFit" + // StretchModeAutoSize Override the output resolution, and change it to match the display aspect ratio of + // the input, without padding. For example, if the input is 1920x1080 and the encoding preset asks for + // 1280x1280, then the value in the preset is overridden, and the output will be at 1280x720, which + // maintains the input aspect ratio of 16:9. + StretchModeAutoSize StretchMode = "AutoSize" + // StretchModeNone Strictly respect the output resolution without considering the pixel aspect ratio or + // display aspect ratio of the input video. + StretchModeNone StretchMode = "None" +) + +// PossibleStretchModeValues returns an array of possible values for the StretchMode const type. +func PossibleStretchModeValues() []StretchMode { + return []StretchMode{StretchModeAutoFit, StretchModeAutoSize, StretchModeNone} +} + +// TrackPropertyCompareOperation enumerates the values for track property compare operation. +type TrackPropertyCompareOperation string + +const ( + // TrackPropertyCompareOperationEqual Equal operation + TrackPropertyCompareOperationEqual TrackPropertyCompareOperation = "Equal" + // TrackPropertyCompareOperationUnknown Unknown track property compare operation + TrackPropertyCompareOperationUnknown TrackPropertyCompareOperation = "Unknown" +) + +// PossibleTrackPropertyCompareOperationValues returns an array of possible values for the TrackPropertyCompareOperation const type. 
+func PossibleTrackPropertyCompareOperationValues() []TrackPropertyCompareOperation { + return []TrackPropertyCompareOperation{TrackPropertyCompareOperationEqual, TrackPropertyCompareOperationUnknown} +} + +// TrackPropertyType enumerates the values for track property type. +type TrackPropertyType string + +const ( + // TrackPropertyTypeFourCC Track FourCC + TrackPropertyTypeFourCC TrackPropertyType = "FourCC" + // TrackPropertyTypeUnknown Unknown track property + TrackPropertyTypeUnknown TrackPropertyType = "Unknown" +) + +// PossibleTrackPropertyTypeValues returns an array of possible values for the TrackPropertyType const type. +func PossibleTrackPropertyTypeValues() []TrackPropertyType { + return []TrackPropertyType{TrackPropertyTypeFourCC, TrackPropertyTypeUnknown} +} + +// AacAudio describes Advanced Audio Codec (AAC) audio encoding settings. +type AacAudio struct { + // Profile - The encoding profile to be used when encoding audio with AAC. Possible values include: 'AacLc', 'HeAacV1', 'HeAacV2' + Profile AacAudioProfile `json:"profile,omitempty"` + // Channels - The number of channels in the audio. + Channels *int32 `json:"channels,omitempty"` + // SamplingRate - The sampling rate to use for encoding in hertz. + SamplingRate *int32 `json:"samplingRate,omitempty"` + // Bitrate - The bitrate, in bits per second, of the output encoded audio. + Bitrate *int32 `json:"bitrate,omitempty"` + // Label - An optional label for the codec. The label can be used to control muxing behavior. 
+ Label *string `json:"label,omitempty"` + // OdataType - Possible values include: 'OdataTypeCodec', 'OdataTypeMicrosoftMediaAudio', 'OdataTypeMicrosoftMediaAacAudio', 'OdataTypeMicrosoftMediaCopyVideo', 'OdataTypeMicrosoftMediaVideo', 'OdataTypeMicrosoftMediaImage', 'OdataTypeMicrosoftMediaCopyAudio', 'OdataTypeMicrosoftMediaH264Video', 'OdataTypeMicrosoftMediaJpgImage', 'OdataTypeMicrosoftMediaPngImage' + OdataType OdataTypeBasicCodec `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for AacAudio. +func (aa AacAudio) MarshalJSON() ([]byte, error) { + aa.OdataType = OdataTypeMicrosoftMediaAacAudio + objectMap := make(map[string]interface{}) + if aa.Profile != "" { + objectMap["profile"] = aa.Profile + } + if aa.Channels != nil { + objectMap["channels"] = aa.Channels + } + if aa.SamplingRate != nil { + objectMap["samplingRate"] = aa.SamplingRate + } + if aa.Bitrate != nil { + objectMap["bitrate"] = aa.Bitrate + } + if aa.Label != nil { + objectMap["label"] = aa.Label + } + if aa.OdataType != "" { + objectMap["@odata.type"] = aa.OdataType + } + return json.Marshal(objectMap) +} + +// AsAudio is the BasicCodec implementation for AacAudio. +func (aa AacAudio) AsAudio() (*Audio, bool) { + return nil, false +} + +// AsBasicAudio is the BasicCodec implementation for AacAudio. +func (aa AacAudio) AsBasicAudio() (BasicAudio, bool) { + return &aa, true +} + +// AsAacAudio is the BasicCodec implementation for AacAudio. +func (aa AacAudio) AsAacAudio() (*AacAudio, bool) { + return &aa, true +} + +// AsCopyVideo is the BasicCodec implementation for AacAudio. +func (aa AacAudio) AsCopyVideo() (*CopyVideo, bool) { + return nil, false +} + +// AsVideo is the BasicCodec implementation for AacAudio. +func (aa AacAudio) AsVideo() (*Video, bool) { + return nil, false +} + +// AsBasicVideo is the BasicCodec implementation for AacAudio. 
+func (aa AacAudio) AsBasicVideo() (BasicVideo, bool) { + return nil, false +} + +// AsImage is the BasicCodec implementation for AacAudio. +func (aa AacAudio) AsImage() (*Image, bool) { + return nil, false +} + +// AsBasicImage is the BasicCodec implementation for AacAudio. +func (aa AacAudio) AsBasicImage() (BasicImage, bool) { + return nil, false +} + +// AsCopyAudio is the BasicCodec implementation for AacAudio. +func (aa AacAudio) AsCopyAudio() (*CopyAudio, bool) { + return nil, false +} + +// AsH264Video is the BasicCodec implementation for AacAudio. +func (aa AacAudio) AsH264Video() (*H264Video, bool) { + return nil, false +} + +// AsJpgImage is the BasicCodec implementation for AacAudio. +func (aa AacAudio) AsJpgImage() (*JpgImage, bool) { + return nil, false +} + +// AsPngImage is the BasicCodec implementation for AacAudio. +func (aa AacAudio) AsPngImage() (*PngImage, bool) { + return nil, false +} + +// AsCodec is the BasicCodec implementation for AacAudio. +func (aa AacAudio) AsCodec() (*Codec, bool) { + return nil, false +} + +// AsBasicCodec is the BasicCodec implementation for AacAudio. +func (aa AacAudio) AsBasicCodec() (BasicCodec, bool) { + return &aa, true +} + +// AccountFilter an Account Filter. +type AccountFilter struct { + autorest.Response `json:"-"` + *FilterProperties `json:"properties,omitempty"` + // ID - Fully qualified resource ID for the resource. + ID *string `json:"id,omitempty"` + // Name - The name of the resource. + Name *string `json:"name,omitempty"` + // Type - The type of the resource. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for AccountFilter. 
+func (af AccountFilter) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if af.FilterProperties != nil { + objectMap["properties"] = af.FilterProperties + } + if af.ID != nil { + objectMap["id"] = af.ID + } + if af.Name != nil { + objectMap["name"] = af.Name + } + if af.Type != nil { + objectMap["type"] = af.Type + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for AccountFilter struct. +func (af *AccountFilter) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var filterProperties FilterProperties + err = json.Unmarshal(*v, &filterProperties) + if err != nil { + return err + } + af.FilterProperties = &filterProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + af.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + af.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + af.Type = &typeVar + } + } + } + + return nil +} + +// AccountFilterCollection a collection of AccountFilter items. +type AccountFilterCollection struct { + autorest.Response `json:"-"` + // Value - A collection of AccountFilter items. + Value *[]AccountFilter `json:"value,omitempty"` + // OdataNextLink - A link to the next page of the collection (when the collection contains too many results to return in one response). + OdataNextLink *string `json:"@odata.nextLink,omitempty"` +} + +// AccountFilterCollectionIterator provides access to a complete listing of AccountFilter values. +type AccountFilterCollectionIterator struct { + i int + page AccountFilterCollectionPage +} + +// NextWithContext advances to the next value. 
If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *AccountFilterCollectionIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountFilterCollectionIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *AccountFilterCollectionIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter AccountFilterCollectionIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter AccountFilterCollectionIterator) Response() AccountFilterCollection { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter AccountFilterCollectionIterator) Value() AccountFilter { + if !iter.page.NotDone() { + return AccountFilter{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the AccountFilterCollectionIterator type. +func NewAccountFilterCollectionIterator(page AccountFilterCollectionPage) AccountFilterCollectionIterator { + return AccountFilterCollectionIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. 
+func (afc AccountFilterCollection) IsEmpty() bool { + return afc.Value == nil || len(*afc.Value) == 0 +} + +// accountFilterCollectionPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (afc AccountFilterCollection) accountFilterCollectionPreparer(ctx context.Context) (*http.Request, error) { + if afc.OdataNextLink == nil || len(to.String(afc.OdataNextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(afc.OdataNextLink))) +} + +// AccountFilterCollectionPage contains a page of AccountFilter values. +type AccountFilterCollectionPage struct { + fn func(context.Context, AccountFilterCollection) (AccountFilterCollection, error) + afc AccountFilterCollection +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *AccountFilterCollectionPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountFilterCollectionPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.afc) + if err != nil { + return err + } + page.afc = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *AccountFilterCollectionPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. 
+func (page AccountFilterCollectionPage) NotDone() bool { + return !page.afc.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page AccountFilterCollectionPage) Response() AccountFilterCollection { + return page.afc +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page AccountFilterCollectionPage) Values() []AccountFilter { + if page.afc.IsEmpty() { + return nil + } + return *page.afc.Value +} + +// Creates a new instance of the AccountFilterCollectionPage type. +func NewAccountFilterCollectionPage(getNextPage func(context.Context, AccountFilterCollection) (AccountFilterCollection, error)) AccountFilterCollectionPage { + return AccountFilterCollectionPage{fn: getNextPage} +} + +// AkamaiAccessControl akamai access control +type AkamaiAccessControl struct { + // AkamaiSignatureHeaderAuthenticationKeyList - authentication key list + AkamaiSignatureHeaderAuthenticationKeyList *[]AkamaiSignatureHeaderAuthenticationKey `json:"akamaiSignatureHeaderAuthenticationKeyList,omitempty"` +} + +// AkamaiSignatureHeaderAuthenticationKey akamai Signature Header authentication key. +type AkamaiSignatureHeaderAuthenticationKey struct { + // Identifier - identifier of the key + Identifier *string `json:"identifier,omitempty"` + // Base64Key - authentication key + Base64Key *string `json:"base64Key,omitempty"` + // Expiration - The expiration time of the authentication key. + Expiration *date.Time `json:"expiration,omitempty"` +} + +// APIError the API error. +type APIError struct { + // Error - The error properties. + Error *ODataError `json:"error,omitempty"` +} + +// Asset an Asset. +type Asset struct { + autorest.Response `json:"-"` + // AssetProperties - The resource properties. + *AssetProperties `json:"properties,omitempty"` + // ID - Fully qualified resource ID for the resource. + ID *string `json:"id,omitempty"` + // Name - The name of the resource. 
+ Name *string `json:"name,omitempty"` + // Type - The type of the resource. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for Asset. +func (a Asset) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if a.AssetProperties != nil { + objectMap["properties"] = a.AssetProperties + } + if a.ID != nil { + objectMap["id"] = a.ID + } + if a.Name != nil { + objectMap["name"] = a.Name + } + if a.Type != nil { + objectMap["type"] = a.Type + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Asset struct. +func (a *Asset) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var assetProperties AssetProperties + err = json.Unmarshal(*v, &assetProperties) + if err != nil { + return err + } + a.AssetProperties = &assetProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + a.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + a.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + a.Type = &typeVar + } + } + } + + return nil +} + +// AssetCollection a collection of Asset items. +type AssetCollection struct { + autorest.Response `json:"-"` + // Value - A collection of Asset items. + Value *[]Asset `json:"value,omitempty"` + // OdataNextLink - A link to the next page of the collection (when the collection contains too many results to return in one response). + OdataNextLink *string `json:"@odata.nextLink,omitempty"` +} + +// AssetCollectionIterator provides access to a complete listing of Asset values. 
+type AssetCollectionIterator struct { + i int + page AssetCollectionPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *AssetCollectionIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AssetCollectionIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *AssetCollectionIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter AssetCollectionIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter AssetCollectionIterator) Response() AssetCollection { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter AssetCollectionIterator) Value() Asset { + if !iter.page.NotDone() { + return Asset{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the AssetCollectionIterator type. +func NewAssetCollectionIterator(page AssetCollectionPage) AssetCollectionIterator { + return AssetCollectionIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. 
+func (ac AssetCollection) IsEmpty() bool { + return ac.Value == nil || len(*ac.Value) == 0 +} + +// assetCollectionPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (ac AssetCollection) assetCollectionPreparer(ctx context.Context) (*http.Request, error) { + if ac.OdataNextLink == nil || len(to.String(ac.OdataNextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(ac.OdataNextLink))) +} + +// AssetCollectionPage contains a page of Asset values. +type AssetCollectionPage struct { + fn func(context.Context, AssetCollection) (AssetCollection, error) + ac AssetCollection +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *AssetCollectionPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AssetCollectionPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.ac) + if err != nil { + return err + } + page.ac = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *AssetCollectionPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page AssetCollectionPage) NotDone() bool { + return !page.ac.IsEmpty() +} + +// Response returns the raw server response from the last page request. 
+func (page AssetCollectionPage) Response() AssetCollection { + return page.ac +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page AssetCollectionPage) Values() []Asset { + if page.ac.IsEmpty() { + return nil + } + return *page.ac.Value +} + +// Creates a new instance of the AssetCollectionPage type. +func NewAssetCollectionPage(getNextPage func(context.Context, AssetCollection) (AssetCollection, error)) AssetCollectionPage { + return AssetCollectionPage{fn: getNextPage} +} + +// AssetContainerSas the Asset Storage container SAS URLs. +type AssetContainerSas struct { + autorest.Response `json:"-"` + // AssetContainerSasUrls - The list of Asset container SAS URLs. + AssetContainerSasUrls *[]string `json:"assetContainerSasUrls,omitempty"` +} + +// AssetFileEncryptionMetadata the Asset File Storage encryption metadata. +type AssetFileEncryptionMetadata struct { + // InitializationVector - The Asset File initialization vector. + InitializationVector *string `json:"initializationVector,omitempty"` + // AssetFileName - The Asset File name. + AssetFileName *string `json:"assetFileName,omitempty"` + // AssetFileID - The Asset File Id. + AssetFileID *uuid.UUID `json:"assetFileId,omitempty"` +} + +// AssetFilter an Asset Filter. +type AssetFilter struct { + autorest.Response `json:"-"` + *FilterProperties `json:"properties,omitempty"` + // ID - Fully qualified resource ID for the resource. + ID *string `json:"id,omitempty"` + // Name - The name of the resource. + Name *string `json:"name,omitempty"` + // Type - The type of the resource. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for AssetFilter. 
+func (af AssetFilter) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if af.FilterProperties != nil { + objectMap["properties"] = af.FilterProperties + } + if af.ID != nil { + objectMap["id"] = af.ID + } + if af.Name != nil { + objectMap["name"] = af.Name + } + if af.Type != nil { + objectMap["type"] = af.Type + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for AssetFilter struct. +func (af *AssetFilter) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var filterProperties FilterProperties + err = json.Unmarshal(*v, &filterProperties) + if err != nil { + return err + } + af.FilterProperties = &filterProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + af.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + af.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + af.Type = &typeVar + } + } + } + + return nil +} + +// AssetFilterCollection a collection of AssetFilter items. +type AssetFilterCollection struct { + autorest.Response `json:"-"` + // Value - A collection of AssetFilter items. + Value *[]AssetFilter `json:"value,omitempty"` + // OdataNextLink - A link to the next page of the collection (when the collection contains too many results to return in one response). + OdataNextLink *string `json:"@odata.nextLink,omitempty"` +} + +// AssetFilterCollectionIterator provides access to a complete listing of AssetFilter values. +type AssetFilterCollectionIterator struct { + i int + page AssetFilterCollectionPage +} + +// NextWithContext advances to the next value. 
If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *AssetFilterCollectionIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AssetFilterCollectionIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *AssetFilterCollectionIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter AssetFilterCollectionIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter AssetFilterCollectionIterator) Response() AssetFilterCollection { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter AssetFilterCollectionIterator) Value() AssetFilter { + if !iter.page.NotDone() { + return AssetFilter{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the AssetFilterCollectionIterator type. +func NewAssetFilterCollectionIterator(page AssetFilterCollectionPage) AssetFilterCollectionIterator { + return AssetFilterCollectionIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. 
+func (afc AssetFilterCollection) IsEmpty() bool { + return afc.Value == nil || len(*afc.Value) == 0 +} + +// assetFilterCollectionPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (afc AssetFilterCollection) assetFilterCollectionPreparer(ctx context.Context) (*http.Request, error) { + if afc.OdataNextLink == nil || len(to.String(afc.OdataNextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(afc.OdataNextLink))) +} + +// AssetFilterCollectionPage contains a page of AssetFilter values. +type AssetFilterCollectionPage struct { + fn func(context.Context, AssetFilterCollection) (AssetFilterCollection, error) + afc AssetFilterCollection +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *AssetFilterCollectionPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AssetFilterCollectionPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.afc) + if err != nil { + return err + } + page.afc = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *AssetFilterCollectionPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. 
+func (page AssetFilterCollectionPage) NotDone() bool { + return !page.afc.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page AssetFilterCollectionPage) Response() AssetFilterCollection { + return page.afc +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page AssetFilterCollectionPage) Values() []AssetFilter { + if page.afc.IsEmpty() { + return nil + } + return *page.afc.Value +} + +// Creates a new instance of the AssetFilterCollectionPage type. +func NewAssetFilterCollectionPage(getNextPage func(context.Context, AssetFilterCollection) (AssetFilterCollection, error)) AssetFilterCollectionPage { + return AssetFilterCollectionPage{fn: getNextPage} +} + +// AssetProperties the Asset properties. +type AssetProperties struct { + // AssetID - The Asset ID. + AssetID *uuid.UUID `json:"assetId,omitempty"` + // Created - The creation date of the Asset. + Created *date.Time `json:"created,omitempty"` + // LastModified - The last modified date of the Asset. + LastModified *date.Time `json:"lastModified,omitempty"` + // AlternateID - The alternate ID of the Asset. + AlternateID *string `json:"alternateId,omitempty"` + // Description - The Asset description. + Description *string `json:"description,omitempty"` + // Container - The name of the asset blob container. + Container *string `json:"container,omitempty"` + // StorageAccountName - The name of the storage account. + StorageAccountName *string `json:"storageAccountName,omitempty"` + // StorageEncryptionFormat - The Asset encryption format. One of None or MediaStorageEncryption. Possible values include: 'None', 'MediaStorageClientEncryption' + StorageEncryptionFormat AssetStorageEncryptionFormat `json:"storageEncryptionFormat,omitempty"` +} + +// AssetStreamingLocator properties of the Streaming Locator. +type AssetStreamingLocator struct { + // Name - Streaming Locator name. 
+ Name *string `json:"name,omitempty"` + // AssetName - Asset Name. + AssetName *string `json:"assetName,omitempty"` + // Created - The creation time of the Streaming Locator. + Created *date.Time `json:"created,omitempty"` + // StartTime - The start time of the Streaming Locator. + StartTime *date.Time `json:"startTime,omitempty"` + // EndTime - The end time of the Streaming Locator. + EndTime *date.Time `json:"endTime,omitempty"` + // StreamingLocatorID - StreamingLocatorId of the Streaming Locator. + StreamingLocatorID *uuid.UUID `json:"streamingLocatorId,omitempty"` + // StreamingPolicyName - Name of the Streaming Policy used by this Streaming Locator. + StreamingPolicyName *string `json:"streamingPolicyName,omitempty"` + // DefaultContentKeyPolicyName - Name of the default ContentKeyPolicy used by this Streaming Locator. + DefaultContentKeyPolicyName *string `json:"defaultContentKeyPolicyName,omitempty"` +} + +// BasicAudio defines the common properties for all audio codecs. +type BasicAudio interface { + AsAacAudio() (*AacAudio, bool) + AsAudio() (*Audio, bool) +} + +// Audio defines the common properties for all audio codecs. +type Audio struct { + // Channels - The number of channels in the audio. + Channels *int32 `json:"channels,omitempty"` + // SamplingRate - The sampling rate to use for encoding in hertz. + SamplingRate *int32 `json:"samplingRate,omitempty"` + // Bitrate - The bitrate, in bits per second, of the output encoded audio. + Bitrate *int32 `json:"bitrate,omitempty"` + // Label - An optional label for the codec. The label can be used to control muxing behavior. 
+ Label *string `json:"label,omitempty"` + // OdataType - Possible values include: 'OdataTypeCodec', 'OdataTypeMicrosoftMediaAudio', 'OdataTypeMicrosoftMediaAacAudio', 'OdataTypeMicrosoftMediaCopyVideo', 'OdataTypeMicrosoftMediaVideo', 'OdataTypeMicrosoftMediaImage', 'OdataTypeMicrosoftMediaCopyAudio', 'OdataTypeMicrosoftMediaH264Video', 'OdataTypeMicrosoftMediaJpgImage', 'OdataTypeMicrosoftMediaPngImage' + OdataType OdataTypeBasicCodec `json:"@odata.type,omitempty"` +} + +func unmarshalBasicAudio(body []byte) (BasicAudio, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["@odata.type"] { + case string(OdataTypeMicrosoftMediaAacAudio): + var aa AacAudio + err := json.Unmarshal(body, &aa) + return aa, err + default: + var a Audio + err := json.Unmarshal(body, &a) + return a, err + } +} +func unmarshalBasicAudioArray(body []byte) ([]BasicAudio, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + aArray := make([]BasicAudio, len(rawMessages)) + + for index, rawMessage := range rawMessages { + a, err := unmarshalBasicAudio(*rawMessage) + if err != nil { + return nil, err + } + aArray[index] = a + } + return aArray, nil +} + +// MarshalJSON is the custom marshaler for Audio. +func (a Audio) MarshalJSON() ([]byte, error) { + a.OdataType = OdataTypeMicrosoftMediaAudio + objectMap := make(map[string]interface{}) + if a.Channels != nil { + objectMap["channels"] = a.Channels + } + if a.SamplingRate != nil { + objectMap["samplingRate"] = a.SamplingRate + } + if a.Bitrate != nil { + objectMap["bitrate"] = a.Bitrate + } + if a.Label != nil { + objectMap["label"] = a.Label + } + if a.OdataType != "" { + objectMap["@odata.type"] = a.OdataType + } + return json.Marshal(objectMap) +} + +// AsAudio is the BasicCodec implementation for Audio. 
+func (a Audio) AsAudio() (*Audio, bool) { + return &a, true +} + +// AsBasicAudio is the BasicCodec implementation for Audio. +func (a Audio) AsBasicAudio() (BasicAudio, bool) { + return &a, true +} + +// AsAacAudio is the BasicCodec implementation for Audio. +func (a Audio) AsAacAudio() (*AacAudio, bool) { + return nil, false +} + +// AsCopyVideo is the BasicCodec implementation for Audio. +func (a Audio) AsCopyVideo() (*CopyVideo, bool) { + return nil, false +} + +// AsVideo is the BasicCodec implementation for Audio. +func (a Audio) AsVideo() (*Video, bool) { + return nil, false +} + +// AsBasicVideo is the BasicCodec implementation for Audio. +func (a Audio) AsBasicVideo() (BasicVideo, bool) { + return nil, false +} + +// AsImage is the BasicCodec implementation for Audio. +func (a Audio) AsImage() (*Image, bool) { + return nil, false +} + +// AsBasicImage is the BasicCodec implementation for Audio. +func (a Audio) AsBasicImage() (BasicImage, bool) { + return nil, false +} + +// AsCopyAudio is the BasicCodec implementation for Audio. +func (a Audio) AsCopyAudio() (*CopyAudio, bool) { + return nil, false +} + +// AsH264Video is the BasicCodec implementation for Audio. +func (a Audio) AsH264Video() (*H264Video, bool) { + return nil, false +} + +// AsJpgImage is the BasicCodec implementation for Audio. +func (a Audio) AsJpgImage() (*JpgImage, bool) { + return nil, false +} + +// AsPngImage is the BasicCodec implementation for Audio. +func (a Audio) AsPngImage() (*PngImage, bool) { + return nil, false +} + +// AsCodec is the BasicCodec implementation for Audio. +func (a Audio) AsCodec() (*Codec, bool) { + return nil, false +} + +// AsBasicCodec is the BasicCodec implementation for Audio. +func (a Audio) AsBasicCodec() (BasicCodec, bool) { + return &a, true +} + +// BasicAudioAnalyzerPreset the Audio Analyzer preset applies a pre-defined set of AI-based analysis operations, +// including speech transcription. 
Currently, the preset supports processing of content with a single audio track. +type BasicAudioAnalyzerPreset interface { + AsVideoAnalyzerPreset() (*VideoAnalyzerPreset, bool) + AsAudioAnalyzerPreset() (*AudioAnalyzerPreset, bool) +} + +// AudioAnalyzerPreset the Audio Analyzer preset applies a pre-defined set of AI-based analysis operations, +// including speech transcription. Currently, the preset supports processing of content with a single audio +// track. +type AudioAnalyzerPreset struct { + // AudioLanguage - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). The list of supported languages are, 'en-US', 'en-GB', 'es-ES', 'es-MX', 'fr-FR', 'it-IT', 'ja-JP', 'pt-BR', 'zh-CN', 'de-DE', 'ar-EG', 'ru-RU', 'hi-IN'. If not specified, automatic language detection would be employed. This feature currently supports English, Chinese, French, German, Italian, Japanese, Spanish, Russian, and Portuguese. The automatic detection works best with audio recordings with clearly discernable speech. If automatic detection fails to find the language, transcription would fallback to English. 
+ AudioLanguage *string `json:"audioLanguage,omitempty"` + // OdataType - Possible values include: 'OdataTypePreset', 'OdataTypeMicrosoftMediaAudioAnalyzerPreset', 'OdataTypeMicrosoftMediaBuiltInStandardEncoderPreset', 'OdataTypeMicrosoftMediaStandardEncoderPreset', 'OdataTypeMicrosoftMediaVideoAnalyzerPreset' + OdataType OdataTypeBasicPreset `json:"@odata.type,omitempty"` +} + +func unmarshalBasicAudioAnalyzerPreset(body []byte) (BasicAudioAnalyzerPreset, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["@odata.type"] { + case string(OdataTypeMicrosoftMediaVideoAnalyzerPreset): + var vap VideoAnalyzerPreset + err := json.Unmarshal(body, &vap) + return vap, err + default: + var aap AudioAnalyzerPreset + err := json.Unmarshal(body, &aap) + return aap, err + } +} +func unmarshalBasicAudioAnalyzerPresetArray(body []byte) ([]BasicAudioAnalyzerPreset, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + aapArray := make([]BasicAudioAnalyzerPreset, len(rawMessages)) + + for index, rawMessage := range rawMessages { + aap, err := unmarshalBasicAudioAnalyzerPreset(*rawMessage) + if err != nil { + return nil, err + } + aapArray[index] = aap + } + return aapArray, nil +} + +// MarshalJSON is the custom marshaler for AudioAnalyzerPreset. +func (aap AudioAnalyzerPreset) MarshalJSON() ([]byte, error) { + aap.OdataType = OdataTypeMicrosoftMediaAudioAnalyzerPreset + objectMap := make(map[string]interface{}) + if aap.AudioLanguage != nil { + objectMap["audioLanguage"] = aap.AudioLanguage + } + if aap.OdataType != "" { + objectMap["@odata.type"] = aap.OdataType + } + return json.Marshal(objectMap) +} + +// AsAudioAnalyzerPreset is the BasicPreset implementation for AudioAnalyzerPreset. 
+func (aap AudioAnalyzerPreset) AsAudioAnalyzerPreset() (*AudioAnalyzerPreset, bool) { + return &aap, true +} + +// AsBasicAudioAnalyzerPreset is the BasicPreset implementation for AudioAnalyzerPreset. +func (aap AudioAnalyzerPreset) AsBasicAudioAnalyzerPreset() (BasicAudioAnalyzerPreset, bool) { + return &aap, true +} + +// AsBuiltInStandardEncoderPreset is the BasicPreset implementation for AudioAnalyzerPreset. +func (aap AudioAnalyzerPreset) AsBuiltInStandardEncoderPreset() (*BuiltInStandardEncoderPreset, bool) { + return nil, false +} + +// AsStandardEncoderPreset is the BasicPreset implementation for AudioAnalyzerPreset. +func (aap AudioAnalyzerPreset) AsStandardEncoderPreset() (*StandardEncoderPreset, bool) { + return nil, false +} + +// AsVideoAnalyzerPreset is the BasicPreset implementation for AudioAnalyzerPreset. +func (aap AudioAnalyzerPreset) AsVideoAnalyzerPreset() (*VideoAnalyzerPreset, bool) { + return nil, false +} + +// AsPreset is the BasicPreset implementation for AudioAnalyzerPreset. +func (aap AudioAnalyzerPreset) AsPreset() (*Preset, bool) { + return nil, false +} + +// AsBasicPreset is the BasicPreset implementation for AudioAnalyzerPreset. +func (aap AudioAnalyzerPreset) AsBasicPreset() (BasicPreset, bool) { + return &aap, true +} + +// AudioOverlay describes the properties of an audio overlay. +type AudioOverlay struct { + // InputLabel - The label of the job input which is to be used as an overlay. The Input must specify exactly one file. You can specify an image file in JPG or PNG formats, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file. See https://aka.ms/mesformats for the complete list of supported audio and video file formats. + InputLabel *string `json:"inputLabel,omitempty"` + // Start - The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds in to the input video. 
If not specified the overlay starts from the beginning of the input video. + Start *string `json:"start,omitempty"` + // End - The position in the input video at which the overlay ends. The value should be in ISO 8601 duration format. For example, PT30S to end the overlay at 30 seconds in to the input video. If not specified the overlay will be applied until the end of the input video if inputLoop is true. Else, if inputLoop is false, then overlay will last as long as the duration of the overlay media. + End *string `json:"end,omitempty"` + // FadeInDuration - The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S). + FadeInDuration *string `json:"fadeInDuration,omitempty"` + // FadeOutDuration - The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S). + FadeOutDuration *string `json:"fadeOutDuration,omitempty"` + // AudioGainLevel - The gain level of audio in the overlay. The value should be in the range [0, 1.0]. The default is 1.0. + AudioGainLevel *float64 `json:"audioGainLevel,omitempty"` + // OdataType - Possible values include: 'OdataTypeOverlay', 'OdataTypeMicrosoftMediaAudioOverlay', 'OdataTypeMicrosoftMediaVideoOverlay' + OdataType OdataTypeBasicOverlay `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for AudioOverlay. 
+func (ao AudioOverlay) MarshalJSON() ([]byte, error) { + ao.OdataType = OdataTypeMicrosoftMediaAudioOverlay + objectMap := make(map[string]interface{}) + if ao.InputLabel != nil { + objectMap["inputLabel"] = ao.InputLabel + } + if ao.Start != nil { + objectMap["start"] = ao.Start + } + if ao.End != nil { + objectMap["end"] = ao.End + } + if ao.FadeInDuration != nil { + objectMap["fadeInDuration"] = ao.FadeInDuration + } + if ao.FadeOutDuration != nil { + objectMap["fadeOutDuration"] = ao.FadeOutDuration + } + if ao.AudioGainLevel != nil { + objectMap["audioGainLevel"] = ao.AudioGainLevel + } + if ao.OdataType != "" { + objectMap["@odata.type"] = ao.OdataType + } + return json.Marshal(objectMap) +} + +// AsAudioOverlay is the BasicOverlay implementation for AudioOverlay. +func (ao AudioOverlay) AsAudioOverlay() (*AudioOverlay, bool) { + return &ao, true +} + +// AsVideoOverlay is the BasicOverlay implementation for AudioOverlay. +func (ao AudioOverlay) AsVideoOverlay() (*VideoOverlay, bool) { + return nil, false +} + +// AsOverlay is the BasicOverlay implementation for AudioOverlay. +func (ao AudioOverlay) AsOverlay() (*Overlay, bool) { + return nil, false +} + +// AsBasicOverlay is the BasicOverlay implementation for AudioOverlay. +func (ao AudioOverlay) AsBasicOverlay() (BasicOverlay, bool) { + return &ao, true +} + +// BuiltInStandardEncoderPreset describes a built-in preset for encoding the input video with the Standard +// Encoder. +type BuiltInStandardEncoderPreset struct { + // PresetName - The built-in preset to be used for encoding videos. 
Possible values include: 'H264SingleBitrateSD', 'H264SingleBitrate720p', 'H264SingleBitrate1080p', 'AdaptiveStreaming', 'AACGoodQualityAudio', 'H264MultipleBitrate1080p', 'H264MultipleBitrate720p', 'H264MultipleBitrateSD' + PresetName EncoderNamedPreset `json:"presetName,omitempty"` + // OdataType - Possible values include: 'OdataTypePreset', 'OdataTypeMicrosoftMediaAudioAnalyzerPreset', 'OdataTypeMicrosoftMediaBuiltInStandardEncoderPreset', 'OdataTypeMicrosoftMediaStandardEncoderPreset', 'OdataTypeMicrosoftMediaVideoAnalyzerPreset' + OdataType OdataTypeBasicPreset `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for BuiltInStandardEncoderPreset. +func (bisep BuiltInStandardEncoderPreset) MarshalJSON() ([]byte, error) { + bisep.OdataType = OdataTypeMicrosoftMediaBuiltInStandardEncoderPreset + objectMap := make(map[string]interface{}) + if bisep.PresetName != "" { + objectMap["presetName"] = bisep.PresetName + } + if bisep.OdataType != "" { + objectMap["@odata.type"] = bisep.OdataType + } + return json.Marshal(objectMap) +} + +// AsAudioAnalyzerPreset is the BasicPreset implementation for BuiltInStandardEncoderPreset. +func (bisep BuiltInStandardEncoderPreset) AsAudioAnalyzerPreset() (*AudioAnalyzerPreset, bool) { + return nil, false +} + +// AsBasicAudioAnalyzerPreset is the BasicPreset implementation for BuiltInStandardEncoderPreset. +func (bisep BuiltInStandardEncoderPreset) AsBasicAudioAnalyzerPreset() (BasicAudioAnalyzerPreset, bool) { + return nil, false +} + +// AsBuiltInStandardEncoderPreset is the BasicPreset implementation for BuiltInStandardEncoderPreset. +func (bisep BuiltInStandardEncoderPreset) AsBuiltInStandardEncoderPreset() (*BuiltInStandardEncoderPreset, bool) { + return &bisep, true +} + +// AsStandardEncoderPreset is the BasicPreset implementation for BuiltInStandardEncoderPreset. 
+func (bisep BuiltInStandardEncoderPreset) AsStandardEncoderPreset() (*StandardEncoderPreset, bool) { + return nil, false +} + +// AsVideoAnalyzerPreset is the BasicPreset implementation for BuiltInStandardEncoderPreset. +func (bisep BuiltInStandardEncoderPreset) AsVideoAnalyzerPreset() (*VideoAnalyzerPreset, bool) { + return nil, false +} + +// AsPreset is the BasicPreset implementation for BuiltInStandardEncoderPreset. +func (bisep BuiltInStandardEncoderPreset) AsPreset() (*Preset, bool) { + return nil, false +} + +// AsBasicPreset is the BasicPreset implementation for BuiltInStandardEncoderPreset. +func (bisep BuiltInStandardEncoderPreset) AsBasicPreset() (BasicPreset, bool) { + return &bisep, true +} + +// CbcsDrmConfiguration class to specify DRM configurations of CommonEncryptionCbcs scheme in Streaming +// Policy +type CbcsDrmConfiguration struct { + // FairPlay - FairPlay configurations + FairPlay *StreamingPolicyFairPlayConfiguration `json:"fairPlay,omitempty"` + // PlayReady - PlayReady configurations + PlayReady *StreamingPolicyPlayReadyConfiguration `json:"playReady,omitempty"` + // Widevine - Widevine configurations + Widevine *StreamingPolicyWidevineConfiguration `json:"widevine,omitempty"` +} + +// CencDrmConfiguration class to specify DRM configurations of CommonEncryptionCenc scheme in Streaming +// Policy +type CencDrmConfiguration struct { + // PlayReady - PlayReady configurations + PlayReady *StreamingPolicyPlayReadyConfiguration `json:"playReady,omitempty"` + // Widevine - Widevine configurations + Widevine *StreamingPolicyWidevineConfiguration `json:"widevine,omitempty"` +} + +// CheckNameAvailabilityInput the input to the check name availability request. +type CheckNameAvailabilityInput struct { + // Name - The account name. + Name *string `json:"name,omitempty"` + // Type - The account type. For a Media Services account, this should be 'MediaServices'. 
+ Type *string `json:"type,omitempty"` +} + +// BasicCodec describes the basic properties of all codecs. +type BasicCodec interface { + AsAudio() (*Audio, bool) + AsBasicAudio() (BasicAudio, bool) + AsAacAudio() (*AacAudio, bool) + AsCopyVideo() (*CopyVideo, bool) + AsVideo() (*Video, bool) + AsBasicVideo() (BasicVideo, bool) + AsImage() (*Image, bool) + AsBasicImage() (BasicImage, bool) + AsCopyAudio() (*CopyAudio, bool) + AsH264Video() (*H264Video, bool) + AsJpgImage() (*JpgImage, bool) + AsPngImage() (*PngImage, bool) + AsCodec() (*Codec, bool) +} + +// Codec describes the basic properties of all codecs. +type Codec struct { + // Label - An optional label for the codec. The label can be used to control muxing behavior. + Label *string `json:"label,omitempty"` + // OdataType - Possible values include: 'OdataTypeCodec', 'OdataTypeMicrosoftMediaAudio', 'OdataTypeMicrosoftMediaAacAudio', 'OdataTypeMicrosoftMediaCopyVideo', 'OdataTypeMicrosoftMediaVideo', 'OdataTypeMicrosoftMediaImage', 'OdataTypeMicrosoftMediaCopyAudio', 'OdataTypeMicrosoftMediaH264Video', 'OdataTypeMicrosoftMediaJpgImage', 'OdataTypeMicrosoftMediaPngImage' + OdataType OdataTypeBasicCodec `json:"@odata.type,omitempty"` +} + +func unmarshalBasicCodec(body []byte) (BasicCodec, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["@odata.type"] { + case string(OdataTypeMicrosoftMediaAudio): + var a Audio + err := json.Unmarshal(body, &a) + return a, err + case string(OdataTypeMicrosoftMediaAacAudio): + var aa AacAudio + err := json.Unmarshal(body, &aa) + return aa, err + case string(OdataTypeMicrosoftMediaCopyVideo): + var cv CopyVideo + err := json.Unmarshal(body, &cv) + return cv, err + case string(OdataTypeMicrosoftMediaVideo): + var vVar Video + err := json.Unmarshal(body, &vVar) + return vVar, err + case string(OdataTypeMicrosoftMediaImage): + var i Image + err := json.Unmarshal(body, &i) + return i, err + case 
string(OdataTypeMicrosoftMediaCopyAudio): + var ca CopyAudio + err := json.Unmarshal(body, &ca) + return ca, err + case string(OdataTypeMicrosoftMediaH264Video): + var hv H264Video + err := json.Unmarshal(body, &hv) + return hv, err + case string(OdataTypeMicrosoftMediaJpgImage): + var ji JpgImage + err := json.Unmarshal(body, &ji) + return ji, err + case string(OdataTypeMicrosoftMediaPngImage): + var pi PngImage + err := json.Unmarshal(body, &pi) + return pi, err + default: + var c Codec + err := json.Unmarshal(body, &c) + return c, err + } +} +func unmarshalBasicCodecArray(body []byte) ([]BasicCodec, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + cArray := make([]BasicCodec, len(rawMessages)) + + for index, rawMessage := range rawMessages { + c, err := unmarshalBasicCodec(*rawMessage) + if err != nil { + return nil, err + } + cArray[index] = c + } + return cArray, nil +} + +// MarshalJSON is the custom marshaler for Codec. +func (c Codec) MarshalJSON() ([]byte, error) { + c.OdataType = OdataTypeCodec + objectMap := make(map[string]interface{}) + if c.Label != nil { + objectMap["label"] = c.Label + } + if c.OdataType != "" { + objectMap["@odata.type"] = c.OdataType + } + return json.Marshal(objectMap) +} + +// AsAudio is the BasicCodec implementation for Codec. +func (c Codec) AsAudio() (*Audio, bool) { + return nil, false +} + +// AsBasicAudio is the BasicCodec implementation for Codec. +func (c Codec) AsBasicAudio() (BasicAudio, bool) { + return nil, false +} + +// AsAacAudio is the BasicCodec implementation for Codec. +func (c Codec) AsAacAudio() (*AacAudio, bool) { + return nil, false +} + +// AsCopyVideo is the BasicCodec implementation for Codec. +func (c Codec) AsCopyVideo() (*CopyVideo, bool) { + return nil, false +} + +// AsVideo is the BasicCodec implementation for Codec. 
+func (c Codec) AsVideo() (*Video, bool) { + return nil, false +} + +// AsBasicVideo is the BasicCodec implementation for Codec. +func (c Codec) AsBasicVideo() (BasicVideo, bool) { + return nil, false +} + +// AsImage is the BasicCodec implementation for Codec. +func (c Codec) AsImage() (*Image, bool) { + return nil, false +} + +// AsBasicImage is the BasicCodec implementation for Codec. +func (c Codec) AsBasicImage() (BasicImage, bool) { + return nil, false +} + +// AsCopyAudio is the BasicCodec implementation for Codec. +func (c Codec) AsCopyAudio() (*CopyAudio, bool) { + return nil, false +} + +// AsH264Video is the BasicCodec implementation for Codec. +func (c Codec) AsH264Video() (*H264Video, bool) { + return nil, false +} + +// AsJpgImage is the BasicCodec implementation for Codec. +func (c Codec) AsJpgImage() (*JpgImage, bool) { + return nil, false +} + +// AsPngImage is the BasicCodec implementation for Codec. +func (c Codec) AsPngImage() (*PngImage, bool) { + return nil, false +} + +// AsCodec is the BasicCodec implementation for Codec. +func (c Codec) AsCodec() (*Codec, bool) { + return &c, true +} + +// AsBasicCodec is the BasicCodec implementation for Codec. 
+func (c Codec) AsBasicCodec() (BasicCodec, bool) { + return &c, true +} + +// CommonEncryptionCbcs class for CommonEncryptionCbcs encryption scheme +type CommonEncryptionCbcs struct { + // EnabledProtocols - Representing supported protocols + EnabledProtocols *EnabledProtocols `json:"enabledProtocols,omitempty"` + // ClearTracks - Representing which tracks should not be encrypted + ClearTracks *[]TrackSelection `json:"clearTracks,omitempty"` + // ContentKeys - Representing default content key for each encryption scheme and separate content keys for specific tracks + ContentKeys *StreamingPolicyContentKeys `json:"contentKeys,omitempty"` + // Drm - Configuration of DRMs for current encryption scheme + Drm *CbcsDrmConfiguration `json:"drm,omitempty"` +} + +// CommonEncryptionCenc class for envelope encryption scheme +type CommonEncryptionCenc struct { + // EnabledProtocols - Representing supported protocols + EnabledProtocols *EnabledProtocols `json:"enabledProtocols,omitempty"` + // ClearTracks - Representing which tracks should not be encrypted + ClearTracks *[]TrackSelection `json:"clearTracks,omitempty"` + // ContentKeys - Representing default content key for each encryption scheme and separate content keys for specific tracks + ContentKeys *StreamingPolicyContentKeys `json:"contentKeys,omitempty"` + // Drm - Configuration of DRMs for CommonEncryptionCenc encryption scheme + Drm *CencDrmConfiguration `json:"drm,omitempty"` +} + +// ContentKeyPolicy a Content Key Policy resource. +type ContentKeyPolicy struct { + autorest.Response `json:"-"` + *ContentKeyPolicyProperties `json:"properties,omitempty"` + // ID - Fully qualified resource ID for the resource. + ID *string `json:"id,omitempty"` + // Name - The name of the resource. + Name *string `json:"name,omitempty"` + // Type - The type of the resource. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ContentKeyPolicy. 
+func (ckp ContentKeyPolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ckp.ContentKeyPolicyProperties != nil { + objectMap["properties"] = ckp.ContentKeyPolicyProperties + } + if ckp.ID != nil { + objectMap["id"] = ckp.ID + } + if ckp.Name != nil { + objectMap["name"] = ckp.Name + } + if ckp.Type != nil { + objectMap["type"] = ckp.Type + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ContentKeyPolicy struct. +func (ckp *ContentKeyPolicy) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var contentKeyPolicyProperties ContentKeyPolicyProperties + err = json.Unmarshal(*v, &contentKeyPolicyProperties) + if err != nil { + return err + } + ckp.ContentKeyPolicyProperties = &contentKeyPolicyProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + ckp.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + ckp.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + ckp.Type = &typeVar + } + } + } + + return nil +} + +// ContentKeyPolicyClearKeyConfiguration represents a configuration for non-DRM keys. 
+type ContentKeyPolicyClearKeyConfiguration struct { + // OdataType - Possible values include: 'OdataTypeContentKeyPolicyConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyClearKeyConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyUnknownConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyWidevineConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyFairPlayConfiguration' + OdataType OdataTypeBasicContentKeyPolicyConfiguration `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ContentKeyPolicyClearKeyConfiguration. +func (ckpckc ContentKeyPolicyClearKeyConfiguration) MarshalJSON() ([]byte, error) { + ckpckc.OdataType = OdataTypeMicrosoftMediaContentKeyPolicyClearKeyConfiguration + objectMap := make(map[string]interface{}) + if ckpckc.OdataType != "" { + objectMap["@odata.type"] = ckpckc.OdataType + } + return json.Marshal(objectMap) +} + +// AsContentKeyPolicyClearKeyConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyClearKeyConfiguration. +func (ckpckc ContentKeyPolicyClearKeyConfiguration) AsContentKeyPolicyClearKeyConfiguration() (*ContentKeyPolicyClearKeyConfiguration, bool) { + return &ckpckc, true +} + +// AsContentKeyPolicyUnknownConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyClearKeyConfiguration. +func (ckpckc ContentKeyPolicyClearKeyConfiguration) AsContentKeyPolicyUnknownConfiguration() (*ContentKeyPolicyUnknownConfiguration, bool) { + return nil, false +} + +// AsContentKeyPolicyWidevineConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyClearKeyConfiguration. 
+func (ckpckc ContentKeyPolicyClearKeyConfiguration) AsContentKeyPolicyWidevineConfiguration() (*ContentKeyPolicyWidevineConfiguration, bool) { + return nil, false +} + +// AsContentKeyPolicyPlayReadyConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyClearKeyConfiguration. +func (ckpckc ContentKeyPolicyClearKeyConfiguration) AsContentKeyPolicyPlayReadyConfiguration() (*ContentKeyPolicyPlayReadyConfiguration, bool) { + return nil, false +} + +// AsContentKeyPolicyFairPlayConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyClearKeyConfiguration. +func (ckpckc ContentKeyPolicyClearKeyConfiguration) AsContentKeyPolicyFairPlayConfiguration() (*ContentKeyPolicyFairPlayConfiguration, bool) { + return nil, false +} + +// AsContentKeyPolicyConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyClearKeyConfiguration. +func (ckpckc ContentKeyPolicyClearKeyConfiguration) AsContentKeyPolicyConfiguration() (*ContentKeyPolicyConfiguration, bool) { + return nil, false +} + +// AsBasicContentKeyPolicyConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyClearKeyConfiguration. +func (ckpckc ContentKeyPolicyClearKeyConfiguration) AsBasicContentKeyPolicyConfiguration() (BasicContentKeyPolicyConfiguration, bool) { + return &ckpckc, true +} + +// ContentKeyPolicyCollection a collection of ContentKeyPolicy items. +type ContentKeyPolicyCollection struct { + autorest.Response `json:"-"` + // Value - A collection of ContentKeyPolicy items. + Value *[]ContentKeyPolicy `json:"value,omitempty"` + // OdataNextLink - A link to the next page of the collection (when the collection contains too many results to return in one response). + OdataNextLink *string `json:"@odata.nextLink,omitempty"` +} + +// ContentKeyPolicyCollectionIterator provides access to a complete listing of ContentKeyPolicy values. 
+type ContentKeyPolicyCollectionIterator struct { + i int + page ContentKeyPolicyCollectionPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ContentKeyPolicyCollectionIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ContentKeyPolicyCollectionIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ContentKeyPolicyCollectionIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ContentKeyPolicyCollectionIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ContentKeyPolicyCollectionIterator) Response() ContentKeyPolicyCollection { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ContentKeyPolicyCollectionIterator) Value() ContentKeyPolicy { + if !iter.page.NotDone() { + return ContentKeyPolicy{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ContentKeyPolicyCollectionIterator type. 
+func NewContentKeyPolicyCollectionIterator(page ContentKeyPolicyCollectionPage) ContentKeyPolicyCollectionIterator { + return ContentKeyPolicyCollectionIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (ckpc ContentKeyPolicyCollection) IsEmpty() bool { + return ckpc.Value == nil || len(*ckpc.Value) == 0 +} + +// contentKeyPolicyCollectionPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (ckpc ContentKeyPolicyCollection) contentKeyPolicyCollectionPreparer(ctx context.Context) (*http.Request, error) { + if ckpc.OdataNextLink == nil || len(to.String(ckpc.OdataNextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(ckpc.OdataNextLink))) +} + +// ContentKeyPolicyCollectionPage contains a page of ContentKeyPolicy values. +type ContentKeyPolicyCollectionPage struct { + fn func(context.Context, ContentKeyPolicyCollection) (ContentKeyPolicyCollection, error) + ckpc ContentKeyPolicyCollection +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ContentKeyPolicyCollectionPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ContentKeyPolicyCollectionPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.ckpc) + if err != nil { + return err + } + page.ckpc = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. 
+func (page *ContentKeyPolicyCollectionPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ContentKeyPolicyCollectionPage) NotDone() bool { + return !page.ckpc.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ContentKeyPolicyCollectionPage) Response() ContentKeyPolicyCollection { + return page.ckpc +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ContentKeyPolicyCollectionPage) Values() []ContentKeyPolicy { + if page.ckpc.IsEmpty() { + return nil + } + return *page.ckpc.Value +} + +// Creates a new instance of the ContentKeyPolicyCollectionPage type. +func NewContentKeyPolicyCollectionPage(getNextPage func(context.Context, ContentKeyPolicyCollection) (ContentKeyPolicyCollection, error)) ContentKeyPolicyCollectionPage { + return ContentKeyPolicyCollectionPage{fn: getNextPage} +} + +// BasicContentKeyPolicyConfiguration base class for Content Key Policy configuration. A derived class must be used to +// create a configuration. +type BasicContentKeyPolicyConfiguration interface { + AsContentKeyPolicyClearKeyConfiguration() (*ContentKeyPolicyClearKeyConfiguration, bool) + AsContentKeyPolicyUnknownConfiguration() (*ContentKeyPolicyUnknownConfiguration, bool) + AsContentKeyPolicyWidevineConfiguration() (*ContentKeyPolicyWidevineConfiguration, bool) + AsContentKeyPolicyPlayReadyConfiguration() (*ContentKeyPolicyPlayReadyConfiguration, bool) + AsContentKeyPolicyFairPlayConfiguration() (*ContentKeyPolicyFairPlayConfiguration, bool) + AsContentKeyPolicyConfiguration() (*ContentKeyPolicyConfiguration, bool) +} + +// ContentKeyPolicyConfiguration base class for Content Key Policy configuration. A derived class must be used +// to create a configuration. 
+type ContentKeyPolicyConfiguration struct { + // OdataType - Possible values include: 'OdataTypeContentKeyPolicyConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyClearKeyConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyUnknownConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyWidevineConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyFairPlayConfiguration' + OdataType OdataTypeBasicContentKeyPolicyConfiguration `json:"@odata.type,omitempty"` +} + +func unmarshalBasicContentKeyPolicyConfiguration(body []byte) (BasicContentKeyPolicyConfiguration, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["@odata.type"] { + case string(OdataTypeMicrosoftMediaContentKeyPolicyClearKeyConfiguration): + var ckpckc ContentKeyPolicyClearKeyConfiguration + err := json.Unmarshal(body, &ckpckc) + return ckpckc, err + case string(OdataTypeMicrosoftMediaContentKeyPolicyUnknownConfiguration): + var ckpuc ContentKeyPolicyUnknownConfiguration + err := json.Unmarshal(body, &ckpuc) + return ckpuc, err + case string(OdataTypeMicrosoftMediaContentKeyPolicyWidevineConfiguration): + var ckpwc ContentKeyPolicyWidevineConfiguration + err := json.Unmarshal(body, &ckpwc) + return ckpwc, err + case string(OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyConfiguration): + var ckpprc ContentKeyPolicyPlayReadyConfiguration + err := json.Unmarshal(body, &ckpprc) + return ckpprc, err + case string(OdataTypeMicrosoftMediaContentKeyPolicyFairPlayConfiguration): + var ckpfpc ContentKeyPolicyFairPlayConfiguration + err := json.Unmarshal(body, &ckpfpc) + return ckpfpc, err + default: + var ckpc ContentKeyPolicyConfiguration + err := json.Unmarshal(body, &ckpc) + return ckpc, err + } +} +func unmarshalBasicContentKeyPolicyConfigurationArray(body []byte) ([]BasicContentKeyPolicyConfiguration, error) { + var rawMessages []*json.RawMessage + err := 
json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + ckpcArray := make([]BasicContentKeyPolicyConfiguration, len(rawMessages)) + + for index, rawMessage := range rawMessages { + ckpc, err := unmarshalBasicContentKeyPolicyConfiguration(*rawMessage) + if err != nil { + return nil, err + } + ckpcArray[index] = ckpc + } + return ckpcArray, nil +} + +// MarshalJSON is the custom marshaler for ContentKeyPolicyConfiguration. +func (ckpc ContentKeyPolicyConfiguration) MarshalJSON() ([]byte, error) { + ckpc.OdataType = OdataTypeContentKeyPolicyConfiguration + objectMap := make(map[string]interface{}) + if ckpc.OdataType != "" { + objectMap["@odata.type"] = ckpc.OdataType + } + return json.Marshal(objectMap) +} + +// AsContentKeyPolicyClearKeyConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyConfiguration. +func (ckpc ContentKeyPolicyConfiguration) AsContentKeyPolicyClearKeyConfiguration() (*ContentKeyPolicyClearKeyConfiguration, bool) { + return nil, false +} + +// AsContentKeyPolicyUnknownConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyConfiguration. +func (ckpc ContentKeyPolicyConfiguration) AsContentKeyPolicyUnknownConfiguration() (*ContentKeyPolicyUnknownConfiguration, bool) { + return nil, false +} + +// AsContentKeyPolicyWidevineConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyConfiguration. +func (ckpc ContentKeyPolicyConfiguration) AsContentKeyPolicyWidevineConfiguration() (*ContentKeyPolicyWidevineConfiguration, bool) { + return nil, false +} + +// AsContentKeyPolicyPlayReadyConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyConfiguration. 
+func (ckpc ContentKeyPolicyConfiguration) AsContentKeyPolicyPlayReadyConfiguration() (*ContentKeyPolicyPlayReadyConfiguration, bool) { + return nil, false +} + +// AsContentKeyPolicyFairPlayConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyConfiguration. +func (ckpc ContentKeyPolicyConfiguration) AsContentKeyPolicyFairPlayConfiguration() (*ContentKeyPolicyFairPlayConfiguration, bool) { + return nil, false +} + +// AsContentKeyPolicyConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyConfiguration. +func (ckpc ContentKeyPolicyConfiguration) AsContentKeyPolicyConfiguration() (*ContentKeyPolicyConfiguration, bool) { + return &ckpc, true +} + +// AsBasicContentKeyPolicyConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyConfiguration. +func (ckpc ContentKeyPolicyConfiguration) AsBasicContentKeyPolicyConfiguration() (BasicContentKeyPolicyConfiguration, bool) { + return &ckpc, true +} + +// ContentKeyPolicyFairPlayConfiguration specifies a configuration for FairPlay licenses. +type ContentKeyPolicyFairPlayConfiguration struct { + // Ask - The key that must be used as FairPlay Application Secret key. + Ask *[]byte `json:"ask,omitempty"` + // FairPlayPfxPassword - The password encrypting FairPlay certificate in PKCS 12 (pfx) format. + FairPlayPfxPassword *string `json:"fairPlayPfxPassword,omitempty"` + // FairPlayPfx - The Base64 representation of FairPlay certificate in PKCS 12 (pfx) format (including private key). + FairPlayPfx *string `json:"fairPlayPfx,omitempty"` + // RentalAndLeaseKeyType - The rental and lease key type. Possible values include: 'Unknown', 'Undefined', 'PersistentUnlimited', 'PersistentLimited' + RentalAndLeaseKeyType ContentKeyPolicyFairPlayRentalAndLeaseKeyType `json:"rentalAndLeaseKeyType,omitempty"` + // RentalDuration - The rental duration. Must be greater than or equal to 0. 
+ RentalDuration *int64 `json:"rentalDuration,omitempty"` + // OdataType - Possible values include: 'OdataTypeContentKeyPolicyConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyClearKeyConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyUnknownConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyWidevineConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyFairPlayConfiguration' + OdataType OdataTypeBasicContentKeyPolicyConfiguration `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ContentKeyPolicyFairPlayConfiguration. +func (ckpfpc ContentKeyPolicyFairPlayConfiguration) MarshalJSON() ([]byte, error) { + ckpfpc.OdataType = OdataTypeMicrosoftMediaContentKeyPolicyFairPlayConfiguration + objectMap := make(map[string]interface{}) + if ckpfpc.Ask != nil { + objectMap["ask"] = ckpfpc.Ask + } + if ckpfpc.FairPlayPfxPassword != nil { + objectMap["fairPlayPfxPassword"] = ckpfpc.FairPlayPfxPassword + } + if ckpfpc.FairPlayPfx != nil { + objectMap["fairPlayPfx"] = ckpfpc.FairPlayPfx + } + if ckpfpc.RentalAndLeaseKeyType != "" { + objectMap["rentalAndLeaseKeyType"] = ckpfpc.RentalAndLeaseKeyType + } + if ckpfpc.RentalDuration != nil { + objectMap["rentalDuration"] = ckpfpc.RentalDuration + } + if ckpfpc.OdataType != "" { + objectMap["@odata.type"] = ckpfpc.OdataType + } + return json.Marshal(objectMap) +} + +// AsContentKeyPolicyClearKeyConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyFairPlayConfiguration. +func (ckpfpc ContentKeyPolicyFairPlayConfiguration) AsContentKeyPolicyClearKeyConfiguration() (*ContentKeyPolicyClearKeyConfiguration, bool) { + return nil, false +} + +// AsContentKeyPolicyUnknownConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyFairPlayConfiguration. 
+func (ckpfpc ContentKeyPolicyFairPlayConfiguration) AsContentKeyPolicyUnknownConfiguration() (*ContentKeyPolicyUnknownConfiguration, bool) { + return nil, false +} + +// AsContentKeyPolicyWidevineConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyFairPlayConfiguration. +func (ckpfpc ContentKeyPolicyFairPlayConfiguration) AsContentKeyPolicyWidevineConfiguration() (*ContentKeyPolicyWidevineConfiguration, bool) { + return nil, false +} + +// AsContentKeyPolicyPlayReadyConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyFairPlayConfiguration. +func (ckpfpc ContentKeyPolicyFairPlayConfiguration) AsContentKeyPolicyPlayReadyConfiguration() (*ContentKeyPolicyPlayReadyConfiguration, bool) { + return nil, false +} + +// AsContentKeyPolicyFairPlayConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyFairPlayConfiguration. +func (ckpfpc ContentKeyPolicyFairPlayConfiguration) AsContentKeyPolicyFairPlayConfiguration() (*ContentKeyPolicyFairPlayConfiguration, bool) { + return &ckpfpc, true +} + +// AsContentKeyPolicyConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyFairPlayConfiguration. +func (ckpfpc ContentKeyPolicyFairPlayConfiguration) AsContentKeyPolicyConfiguration() (*ContentKeyPolicyConfiguration, bool) { + return nil, false +} + +// AsBasicContentKeyPolicyConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyFairPlayConfiguration. +func (ckpfpc ContentKeyPolicyFairPlayConfiguration) AsBasicContentKeyPolicyConfiguration() (BasicContentKeyPolicyConfiguration, bool) { + return &ckpfpc, true +} + +// ContentKeyPolicyOpenRestriction represents an open restriction. License or key will be delivered on +// every request. 
+type ContentKeyPolicyOpenRestriction struct { + // OdataType - Possible values include: 'OdataTypeContentKeyPolicyRestriction', 'OdataTypeMicrosoftMediaContentKeyPolicyOpenRestriction', 'OdataTypeMicrosoftMediaContentKeyPolicyUnknownRestriction', 'OdataTypeMicrosoftMediaContentKeyPolicyTokenRestriction' + OdataType OdataTypeBasicContentKeyPolicyRestriction `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ContentKeyPolicyOpenRestriction. +func (ckpor ContentKeyPolicyOpenRestriction) MarshalJSON() ([]byte, error) { + ckpor.OdataType = OdataTypeMicrosoftMediaContentKeyPolicyOpenRestriction + objectMap := make(map[string]interface{}) + if ckpor.OdataType != "" { + objectMap["@odata.type"] = ckpor.OdataType + } + return json.Marshal(objectMap) +} + +// AsContentKeyPolicyOpenRestriction is the BasicContentKeyPolicyRestriction implementation for ContentKeyPolicyOpenRestriction. +func (ckpor ContentKeyPolicyOpenRestriction) AsContentKeyPolicyOpenRestriction() (*ContentKeyPolicyOpenRestriction, bool) { + return &ckpor, true +} + +// AsContentKeyPolicyUnknownRestriction is the BasicContentKeyPolicyRestriction implementation for ContentKeyPolicyOpenRestriction. +func (ckpor ContentKeyPolicyOpenRestriction) AsContentKeyPolicyUnknownRestriction() (*ContentKeyPolicyUnknownRestriction, bool) { + return nil, false +} + +// AsContentKeyPolicyTokenRestriction is the BasicContentKeyPolicyRestriction implementation for ContentKeyPolicyOpenRestriction. +func (ckpor ContentKeyPolicyOpenRestriction) AsContentKeyPolicyTokenRestriction() (*ContentKeyPolicyTokenRestriction, bool) { + return nil, false +} + +// AsContentKeyPolicyRestriction is the BasicContentKeyPolicyRestriction implementation for ContentKeyPolicyOpenRestriction. 
+func (ckpor ContentKeyPolicyOpenRestriction) AsContentKeyPolicyRestriction() (*ContentKeyPolicyRestriction, bool) { + return nil, false +} + +// AsBasicContentKeyPolicyRestriction is the BasicContentKeyPolicyRestriction implementation for ContentKeyPolicyOpenRestriction. +func (ckpor ContentKeyPolicyOpenRestriction) AsBasicContentKeyPolicyRestriction() (BasicContentKeyPolicyRestriction, bool) { + return &ckpor, true +} + +// ContentKeyPolicyOption represents a policy option. +type ContentKeyPolicyOption struct { + // PolicyOptionID - The legacy Policy Option ID. + PolicyOptionID *uuid.UUID `json:"policyOptionId,omitempty"` + // Name - The Policy Option description. + Name *string `json:"name,omitempty"` + // Configuration - The key delivery configuration. + Configuration BasicContentKeyPolicyConfiguration `json:"configuration,omitempty"` + // Restriction - The requirements that must be met to deliver keys with this configuration + Restriction BasicContentKeyPolicyRestriction `json:"restriction,omitempty"` +} + +// UnmarshalJSON is the custom unmarshaler for ContentKeyPolicyOption struct. 
+func (ckpo *ContentKeyPolicyOption) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "policyOptionId": + if v != nil { + var policyOptionID uuid.UUID + err = json.Unmarshal(*v, &policyOptionID) + if err != nil { + return err + } + ckpo.PolicyOptionID = &policyOptionID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + ckpo.Name = &name + } + case "configuration": + if v != nil { + configuration, err := unmarshalBasicContentKeyPolicyConfiguration(*v) + if err != nil { + return err + } + ckpo.Configuration = configuration + } + case "restriction": + if v != nil { + restriction, err := unmarshalBasicContentKeyPolicyRestriction(*v) + if err != nil { + return err + } + ckpo.Restriction = restriction + } + } + } + + return nil +} + +// ContentKeyPolicyPlayReadyConfiguration specifies a configuration for PlayReady licenses. +type ContentKeyPolicyPlayReadyConfiguration struct { + // Licenses - The PlayReady licenses. + Licenses *[]ContentKeyPolicyPlayReadyLicense `json:"licenses,omitempty"` + // ResponseCustomData - The custom response data. + ResponseCustomData *string `json:"responseCustomData,omitempty"` + // OdataType - Possible values include: 'OdataTypeContentKeyPolicyConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyClearKeyConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyUnknownConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyWidevineConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyFairPlayConfiguration' + OdataType OdataTypeBasicContentKeyPolicyConfiguration `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ContentKeyPolicyPlayReadyConfiguration. 
+func (ckpprc ContentKeyPolicyPlayReadyConfiguration) MarshalJSON() ([]byte, error) { + ckpprc.OdataType = OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyConfiguration + objectMap := make(map[string]interface{}) + if ckpprc.Licenses != nil { + objectMap["licenses"] = ckpprc.Licenses + } + if ckpprc.ResponseCustomData != nil { + objectMap["responseCustomData"] = ckpprc.ResponseCustomData + } + if ckpprc.OdataType != "" { + objectMap["@odata.type"] = ckpprc.OdataType + } + return json.Marshal(objectMap) +} + +// AsContentKeyPolicyClearKeyConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyPlayReadyConfiguration. +func (ckpprc ContentKeyPolicyPlayReadyConfiguration) AsContentKeyPolicyClearKeyConfiguration() (*ContentKeyPolicyClearKeyConfiguration, bool) { + return nil, false +} + +// AsContentKeyPolicyUnknownConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyPlayReadyConfiguration. +func (ckpprc ContentKeyPolicyPlayReadyConfiguration) AsContentKeyPolicyUnknownConfiguration() (*ContentKeyPolicyUnknownConfiguration, bool) { + return nil, false +} + +// AsContentKeyPolicyWidevineConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyPlayReadyConfiguration. +func (ckpprc ContentKeyPolicyPlayReadyConfiguration) AsContentKeyPolicyWidevineConfiguration() (*ContentKeyPolicyWidevineConfiguration, bool) { + return nil, false +} + +// AsContentKeyPolicyPlayReadyConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyPlayReadyConfiguration. +func (ckpprc ContentKeyPolicyPlayReadyConfiguration) AsContentKeyPolicyPlayReadyConfiguration() (*ContentKeyPolicyPlayReadyConfiguration, bool) { + return &ckpprc, true +} + +// AsContentKeyPolicyFairPlayConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyPlayReadyConfiguration. 
+func (ckpprc ContentKeyPolicyPlayReadyConfiguration) AsContentKeyPolicyFairPlayConfiguration() (*ContentKeyPolicyFairPlayConfiguration, bool) { + return nil, false +} + +// AsContentKeyPolicyConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyPlayReadyConfiguration. +func (ckpprc ContentKeyPolicyPlayReadyConfiguration) AsContentKeyPolicyConfiguration() (*ContentKeyPolicyConfiguration, bool) { + return nil, false +} + +// AsBasicContentKeyPolicyConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyPlayReadyConfiguration. +func (ckpprc ContentKeyPolicyPlayReadyConfiguration) AsBasicContentKeyPolicyConfiguration() (BasicContentKeyPolicyConfiguration, bool) { + return &ckpprc, true +} + +// ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader specifies that the content key ID is in the +// PlayReady header. +type ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader struct { + // OdataType - Possible values include: 'OdataTypeContentKeyPolicyPlayReadyContentKeyLocation', 'OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader', 'OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier' + OdataType OdataType `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader. +func (ckpprcekfh ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader) MarshalJSON() ([]byte, error) { + ckpprcekfh.OdataType = OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader + objectMap := make(map[string]interface{}) + if ckpprcekfh.OdataType != "" { + objectMap["@odata.type"] = ckpprcekfh.OdataType + } + return json.Marshal(objectMap) +} + +// AsContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader is the BasicContentKeyPolicyPlayReadyContentKeyLocation implementation for ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader. 
+func (ckpprcekfh ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader) AsContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader() (*ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader, bool) { + return &ckpprcekfh, true +} + +// AsContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier is the BasicContentKeyPolicyPlayReadyContentKeyLocation implementation for ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader. +func (ckpprcekfh ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader) AsContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier() (*ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier, bool) { + return nil, false +} + +// AsContentKeyPolicyPlayReadyContentKeyLocation is the BasicContentKeyPolicyPlayReadyContentKeyLocation implementation for ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader. +func (ckpprcekfh ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader) AsContentKeyPolicyPlayReadyContentKeyLocation() (*ContentKeyPolicyPlayReadyContentKeyLocation, bool) { + return nil, false +} + +// AsBasicContentKeyPolicyPlayReadyContentKeyLocation is the BasicContentKeyPolicyPlayReadyContentKeyLocation implementation for ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader. +func (ckpprcekfh ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader) AsBasicContentKeyPolicyPlayReadyContentKeyLocation() (BasicContentKeyPolicyPlayReadyContentKeyLocation, bool) { + return &ckpprcekfh, true +} + +// ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier specifies that the content key ID is +// specified in the PlayReady configuration. +type ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier struct { + // KeyID - The content key ID. 
+ KeyID *uuid.UUID `json:"keyId,omitempty"` + // OdataType - Possible values include: 'OdataTypeContentKeyPolicyPlayReadyContentKeyLocation', 'OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader', 'OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier' + OdataType OdataType `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier. +func (ckpprcekfki ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier) MarshalJSON() ([]byte, error) { + ckpprcekfki.OdataType = OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier + objectMap := make(map[string]interface{}) + if ckpprcekfki.KeyID != nil { + objectMap["keyId"] = ckpprcekfki.KeyID + } + if ckpprcekfki.OdataType != "" { + objectMap["@odata.type"] = ckpprcekfki.OdataType + } + return json.Marshal(objectMap) +} + +// AsContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader is the BasicContentKeyPolicyPlayReadyContentKeyLocation implementation for ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier. +func (ckpprcekfki ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier) AsContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader() (*ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader, bool) { + return nil, false +} + +// AsContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier is the BasicContentKeyPolicyPlayReadyContentKeyLocation implementation for ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier. 
+func (ckpprcekfki ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier) AsContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier() (*ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier, bool) { + return &ckpprcekfki, true +} + +// AsContentKeyPolicyPlayReadyContentKeyLocation is the BasicContentKeyPolicyPlayReadyContentKeyLocation implementation for ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier. +func (ckpprcekfki ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier) AsContentKeyPolicyPlayReadyContentKeyLocation() (*ContentKeyPolicyPlayReadyContentKeyLocation, bool) { + return nil, false +} + +// AsBasicContentKeyPolicyPlayReadyContentKeyLocation is the BasicContentKeyPolicyPlayReadyContentKeyLocation implementation for ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier. +func (ckpprcekfki ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier) AsBasicContentKeyPolicyPlayReadyContentKeyLocation() (BasicContentKeyPolicyPlayReadyContentKeyLocation, bool) { + return &ckpprcekfki, true +} + +// BasicContentKeyPolicyPlayReadyContentKeyLocation base class for content key ID location. A derived class must be +// used to represent the location. +type BasicContentKeyPolicyPlayReadyContentKeyLocation interface { + AsContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader() (*ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader, bool) + AsContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier() (*ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier, bool) + AsContentKeyPolicyPlayReadyContentKeyLocation() (*ContentKeyPolicyPlayReadyContentKeyLocation, bool) +} + +// ContentKeyPolicyPlayReadyContentKeyLocation base class for content key ID location. A derived class must be +// used to represent the location. 
+type ContentKeyPolicyPlayReadyContentKeyLocation struct { + // OdataType - Possible values include: 'OdataTypeContentKeyPolicyPlayReadyContentKeyLocation', 'OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader', 'OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier' + OdataType OdataType `json:"@odata.type,omitempty"` +} + +func unmarshalBasicContentKeyPolicyPlayReadyContentKeyLocation(body []byte) (BasicContentKeyPolicyPlayReadyContentKeyLocation, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["@odata.type"] { + case string(OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader): + var ckpprcekfh ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader + err := json.Unmarshal(body, &ckpprcekfh) + return ckpprcekfh, err + case string(OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier): + var ckpprcekfki ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier + err := json.Unmarshal(body, &ckpprcekfki) + return ckpprcekfki, err + default: + var ckpprckl ContentKeyPolicyPlayReadyContentKeyLocation + err := json.Unmarshal(body, &ckpprckl) + return ckpprckl, err + } +} +func unmarshalBasicContentKeyPolicyPlayReadyContentKeyLocationArray(body []byte) ([]BasicContentKeyPolicyPlayReadyContentKeyLocation, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + ckpprcklArray := make([]BasicContentKeyPolicyPlayReadyContentKeyLocation, len(rawMessages)) + + for index, rawMessage := range rawMessages { + ckpprckl, err := unmarshalBasicContentKeyPolicyPlayReadyContentKeyLocation(*rawMessage) + if err != nil { + return nil, err + } + ckpprcklArray[index] = ckpprckl + } + return ckpprcklArray, nil +} + +// MarshalJSON is the custom marshaler for ContentKeyPolicyPlayReadyContentKeyLocation. 
+func (ckpprckl ContentKeyPolicyPlayReadyContentKeyLocation) MarshalJSON() ([]byte, error) { + ckpprckl.OdataType = OdataTypeContentKeyPolicyPlayReadyContentKeyLocation + objectMap := make(map[string]interface{}) + if ckpprckl.OdataType != "" { + objectMap["@odata.type"] = ckpprckl.OdataType + } + return json.Marshal(objectMap) +} + +// AsContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader is the BasicContentKeyPolicyPlayReadyContentKeyLocation implementation for ContentKeyPolicyPlayReadyContentKeyLocation. +func (ckpprckl ContentKeyPolicyPlayReadyContentKeyLocation) AsContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader() (*ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader, bool) { + return nil, false +} + +// AsContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier is the BasicContentKeyPolicyPlayReadyContentKeyLocation implementation for ContentKeyPolicyPlayReadyContentKeyLocation. +func (ckpprckl ContentKeyPolicyPlayReadyContentKeyLocation) AsContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier() (*ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier, bool) { + return nil, false +} + +// AsContentKeyPolicyPlayReadyContentKeyLocation is the BasicContentKeyPolicyPlayReadyContentKeyLocation implementation for ContentKeyPolicyPlayReadyContentKeyLocation. +func (ckpprckl ContentKeyPolicyPlayReadyContentKeyLocation) AsContentKeyPolicyPlayReadyContentKeyLocation() (*ContentKeyPolicyPlayReadyContentKeyLocation, bool) { + return &ckpprckl, true +} + +// AsBasicContentKeyPolicyPlayReadyContentKeyLocation is the BasicContentKeyPolicyPlayReadyContentKeyLocation implementation for ContentKeyPolicyPlayReadyContentKeyLocation. 
+func (ckpprckl ContentKeyPolicyPlayReadyContentKeyLocation) AsBasicContentKeyPolicyPlayReadyContentKeyLocation() (BasicContentKeyPolicyPlayReadyContentKeyLocation, bool) { + return &ckpprckl, true +} + +// ContentKeyPolicyPlayReadyExplicitAnalogTelevisionRestriction configures the Explicit Analog Television +// Output Restriction control bits. For further details see the PlayReady Compliance Rules. +type ContentKeyPolicyPlayReadyExplicitAnalogTelevisionRestriction struct { + // BestEffort - Indicates whether this restriction is enforced on a Best Effort basis. + BestEffort *bool `json:"bestEffort,omitempty"` + // ConfigurationData - Configures the restriction control bits. Must be between 0 and 3 inclusive. + ConfigurationData *int32 `json:"configurationData,omitempty"` +} + +// ContentKeyPolicyPlayReadyLicense the PlayReady license +type ContentKeyPolicyPlayReadyLicense struct { + // AllowTestDevices - A flag indicating whether test devices can use the license. + AllowTestDevices *bool `json:"allowTestDevices,omitempty"` + // BeginDate - The begin date of license + BeginDate *date.Time `json:"beginDate,omitempty"` + // ExpirationDate - The expiration date of license. + ExpirationDate *date.Time `json:"expirationDate,omitempty"` + // RelativeBeginDate - The relative begin date of license. + RelativeBeginDate *string `json:"relativeBeginDate,omitempty"` + // RelativeExpirationDate - The relative expiration date of license. + RelativeExpirationDate *string `json:"relativeExpirationDate,omitempty"` + // GracePeriod - The grace period of license. + GracePeriod *string `json:"gracePeriod,omitempty"` + // PlayRight - The license PlayRight + PlayRight *ContentKeyPolicyPlayReadyPlayRight `json:"playRight,omitempty"` + // LicenseType - The license type. 
Possible values include: 'ContentKeyPolicyPlayReadyLicenseTypeUnknown', 'ContentKeyPolicyPlayReadyLicenseTypeNonPersistent', 'ContentKeyPolicyPlayReadyLicenseTypePersistent' + LicenseType ContentKeyPolicyPlayReadyLicenseType `json:"licenseType,omitempty"` + // ContentKeyLocation - The content key location. + ContentKeyLocation BasicContentKeyPolicyPlayReadyContentKeyLocation `json:"contentKeyLocation,omitempty"` + // ContentType - The PlayReady content type. Possible values include: 'ContentKeyPolicyPlayReadyContentTypeUnknown', 'ContentKeyPolicyPlayReadyContentTypeUnspecified', 'ContentKeyPolicyPlayReadyContentTypeUltraVioletDownload', 'ContentKeyPolicyPlayReadyContentTypeUltraVioletStreaming' + ContentType ContentKeyPolicyPlayReadyContentType `json:"contentType,omitempty"` +} + +// UnmarshalJSON is the custom unmarshaler for ContentKeyPolicyPlayReadyLicense struct. +func (ckpprl *ContentKeyPolicyPlayReadyLicense) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "allowTestDevices": + if v != nil { + var allowTestDevices bool + err = json.Unmarshal(*v, &allowTestDevices) + if err != nil { + return err + } + ckpprl.AllowTestDevices = &allowTestDevices + } + case "beginDate": + if v != nil { + var beginDate date.Time + err = json.Unmarshal(*v, &beginDate) + if err != nil { + return err + } + ckpprl.BeginDate = &beginDate + } + case "expirationDate": + if v != nil { + var expirationDate date.Time + err = json.Unmarshal(*v, &expirationDate) + if err != nil { + return err + } + ckpprl.ExpirationDate = &expirationDate + } + case "relativeBeginDate": + if v != nil { + var relativeBeginDate string + err = json.Unmarshal(*v, &relativeBeginDate) + if err != nil { + return err + } + ckpprl.RelativeBeginDate = &relativeBeginDate + } + case "relativeExpirationDate": + if v != nil { + var relativeExpirationDate string + err = json.Unmarshal(*v, 
&relativeExpirationDate) + if err != nil { + return err + } + ckpprl.RelativeExpirationDate = &relativeExpirationDate + } + case "gracePeriod": + if v != nil { + var gracePeriod string + err = json.Unmarshal(*v, &gracePeriod) + if err != nil { + return err + } + ckpprl.GracePeriod = &gracePeriod + } + case "playRight": + if v != nil { + var playRight ContentKeyPolicyPlayReadyPlayRight + err = json.Unmarshal(*v, &playRight) + if err != nil { + return err + } + ckpprl.PlayRight = &playRight + } + case "licenseType": + if v != nil { + var licenseType ContentKeyPolicyPlayReadyLicenseType + err = json.Unmarshal(*v, &licenseType) + if err != nil { + return err + } + ckpprl.LicenseType = licenseType + } + case "contentKeyLocation": + if v != nil { + contentKeyLocation, err := unmarshalBasicContentKeyPolicyPlayReadyContentKeyLocation(*v) + if err != nil { + return err + } + ckpprl.ContentKeyLocation = contentKeyLocation + } + case "contentType": + if v != nil { + var contentType ContentKeyPolicyPlayReadyContentType + err = json.Unmarshal(*v, &contentType) + if err != nil { + return err + } + ckpprl.ContentType = contentType + } + } + } + + return nil +} + +// ContentKeyPolicyPlayReadyPlayRight configures the Play Right in the PlayReady license. +type ContentKeyPolicyPlayReadyPlayRight struct { + // FirstPlayExpiration - The amount of time that the license is valid after the license is first used to play content. + FirstPlayExpiration *string `json:"firstPlayExpiration,omitempty"` + // ScmsRestriction - Configures the Serial Copy Management System (SCMS) in the license. Must be between 0 and 3 inclusive. + ScmsRestriction *int32 `json:"scmsRestriction,omitempty"` + // AgcAndColorStripeRestriction - Configures Automatic Gain Control (AGC) and Color Stripe in the license. Must be between 0 and 3 inclusive. 
+	AgcAndColorStripeRestriction *int32 `json:"agcAndColorStripeRestriction,omitempty"`
+	// ExplicitAnalogTelevisionOutputRestriction - Configures the Explicit Analog Television Output Restriction in the license. Configuration data must be between 0 and 3 inclusive.
+	ExplicitAnalogTelevisionOutputRestriction *ContentKeyPolicyPlayReadyExplicitAnalogTelevisionRestriction `json:"explicitAnalogTelevisionOutputRestriction,omitempty"`
+	// DigitalVideoOnlyContentRestriction - Enables the Digital Video Only Content Restriction in the license.
+	DigitalVideoOnlyContentRestriction *bool `json:"digitalVideoOnlyContentRestriction,omitempty"`
+	// ImageConstraintForAnalogComponentVideoRestriction - Enables the Image Constraint For Analog Component Video Restriction in the license.
+	ImageConstraintForAnalogComponentVideoRestriction *bool `json:"imageConstraintForAnalogComponentVideoRestriction,omitempty"`
+	// ImageConstraintForAnalogComputerMonitorRestriction - Enables the Image Constraint For Analog Computer Monitor Restriction in the license.
+	ImageConstraintForAnalogComputerMonitorRestriction *bool `json:"imageConstraintForAnalogComputerMonitorRestriction,omitempty"`
+	// AllowPassingVideoContentToUnknownOutput - Configures Unknown output handling settings of the license. Possible values include: 'ContentKeyPolicyPlayReadyUnknownOutputPassingOptionUnknown', 'ContentKeyPolicyPlayReadyUnknownOutputPassingOptionNotAllowed', 'ContentKeyPolicyPlayReadyUnknownOutputPassingOptionAllowed', 'ContentKeyPolicyPlayReadyUnknownOutputPassingOptionAllowedWithVideoConstriction'
+	AllowPassingVideoContentToUnknownOutput ContentKeyPolicyPlayReadyUnknownOutputPassingOption `json:"allowPassingVideoContentToUnknownOutput,omitempty"`
+	// UncompressedDigitalVideoOpl - Specifies the output protection level for uncompressed digital video.
+	UncompressedDigitalVideoOpl *int32 `json:"uncompressedDigitalVideoOpl,omitempty"`
+	// CompressedDigitalVideoOpl - Specifies the output protection level for compressed digital video.
+	CompressedDigitalVideoOpl *int32 `json:"compressedDigitalVideoOpl,omitempty"`
+	// AnalogVideoOpl - Specifies the output protection level for analog video.
+	AnalogVideoOpl *int32 `json:"analogVideoOpl,omitempty"`
+	// CompressedDigitalAudioOpl - Specifies the output protection level for compressed digital audio.
+	CompressedDigitalAudioOpl *int32 `json:"compressedDigitalAudioOpl,omitempty"`
+	// UncompressedDigitalAudioOpl - Specifies the output protection level for uncompressed digital audio.
+	UncompressedDigitalAudioOpl *int32 `json:"uncompressedDigitalAudioOpl,omitempty"`
+}
+
+// ContentKeyPolicyProperties the properties of the Content Key Policy.
+type ContentKeyPolicyProperties struct {
+	autorest.Response `json:"-"`
+	// PolicyID - The legacy Policy ID.
+	PolicyID *uuid.UUID `json:"policyId,omitempty"`
+	// Created - The creation date of the Policy
+	Created *date.Time `json:"created,omitempty"`
+	// LastModified - The last modified date of the Policy
+	LastModified *date.Time `json:"lastModified,omitempty"`
+	// Description - A description for the Policy.
+	Description *string `json:"description,omitempty"`
+	// Options - The Key Policy options.
+	Options *[]ContentKeyPolicyOption `json:"options,omitempty"`
+}
+
+// BasicContentKeyPolicyRestriction base class for Content Key Policy restrictions. A derived class must be used to
+// create a restriction.
+type BasicContentKeyPolicyRestriction interface { + AsContentKeyPolicyOpenRestriction() (*ContentKeyPolicyOpenRestriction, bool) + AsContentKeyPolicyUnknownRestriction() (*ContentKeyPolicyUnknownRestriction, bool) + AsContentKeyPolicyTokenRestriction() (*ContentKeyPolicyTokenRestriction, bool) + AsContentKeyPolicyRestriction() (*ContentKeyPolicyRestriction, bool) +} + +// ContentKeyPolicyRestriction base class for Content Key Policy restrictions. A derived class must be used to +// create a restriction. +type ContentKeyPolicyRestriction struct { + // OdataType - Possible values include: 'OdataTypeContentKeyPolicyRestriction', 'OdataTypeMicrosoftMediaContentKeyPolicyOpenRestriction', 'OdataTypeMicrosoftMediaContentKeyPolicyUnknownRestriction', 'OdataTypeMicrosoftMediaContentKeyPolicyTokenRestriction' + OdataType OdataTypeBasicContentKeyPolicyRestriction `json:"@odata.type,omitempty"` +} + +func unmarshalBasicContentKeyPolicyRestriction(body []byte) (BasicContentKeyPolicyRestriction, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["@odata.type"] { + case string(OdataTypeMicrosoftMediaContentKeyPolicyOpenRestriction): + var ckpor ContentKeyPolicyOpenRestriction + err := json.Unmarshal(body, &ckpor) + return ckpor, err + case string(OdataTypeMicrosoftMediaContentKeyPolicyUnknownRestriction): + var ckpur ContentKeyPolicyUnknownRestriction + err := json.Unmarshal(body, &ckpur) + return ckpur, err + case string(OdataTypeMicrosoftMediaContentKeyPolicyTokenRestriction): + var ckptr ContentKeyPolicyTokenRestriction + err := json.Unmarshal(body, &ckptr) + return ckptr, err + default: + var ckpr ContentKeyPolicyRestriction + err := json.Unmarshal(body, &ckpr) + return ckpr, err + } +} +func unmarshalBasicContentKeyPolicyRestrictionArray(body []byte) ([]BasicContentKeyPolicyRestriction, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + 
return nil, err + } + + ckprArray := make([]BasicContentKeyPolicyRestriction, len(rawMessages)) + + for index, rawMessage := range rawMessages { + ckpr, err := unmarshalBasicContentKeyPolicyRestriction(*rawMessage) + if err != nil { + return nil, err + } + ckprArray[index] = ckpr + } + return ckprArray, nil +} + +// MarshalJSON is the custom marshaler for ContentKeyPolicyRestriction. +func (ckpr ContentKeyPolicyRestriction) MarshalJSON() ([]byte, error) { + ckpr.OdataType = OdataTypeContentKeyPolicyRestriction + objectMap := make(map[string]interface{}) + if ckpr.OdataType != "" { + objectMap["@odata.type"] = ckpr.OdataType + } + return json.Marshal(objectMap) +} + +// AsContentKeyPolicyOpenRestriction is the BasicContentKeyPolicyRestriction implementation for ContentKeyPolicyRestriction. +func (ckpr ContentKeyPolicyRestriction) AsContentKeyPolicyOpenRestriction() (*ContentKeyPolicyOpenRestriction, bool) { + return nil, false +} + +// AsContentKeyPolicyUnknownRestriction is the BasicContentKeyPolicyRestriction implementation for ContentKeyPolicyRestriction. +func (ckpr ContentKeyPolicyRestriction) AsContentKeyPolicyUnknownRestriction() (*ContentKeyPolicyUnknownRestriction, bool) { + return nil, false +} + +// AsContentKeyPolicyTokenRestriction is the BasicContentKeyPolicyRestriction implementation for ContentKeyPolicyRestriction. +func (ckpr ContentKeyPolicyRestriction) AsContentKeyPolicyTokenRestriction() (*ContentKeyPolicyTokenRestriction, bool) { + return nil, false +} + +// AsContentKeyPolicyRestriction is the BasicContentKeyPolicyRestriction implementation for ContentKeyPolicyRestriction. +func (ckpr ContentKeyPolicyRestriction) AsContentKeyPolicyRestriction() (*ContentKeyPolicyRestriction, bool) { + return &ckpr, true +} + +// AsBasicContentKeyPolicyRestriction is the BasicContentKeyPolicyRestriction implementation for ContentKeyPolicyRestriction. 
+func (ckpr ContentKeyPolicyRestriction) AsBasicContentKeyPolicyRestriction() (BasicContentKeyPolicyRestriction, bool) { + return &ckpr, true +} + +// BasicContentKeyPolicyRestrictionTokenKey base class for Content Key Policy key for token validation. A derived class +// must be used to create a token key. +type BasicContentKeyPolicyRestrictionTokenKey interface { + AsContentKeyPolicySymmetricTokenKey() (*ContentKeyPolicySymmetricTokenKey, bool) + AsContentKeyPolicyRsaTokenKey() (*ContentKeyPolicyRsaTokenKey, bool) + AsContentKeyPolicyX509CertificateTokenKey() (*ContentKeyPolicyX509CertificateTokenKey, bool) + AsContentKeyPolicyRestrictionTokenKey() (*ContentKeyPolicyRestrictionTokenKey, bool) +} + +// ContentKeyPolicyRestrictionTokenKey base class for Content Key Policy key for token validation. A derived +// class must be used to create a token key. +type ContentKeyPolicyRestrictionTokenKey struct { + // OdataType - Possible values include: 'OdataTypeContentKeyPolicyRestrictionTokenKey', 'OdataTypeMicrosoftMediaContentKeyPolicySymmetricTokenKey', 'OdataTypeMicrosoftMediaContentKeyPolicyRsaTokenKey', 'OdataTypeMicrosoftMediaContentKeyPolicyX509CertificateTokenKey' + OdataType OdataTypeBasicContentKeyPolicyRestrictionTokenKey `json:"@odata.type,omitempty"` +} + +func unmarshalBasicContentKeyPolicyRestrictionTokenKey(body []byte) (BasicContentKeyPolicyRestrictionTokenKey, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["@odata.type"] { + case string(OdataTypeMicrosoftMediaContentKeyPolicySymmetricTokenKey): + var ckpstk ContentKeyPolicySymmetricTokenKey + err := json.Unmarshal(body, &ckpstk) + return ckpstk, err + case string(OdataTypeMicrosoftMediaContentKeyPolicyRsaTokenKey): + var ckprtk ContentKeyPolicyRsaTokenKey + err := json.Unmarshal(body, &ckprtk) + return ckprtk, err + case string(OdataTypeMicrosoftMediaContentKeyPolicyX509CertificateTokenKey): + var ckpxctk 
ContentKeyPolicyX509CertificateTokenKey + err := json.Unmarshal(body, &ckpxctk) + return ckpxctk, err + default: + var ckprtk ContentKeyPolicyRestrictionTokenKey + err := json.Unmarshal(body, &ckprtk) + return ckprtk, err + } +} +func unmarshalBasicContentKeyPolicyRestrictionTokenKeyArray(body []byte) ([]BasicContentKeyPolicyRestrictionTokenKey, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + ckprtkArray := make([]BasicContentKeyPolicyRestrictionTokenKey, len(rawMessages)) + + for index, rawMessage := range rawMessages { + ckprtk, err := unmarshalBasicContentKeyPolicyRestrictionTokenKey(*rawMessage) + if err != nil { + return nil, err + } + ckprtkArray[index] = ckprtk + } + return ckprtkArray, nil +} + +// MarshalJSON is the custom marshaler for ContentKeyPolicyRestrictionTokenKey. +func (ckprtk ContentKeyPolicyRestrictionTokenKey) MarshalJSON() ([]byte, error) { + ckprtk.OdataType = OdataTypeContentKeyPolicyRestrictionTokenKey + objectMap := make(map[string]interface{}) + if ckprtk.OdataType != "" { + objectMap["@odata.type"] = ckprtk.OdataType + } + return json.Marshal(objectMap) +} + +// AsContentKeyPolicySymmetricTokenKey is the BasicContentKeyPolicyRestrictionTokenKey implementation for ContentKeyPolicyRestrictionTokenKey. +func (ckprtk ContentKeyPolicyRestrictionTokenKey) AsContentKeyPolicySymmetricTokenKey() (*ContentKeyPolicySymmetricTokenKey, bool) { + return nil, false +} + +// AsContentKeyPolicyRsaTokenKey is the BasicContentKeyPolicyRestrictionTokenKey implementation for ContentKeyPolicyRestrictionTokenKey. +func (ckprtk ContentKeyPolicyRestrictionTokenKey) AsContentKeyPolicyRsaTokenKey() (*ContentKeyPolicyRsaTokenKey, bool) { + return nil, false +} + +// AsContentKeyPolicyX509CertificateTokenKey is the BasicContentKeyPolicyRestrictionTokenKey implementation for ContentKeyPolicyRestrictionTokenKey. 
+func (ckprtk ContentKeyPolicyRestrictionTokenKey) AsContentKeyPolicyX509CertificateTokenKey() (*ContentKeyPolicyX509CertificateTokenKey, bool) { + return nil, false +} + +// AsContentKeyPolicyRestrictionTokenKey is the BasicContentKeyPolicyRestrictionTokenKey implementation for ContentKeyPolicyRestrictionTokenKey. +func (ckprtk ContentKeyPolicyRestrictionTokenKey) AsContentKeyPolicyRestrictionTokenKey() (*ContentKeyPolicyRestrictionTokenKey, bool) { + return &ckprtk, true +} + +// AsBasicContentKeyPolicyRestrictionTokenKey is the BasicContentKeyPolicyRestrictionTokenKey implementation for ContentKeyPolicyRestrictionTokenKey. +func (ckprtk ContentKeyPolicyRestrictionTokenKey) AsBasicContentKeyPolicyRestrictionTokenKey() (BasicContentKeyPolicyRestrictionTokenKey, bool) { + return &ckprtk, true +} + +// ContentKeyPolicyRsaTokenKey specifies a RSA key for token validation +type ContentKeyPolicyRsaTokenKey struct { + // Exponent - The RSA Parameter exponent + Exponent *[]byte `json:"exponent,omitempty"` + // Modulus - The RSA Parameter modulus + Modulus *[]byte `json:"modulus,omitempty"` + // OdataType - Possible values include: 'OdataTypeContentKeyPolicyRestrictionTokenKey', 'OdataTypeMicrosoftMediaContentKeyPolicySymmetricTokenKey', 'OdataTypeMicrosoftMediaContentKeyPolicyRsaTokenKey', 'OdataTypeMicrosoftMediaContentKeyPolicyX509CertificateTokenKey' + OdataType OdataTypeBasicContentKeyPolicyRestrictionTokenKey `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ContentKeyPolicyRsaTokenKey. 
+func (ckprtk ContentKeyPolicyRsaTokenKey) MarshalJSON() ([]byte, error) { + ckprtk.OdataType = OdataTypeMicrosoftMediaContentKeyPolicyRsaTokenKey + objectMap := make(map[string]interface{}) + if ckprtk.Exponent != nil { + objectMap["exponent"] = ckprtk.Exponent + } + if ckprtk.Modulus != nil { + objectMap["modulus"] = ckprtk.Modulus + } + if ckprtk.OdataType != "" { + objectMap["@odata.type"] = ckprtk.OdataType + } + return json.Marshal(objectMap) +} + +// AsContentKeyPolicySymmetricTokenKey is the BasicContentKeyPolicyRestrictionTokenKey implementation for ContentKeyPolicyRsaTokenKey. +func (ckprtk ContentKeyPolicyRsaTokenKey) AsContentKeyPolicySymmetricTokenKey() (*ContentKeyPolicySymmetricTokenKey, bool) { + return nil, false +} + +// AsContentKeyPolicyRsaTokenKey is the BasicContentKeyPolicyRestrictionTokenKey implementation for ContentKeyPolicyRsaTokenKey. +func (ckprtk ContentKeyPolicyRsaTokenKey) AsContentKeyPolicyRsaTokenKey() (*ContentKeyPolicyRsaTokenKey, bool) { + return &ckprtk, true +} + +// AsContentKeyPolicyX509CertificateTokenKey is the BasicContentKeyPolicyRestrictionTokenKey implementation for ContentKeyPolicyRsaTokenKey. +func (ckprtk ContentKeyPolicyRsaTokenKey) AsContentKeyPolicyX509CertificateTokenKey() (*ContentKeyPolicyX509CertificateTokenKey, bool) { + return nil, false +} + +// AsContentKeyPolicyRestrictionTokenKey is the BasicContentKeyPolicyRestrictionTokenKey implementation for ContentKeyPolicyRsaTokenKey. +func (ckprtk ContentKeyPolicyRsaTokenKey) AsContentKeyPolicyRestrictionTokenKey() (*ContentKeyPolicyRestrictionTokenKey, bool) { + return nil, false +} + +// AsBasicContentKeyPolicyRestrictionTokenKey is the BasicContentKeyPolicyRestrictionTokenKey implementation for ContentKeyPolicyRsaTokenKey. 
+func (ckprtk ContentKeyPolicyRsaTokenKey) AsBasicContentKeyPolicyRestrictionTokenKey() (BasicContentKeyPolicyRestrictionTokenKey, bool) { + return &ckprtk, true +} + +// ContentKeyPolicySymmetricTokenKey specifies a symmetric key for token validation. +type ContentKeyPolicySymmetricTokenKey struct { + // KeyValue - The key value of the key + KeyValue *[]byte `json:"keyValue,omitempty"` + // OdataType - Possible values include: 'OdataTypeContentKeyPolicyRestrictionTokenKey', 'OdataTypeMicrosoftMediaContentKeyPolicySymmetricTokenKey', 'OdataTypeMicrosoftMediaContentKeyPolicyRsaTokenKey', 'OdataTypeMicrosoftMediaContentKeyPolicyX509CertificateTokenKey' + OdataType OdataTypeBasicContentKeyPolicyRestrictionTokenKey `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ContentKeyPolicySymmetricTokenKey. +func (ckpstk ContentKeyPolicySymmetricTokenKey) MarshalJSON() ([]byte, error) { + ckpstk.OdataType = OdataTypeMicrosoftMediaContentKeyPolicySymmetricTokenKey + objectMap := make(map[string]interface{}) + if ckpstk.KeyValue != nil { + objectMap["keyValue"] = ckpstk.KeyValue + } + if ckpstk.OdataType != "" { + objectMap["@odata.type"] = ckpstk.OdataType + } + return json.Marshal(objectMap) +} + +// AsContentKeyPolicySymmetricTokenKey is the BasicContentKeyPolicyRestrictionTokenKey implementation for ContentKeyPolicySymmetricTokenKey. +func (ckpstk ContentKeyPolicySymmetricTokenKey) AsContentKeyPolicySymmetricTokenKey() (*ContentKeyPolicySymmetricTokenKey, bool) { + return &ckpstk, true +} + +// AsContentKeyPolicyRsaTokenKey is the BasicContentKeyPolicyRestrictionTokenKey implementation for ContentKeyPolicySymmetricTokenKey. +func (ckpstk ContentKeyPolicySymmetricTokenKey) AsContentKeyPolicyRsaTokenKey() (*ContentKeyPolicyRsaTokenKey, bool) { + return nil, false +} + +// AsContentKeyPolicyX509CertificateTokenKey is the BasicContentKeyPolicyRestrictionTokenKey implementation for ContentKeyPolicySymmetricTokenKey. 
+func (ckpstk ContentKeyPolicySymmetricTokenKey) AsContentKeyPolicyX509CertificateTokenKey() (*ContentKeyPolicyX509CertificateTokenKey, bool) { + return nil, false +} + +// AsContentKeyPolicyRestrictionTokenKey is the BasicContentKeyPolicyRestrictionTokenKey implementation for ContentKeyPolicySymmetricTokenKey. +func (ckpstk ContentKeyPolicySymmetricTokenKey) AsContentKeyPolicyRestrictionTokenKey() (*ContentKeyPolicyRestrictionTokenKey, bool) { + return nil, false +} + +// AsBasicContentKeyPolicyRestrictionTokenKey is the BasicContentKeyPolicyRestrictionTokenKey implementation for ContentKeyPolicySymmetricTokenKey. +func (ckpstk ContentKeyPolicySymmetricTokenKey) AsBasicContentKeyPolicyRestrictionTokenKey() (BasicContentKeyPolicyRestrictionTokenKey, bool) { + return &ckpstk, true +} + +// ContentKeyPolicyTokenClaim represents a token claim. +type ContentKeyPolicyTokenClaim struct { + // ClaimType - Token claim type. + ClaimType *string `json:"claimType,omitempty"` + // ClaimValue - Token claim value. + ClaimValue *string `json:"claimValue,omitempty"` +} + +// ContentKeyPolicyTokenRestriction represents a token restriction. Provided token must match these +// requirements for successful license or key delivery. +type ContentKeyPolicyTokenRestriction struct { + // Issuer - The token issuer. + Issuer *string `json:"issuer,omitempty"` + // Audience - The audience for the token. + Audience *string `json:"audience,omitempty"` + // PrimaryVerificationKey - The primary verification key. + PrimaryVerificationKey BasicContentKeyPolicyRestrictionTokenKey `json:"primaryVerificationKey,omitempty"` + // AlternateVerificationKeys - A list of alternative verification keys. + AlternateVerificationKeys *[]BasicContentKeyPolicyRestrictionTokenKey `json:"alternateVerificationKeys,omitempty"` + // RequiredClaims - A list of required token claims. + RequiredClaims *[]ContentKeyPolicyTokenClaim `json:"requiredClaims,omitempty"` + // RestrictionTokenType - The type of token. 
Possible values include: 'ContentKeyPolicyRestrictionTokenTypeUnknown', 'ContentKeyPolicyRestrictionTokenTypeSwt', 'ContentKeyPolicyRestrictionTokenTypeJwt' + RestrictionTokenType ContentKeyPolicyRestrictionTokenType `json:"restrictionTokenType,omitempty"` + // OpenIDConnectDiscoveryDocument - The OpenID connect discovery document. + OpenIDConnectDiscoveryDocument *string `json:"openIdConnectDiscoveryDocument,omitempty"` + // OdataType - Possible values include: 'OdataTypeContentKeyPolicyRestriction', 'OdataTypeMicrosoftMediaContentKeyPolicyOpenRestriction', 'OdataTypeMicrosoftMediaContentKeyPolicyUnknownRestriction', 'OdataTypeMicrosoftMediaContentKeyPolicyTokenRestriction' + OdataType OdataTypeBasicContentKeyPolicyRestriction `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ContentKeyPolicyTokenRestriction. +func (ckptr ContentKeyPolicyTokenRestriction) MarshalJSON() ([]byte, error) { + ckptr.OdataType = OdataTypeMicrosoftMediaContentKeyPolicyTokenRestriction + objectMap := make(map[string]interface{}) + if ckptr.Issuer != nil { + objectMap["issuer"] = ckptr.Issuer + } + if ckptr.Audience != nil { + objectMap["audience"] = ckptr.Audience + } + objectMap["primaryVerificationKey"] = ckptr.PrimaryVerificationKey + if ckptr.AlternateVerificationKeys != nil { + objectMap["alternateVerificationKeys"] = ckptr.AlternateVerificationKeys + } + if ckptr.RequiredClaims != nil { + objectMap["requiredClaims"] = ckptr.RequiredClaims + } + if ckptr.RestrictionTokenType != "" { + objectMap["restrictionTokenType"] = ckptr.RestrictionTokenType + } + if ckptr.OpenIDConnectDiscoveryDocument != nil { + objectMap["openIdConnectDiscoveryDocument"] = ckptr.OpenIDConnectDiscoveryDocument + } + if ckptr.OdataType != "" { + objectMap["@odata.type"] = ckptr.OdataType + } + return json.Marshal(objectMap) +} + +// AsContentKeyPolicyOpenRestriction is the BasicContentKeyPolicyRestriction implementation for ContentKeyPolicyTokenRestriction. 
+func (ckptr ContentKeyPolicyTokenRestriction) AsContentKeyPolicyOpenRestriction() (*ContentKeyPolicyOpenRestriction, bool) { + return nil, false +} + +// AsContentKeyPolicyUnknownRestriction is the BasicContentKeyPolicyRestriction implementation for ContentKeyPolicyTokenRestriction. +func (ckptr ContentKeyPolicyTokenRestriction) AsContentKeyPolicyUnknownRestriction() (*ContentKeyPolicyUnknownRestriction, bool) { + return nil, false +} + +// AsContentKeyPolicyTokenRestriction is the BasicContentKeyPolicyRestriction implementation for ContentKeyPolicyTokenRestriction. +func (ckptr ContentKeyPolicyTokenRestriction) AsContentKeyPolicyTokenRestriction() (*ContentKeyPolicyTokenRestriction, bool) { + return &ckptr, true +} + +// AsContentKeyPolicyRestriction is the BasicContentKeyPolicyRestriction implementation for ContentKeyPolicyTokenRestriction. +func (ckptr ContentKeyPolicyTokenRestriction) AsContentKeyPolicyRestriction() (*ContentKeyPolicyRestriction, bool) { + return nil, false +} + +// AsBasicContentKeyPolicyRestriction is the BasicContentKeyPolicyRestriction implementation for ContentKeyPolicyTokenRestriction. +func (ckptr ContentKeyPolicyTokenRestriction) AsBasicContentKeyPolicyRestriction() (BasicContentKeyPolicyRestriction, bool) { + return &ckptr, true +} + +// UnmarshalJSON is the custom unmarshaler for ContentKeyPolicyTokenRestriction struct. 
+func (ckptr *ContentKeyPolicyTokenRestriction) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "issuer": + if v != nil { + var issuer string + err = json.Unmarshal(*v, &issuer) + if err != nil { + return err + } + ckptr.Issuer = &issuer + } + case "audience": + if v != nil { + var audience string + err = json.Unmarshal(*v, &audience) + if err != nil { + return err + } + ckptr.Audience = &audience + } + case "primaryVerificationKey": + if v != nil { + primaryVerificationKey, err := unmarshalBasicContentKeyPolicyRestrictionTokenKey(*v) + if err != nil { + return err + } + ckptr.PrimaryVerificationKey = primaryVerificationKey + } + case "alternateVerificationKeys": + if v != nil { + alternateVerificationKeys, err := unmarshalBasicContentKeyPolicyRestrictionTokenKeyArray(*v) + if err != nil { + return err + } + ckptr.AlternateVerificationKeys = &alternateVerificationKeys + } + case "requiredClaims": + if v != nil { + var requiredClaims []ContentKeyPolicyTokenClaim + err = json.Unmarshal(*v, &requiredClaims) + if err != nil { + return err + } + ckptr.RequiredClaims = &requiredClaims + } + case "restrictionTokenType": + if v != nil { + var restrictionTokenType ContentKeyPolicyRestrictionTokenType + err = json.Unmarshal(*v, &restrictionTokenType) + if err != nil { + return err + } + ckptr.RestrictionTokenType = restrictionTokenType + } + case "openIdConnectDiscoveryDocument": + if v != nil { + var openIDConnectDiscoveryDocument string + err = json.Unmarshal(*v, &openIDConnectDiscoveryDocument) + if err != nil { + return err + } + ckptr.OpenIDConnectDiscoveryDocument = &openIDConnectDiscoveryDocument + } + case "@odata.type": + if v != nil { + var odataType OdataTypeBasicContentKeyPolicyRestriction + err = json.Unmarshal(*v, &odataType) + if err != nil { + return err + } + ckptr.OdataType = odataType + } + } + } + + return nil +} + +// 
ContentKeyPolicyUnknownConfiguration represents a ContentKeyPolicyConfiguration that is unavailable in +// the current API version. +type ContentKeyPolicyUnknownConfiguration struct { + // OdataType - Possible values include: 'OdataTypeContentKeyPolicyConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyClearKeyConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyUnknownConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyWidevineConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyFairPlayConfiguration' + OdataType OdataTypeBasicContentKeyPolicyConfiguration `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ContentKeyPolicyUnknownConfiguration. +func (ckpuc ContentKeyPolicyUnknownConfiguration) MarshalJSON() ([]byte, error) { + ckpuc.OdataType = OdataTypeMicrosoftMediaContentKeyPolicyUnknownConfiguration + objectMap := make(map[string]interface{}) + if ckpuc.OdataType != "" { + objectMap["@odata.type"] = ckpuc.OdataType + } + return json.Marshal(objectMap) +} + +// AsContentKeyPolicyClearKeyConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyUnknownConfiguration. +func (ckpuc ContentKeyPolicyUnknownConfiguration) AsContentKeyPolicyClearKeyConfiguration() (*ContentKeyPolicyClearKeyConfiguration, bool) { + return nil, false +} + +// AsContentKeyPolicyUnknownConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyUnknownConfiguration. +func (ckpuc ContentKeyPolicyUnknownConfiguration) AsContentKeyPolicyUnknownConfiguration() (*ContentKeyPolicyUnknownConfiguration, bool) { + return &ckpuc, true +} + +// AsContentKeyPolicyWidevineConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyUnknownConfiguration. 
+func (ckpuc ContentKeyPolicyUnknownConfiguration) AsContentKeyPolicyWidevineConfiguration() (*ContentKeyPolicyWidevineConfiguration, bool) { + return nil, false +} + +// AsContentKeyPolicyPlayReadyConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyUnknownConfiguration. +func (ckpuc ContentKeyPolicyUnknownConfiguration) AsContentKeyPolicyPlayReadyConfiguration() (*ContentKeyPolicyPlayReadyConfiguration, bool) { + return nil, false +} + +// AsContentKeyPolicyFairPlayConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyUnknownConfiguration. +func (ckpuc ContentKeyPolicyUnknownConfiguration) AsContentKeyPolicyFairPlayConfiguration() (*ContentKeyPolicyFairPlayConfiguration, bool) { + return nil, false +} + +// AsContentKeyPolicyConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyUnknownConfiguration. +func (ckpuc ContentKeyPolicyUnknownConfiguration) AsContentKeyPolicyConfiguration() (*ContentKeyPolicyConfiguration, bool) { + return nil, false +} + +// AsBasicContentKeyPolicyConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyUnknownConfiguration. +func (ckpuc ContentKeyPolicyUnknownConfiguration) AsBasicContentKeyPolicyConfiguration() (BasicContentKeyPolicyConfiguration, bool) { + return &ckpuc, true +} + +// ContentKeyPolicyUnknownRestriction represents a ContentKeyPolicyRestriction that is unavailable in the +// current API version. +type ContentKeyPolicyUnknownRestriction struct { + // OdataType - Possible values include: 'OdataTypeContentKeyPolicyRestriction', 'OdataTypeMicrosoftMediaContentKeyPolicyOpenRestriction', 'OdataTypeMicrosoftMediaContentKeyPolicyUnknownRestriction', 'OdataTypeMicrosoftMediaContentKeyPolicyTokenRestriction' + OdataType OdataTypeBasicContentKeyPolicyRestriction `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ContentKeyPolicyUnknownRestriction. 
+func (ckpur ContentKeyPolicyUnknownRestriction) MarshalJSON() ([]byte, error) { + ckpur.OdataType = OdataTypeMicrosoftMediaContentKeyPolicyUnknownRestriction + objectMap := make(map[string]interface{}) + if ckpur.OdataType != "" { + objectMap["@odata.type"] = ckpur.OdataType + } + return json.Marshal(objectMap) +} + +// AsContentKeyPolicyOpenRestriction is the BasicContentKeyPolicyRestriction implementation for ContentKeyPolicyUnknownRestriction. +func (ckpur ContentKeyPolicyUnknownRestriction) AsContentKeyPolicyOpenRestriction() (*ContentKeyPolicyOpenRestriction, bool) { + return nil, false +} + +// AsContentKeyPolicyUnknownRestriction is the BasicContentKeyPolicyRestriction implementation for ContentKeyPolicyUnknownRestriction. +func (ckpur ContentKeyPolicyUnknownRestriction) AsContentKeyPolicyUnknownRestriction() (*ContentKeyPolicyUnknownRestriction, bool) { + return &ckpur, true +} + +// AsContentKeyPolicyTokenRestriction is the BasicContentKeyPolicyRestriction implementation for ContentKeyPolicyUnknownRestriction. +func (ckpur ContentKeyPolicyUnknownRestriction) AsContentKeyPolicyTokenRestriction() (*ContentKeyPolicyTokenRestriction, bool) { + return nil, false +} + +// AsContentKeyPolicyRestriction is the BasicContentKeyPolicyRestriction implementation for ContentKeyPolicyUnknownRestriction. +func (ckpur ContentKeyPolicyUnknownRestriction) AsContentKeyPolicyRestriction() (*ContentKeyPolicyRestriction, bool) { + return nil, false +} + +// AsBasicContentKeyPolicyRestriction is the BasicContentKeyPolicyRestriction implementation for ContentKeyPolicyUnknownRestriction. +func (ckpur ContentKeyPolicyUnknownRestriction) AsBasicContentKeyPolicyRestriction() (BasicContentKeyPolicyRestriction, bool) { + return &ckpur, true +} + +// ContentKeyPolicyWidevineConfiguration specifies a configuration for Widevine licenses. +type ContentKeyPolicyWidevineConfiguration struct { + // WidevineTemplate - The Widevine template. 
+ WidevineTemplate *string `json:"widevineTemplate,omitempty"` + // OdataType - Possible values include: 'OdataTypeContentKeyPolicyConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyClearKeyConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyUnknownConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyWidevineConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyPlayReadyConfiguration', 'OdataTypeMicrosoftMediaContentKeyPolicyFairPlayConfiguration' + OdataType OdataTypeBasicContentKeyPolicyConfiguration `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ContentKeyPolicyWidevineConfiguration. +func (ckpwc ContentKeyPolicyWidevineConfiguration) MarshalJSON() ([]byte, error) { + ckpwc.OdataType = OdataTypeMicrosoftMediaContentKeyPolicyWidevineConfiguration + objectMap := make(map[string]interface{}) + if ckpwc.WidevineTemplate != nil { + objectMap["widevineTemplate"] = ckpwc.WidevineTemplate + } + if ckpwc.OdataType != "" { + objectMap["@odata.type"] = ckpwc.OdataType + } + return json.Marshal(objectMap) +} + +// AsContentKeyPolicyClearKeyConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyWidevineConfiguration. +func (ckpwc ContentKeyPolicyWidevineConfiguration) AsContentKeyPolicyClearKeyConfiguration() (*ContentKeyPolicyClearKeyConfiguration, bool) { + return nil, false +} + +// AsContentKeyPolicyUnknownConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyWidevineConfiguration. +func (ckpwc ContentKeyPolicyWidevineConfiguration) AsContentKeyPolicyUnknownConfiguration() (*ContentKeyPolicyUnknownConfiguration, bool) { + return nil, false +} + +// AsContentKeyPolicyWidevineConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyWidevineConfiguration. 
+func (ckpwc ContentKeyPolicyWidevineConfiguration) AsContentKeyPolicyWidevineConfiguration() (*ContentKeyPolicyWidevineConfiguration, bool) { + return &ckpwc, true +} + +// AsContentKeyPolicyPlayReadyConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyWidevineConfiguration. +func (ckpwc ContentKeyPolicyWidevineConfiguration) AsContentKeyPolicyPlayReadyConfiguration() (*ContentKeyPolicyPlayReadyConfiguration, bool) { + return nil, false +} + +// AsContentKeyPolicyFairPlayConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyWidevineConfiguration. +func (ckpwc ContentKeyPolicyWidevineConfiguration) AsContentKeyPolicyFairPlayConfiguration() (*ContentKeyPolicyFairPlayConfiguration, bool) { + return nil, false +} + +// AsContentKeyPolicyConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyWidevineConfiguration. +func (ckpwc ContentKeyPolicyWidevineConfiguration) AsContentKeyPolicyConfiguration() (*ContentKeyPolicyConfiguration, bool) { + return nil, false +} + +// AsBasicContentKeyPolicyConfiguration is the BasicContentKeyPolicyConfiguration implementation for ContentKeyPolicyWidevineConfiguration. +func (ckpwc ContentKeyPolicyWidevineConfiguration) AsBasicContentKeyPolicyConfiguration() (BasicContentKeyPolicyConfiguration, bool) { + return &ckpwc, true +} + +// ContentKeyPolicyX509CertificateTokenKey specifies a certificate for token validation. 
+type ContentKeyPolicyX509CertificateTokenKey struct { + // RawBody - The raw data field of a certificate in PKCS 12 format (X509Certificate2 in .NET) + RawBody *[]byte `json:"rawBody,omitempty"` + // OdataType - Possible values include: 'OdataTypeContentKeyPolicyRestrictionTokenKey', 'OdataTypeMicrosoftMediaContentKeyPolicySymmetricTokenKey', 'OdataTypeMicrosoftMediaContentKeyPolicyRsaTokenKey', 'OdataTypeMicrosoftMediaContentKeyPolicyX509CertificateTokenKey' + OdataType OdataTypeBasicContentKeyPolicyRestrictionTokenKey `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ContentKeyPolicyX509CertificateTokenKey. +func (ckpxctk ContentKeyPolicyX509CertificateTokenKey) MarshalJSON() ([]byte, error) { + ckpxctk.OdataType = OdataTypeMicrosoftMediaContentKeyPolicyX509CertificateTokenKey + objectMap := make(map[string]interface{}) + if ckpxctk.RawBody != nil { + objectMap["rawBody"] = ckpxctk.RawBody + } + if ckpxctk.OdataType != "" { + objectMap["@odata.type"] = ckpxctk.OdataType + } + return json.Marshal(objectMap) +} + +// AsContentKeyPolicySymmetricTokenKey is the BasicContentKeyPolicyRestrictionTokenKey implementation for ContentKeyPolicyX509CertificateTokenKey. +func (ckpxctk ContentKeyPolicyX509CertificateTokenKey) AsContentKeyPolicySymmetricTokenKey() (*ContentKeyPolicySymmetricTokenKey, bool) { + return nil, false +} + +// AsContentKeyPolicyRsaTokenKey is the BasicContentKeyPolicyRestrictionTokenKey implementation for ContentKeyPolicyX509CertificateTokenKey. +func (ckpxctk ContentKeyPolicyX509CertificateTokenKey) AsContentKeyPolicyRsaTokenKey() (*ContentKeyPolicyRsaTokenKey, bool) { + return nil, false +} + +// AsContentKeyPolicyX509CertificateTokenKey is the BasicContentKeyPolicyRestrictionTokenKey implementation for ContentKeyPolicyX509CertificateTokenKey. 
+func (ckpxctk ContentKeyPolicyX509CertificateTokenKey) AsContentKeyPolicyX509CertificateTokenKey() (*ContentKeyPolicyX509CertificateTokenKey, bool) { + return &ckpxctk, true +} + +// AsContentKeyPolicyRestrictionTokenKey is the BasicContentKeyPolicyRestrictionTokenKey implementation for ContentKeyPolicyX509CertificateTokenKey. +func (ckpxctk ContentKeyPolicyX509CertificateTokenKey) AsContentKeyPolicyRestrictionTokenKey() (*ContentKeyPolicyRestrictionTokenKey, bool) { + return nil, false +} + +// AsBasicContentKeyPolicyRestrictionTokenKey is the BasicContentKeyPolicyRestrictionTokenKey implementation for ContentKeyPolicyX509CertificateTokenKey. +func (ckpxctk ContentKeyPolicyX509CertificateTokenKey) AsBasicContentKeyPolicyRestrictionTokenKey() (BasicContentKeyPolicyRestrictionTokenKey, bool) { + return &ckpxctk, true +} + +// CopyAudio a codec flag, which tells the encoder to copy the input audio bitstream. +type CopyAudio struct { + // Label - An optional label for the codec. The label can be used to control muxing behavior. + Label *string `json:"label,omitempty"` + // OdataType - Possible values include: 'OdataTypeCodec', 'OdataTypeMicrosoftMediaAudio', 'OdataTypeMicrosoftMediaAacAudio', 'OdataTypeMicrosoftMediaCopyVideo', 'OdataTypeMicrosoftMediaVideo', 'OdataTypeMicrosoftMediaImage', 'OdataTypeMicrosoftMediaCopyAudio', 'OdataTypeMicrosoftMediaH264Video', 'OdataTypeMicrosoftMediaJpgImage', 'OdataTypeMicrosoftMediaPngImage' + OdataType OdataTypeBasicCodec `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for CopyAudio. +func (ca CopyAudio) MarshalJSON() ([]byte, error) { + ca.OdataType = OdataTypeMicrosoftMediaCopyAudio + objectMap := make(map[string]interface{}) + if ca.Label != nil { + objectMap["label"] = ca.Label + } + if ca.OdataType != "" { + objectMap["@odata.type"] = ca.OdataType + } + return json.Marshal(objectMap) +} + +// AsAudio is the BasicCodec implementation for CopyAudio. 
+func (ca CopyAudio) AsAudio() (*Audio, bool) { + return nil, false +} + +// AsBasicAudio is the BasicCodec implementation for CopyAudio. +func (ca CopyAudio) AsBasicAudio() (BasicAudio, bool) { + return nil, false +} + +// AsAacAudio is the BasicCodec implementation for CopyAudio. +func (ca CopyAudio) AsAacAudio() (*AacAudio, bool) { + return nil, false +} + +// AsCopyVideo is the BasicCodec implementation for CopyAudio. +func (ca CopyAudio) AsCopyVideo() (*CopyVideo, bool) { + return nil, false +} + +// AsVideo is the BasicCodec implementation for CopyAudio. +func (ca CopyAudio) AsVideo() (*Video, bool) { + return nil, false +} + +// AsBasicVideo is the BasicCodec implementation for CopyAudio. +func (ca CopyAudio) AsBasicVideo() (BasicVideo, bool) { + return nil, false +} + +// AsImage is the BasicCodec implementation for CopyAudio. +func (ca CopyAudio) AsImage() (*Image, bool) { + return nil, false +} + +// AsBasicImage is the BasicCodec implementation for CopyAudio. +func (ca CopyAudio) AsBasicImage() (BasicImage, bool) { + return nil, false +} + +// AsCopyAudio is the BasicCodec implementation for CopyAudio. +func (ca CopyAudio) AsCopyAudio() (*CopyAudio, bool) { + return &ca, true +} + +// AsH264Video is the BasicCodec implementation for CopyAudio. +func (ca CopyAudio) AsH264Video() (*H264Video, bool) { + return nil, false +} + +// AsJpgImage is the BasicCodec implementation for CopyAudio. +func (ca CopyAudio) AsJpgImage() (*JpgImage, bool) { + return nil, false +} + +// AsPngImage is the BasicCodec implementation for CopyAudio. +func (ca CopyAudio) AsPngImage() (*PngImage, bool) { + return nil, false +} + +// AsCodec is the BasicCodec implementation for CopyAudio. +func (ca CopyAudio) AsCodec() (*Codec, bool) { + return nil, false +} + +// AsBasicCodec is the BasicCodec implementation for CopyAudio. 
+func (ca CopyAudio) AsBasicCodec() (BasicCodec, bool) { + return &ca, true +} + +// CopyVideo a codec flag, which tells the encoder to copy the input video bitstream without re-encoding. +type CopyVideo struct { + // Label - An optional label for the codec. The label can be used to control muxing behavior. + Label *string `json:"label,omitempty"` + // OdataType - Possible values include: 'OdataTypeCodec', 'OdataTypeMicrosoftMediaAudio', 'OdataTypeMicrosoftMediaAacAudio', 'OdataTypeMicrosoftMediaCopyVideo', 'OdataTypeMicrosoftMediaVideo', 'OdataTypeMicrosoftMediaImage', 'OdataTypeMicrosoftMediaCopyAudio', 'OdataTypeMicrosoftMediaH264Video', 'OdataTypeMicrosoftMediaJpgImage', 'OdataTypeMicrosoftMediaPngImage' + OdataType OdataTypeBasicCodec `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for CopyVideo. +func (cv CopyVideo) MarshalJSON() ([]byte, error) { + cv.OdataType = OdataTypeMicrosoftMediaCopyVideo + objectMap := make(map[string]interface{}) + if cv.Label != nil { + objectMap["label"] = cv.Label + } + if cv.OdataType != "" { + objectMap["@odata.type"] = cv.OdataType + } + return json.Marshal(objectMap) +} + +// AsAudio is the BasicCodec implementation for CopyVideo. +func (cv CopyVideo) AsAudio() (*Audio, bool) { + return nil, false +} + +// AsBasicAudio is the BasicCodec implementation for CopyVideo. +func (cv CopyVideo) AsBasicAudio() (BasicAudio, bool) { + return nil, false +} + +// AsAacAudio is the BasicCodec implementation for CopyVideo. +func (cv CopyVideo) AsAacAudio() (*AacAudio, bool) { + return nil, false +} + +// AsCopyVideo is the BasicCodec implementation for CopyVideo. +func (cv CopyVideo) AsCopyVideo() (*CopyVideo, bool) { + return &cv, true +} + +// AsVideo is the BasicCodec implementation for CopyVideo. +func (cv CopyVideo) AsVideo() (*Video, bool) { + return nil, false +} + +// AsBasicVideo is the BasicCodec implementation for CopyVideo. 
+func (cv CopyVideo) AsBasicVideo() (BasicVideo, bool) { + return nil, false +} + +// AsImage is the BasicCodec implementation for CopyVideo. +func (cv CopyVideo) AsImage() (*Image, bool) { + return nil, false +} + +// AsBasicImage is the BasicCodec implementation for CopyVideo. +func (cv CopyVideo) AsBasicImage() (BasicImage, bool) { + return nil, false +} + +// AsCopyAudio is the BasicCodec implementation for CopyVideo. +func (cv CopyVideo) AsCopyAudio() (*CopyAudio, bool) { + return nil, false +} + +// AsH264Video is the BasicCodec implementation for CopyVideo. +func (cv CopyVideo) AsH264Video() (*H264Video, bool) { + return nil, false +} + +// AsJpgImage is the BasicCodec implementation for CopyVideo. +func (cv CopyVideo) AsJpgImage() (*JpgImage, bool) { + return nil, false +} + +// AsPngImage is the BasicCodec implementation for CopyVideo. +func (cv CopyVideo) AsPngImage() (*PngImage, bool) { + return nil, false +} + +// AsCodec is the BasicCodec implementation for CopyVideo. +func (cv CopyVideo) AsCodec() (*Codec, bool) { + return nil, false +} + +// AsBasicCodec is the BasicCodec implementation for CopyVideo. +func (cv CopyVideo) AsBasicCodec() (BasicCodec, bool) { + return &cv, true +} + +// CrossSiteAccessPolicies the client access policy. +type CrossSiteAccessPolicies struct { + // ClientAccessPolicy - The content of clientaccesspolicy.xml used by Silverlight. + ClientAccessPolicy *string `json:"clientAccessPolicy,omitempty"` + // CrossDomainPolicy - The content of crossdomain.xml used by Silverlight. 
+ CrossDomainPolicy *string `json:"crossDomainPolicy,omitempty"` +} + +// DefaultKey class to specify properties of default content key for each encryption scheme +type DefaultKey struct { + // Label - Label can be used to specify Content Key when creating a Streaming Locator + Label *string `json:"label,omitempty"` + // PolicyName - Policy used by Default Key + PolicyName *string `json:"policyName,omitempty"` +} + +// Deinterlace describes the de-interlacing settings. +type Deinterlace struct { + // Parity - The field parity for de-interlacing, defaults to Auto. Possible values include: 'Auto', 'TopFieldFirst', 'BottomFieldFirst' + Parity DeinterlaceParity `json:"parity,omitempty"` + // Mode - The deinterlacing mode. Defaults to AutoPixelAdaptive. Possible values include: 'Off', 'AutoPixelAdaptive' + Mode DeinterlaceMode `json:"mode,omitempty"` +} + +// EnabledProtocols class to specify which protocols are enabled +type EnabledProtocols struct { + // Download - Enable Download protocol or not + Download *bool `json:"download,omitempty"` + // Dash - Enable DASH protocol or not + Dash *bool `json:"dash,omitempty"` + // Hls - Enable HLS protocol or not + Hls *bool `json:"hls,omitempty"` + // SmoothStreaming - Enable SmoothStreaming protocol or not + SmoothStreaming *bool `json:"smoothStreaming,omitempty"` +} + +// EntityNameAvailabilityCheckOutput the response from the check name availability request. +type EntityNameAvailabilityCheckOutput struct { + autorest.Response `json:"-"` + // NameAvailable - Specifies if the name is available. + NameAvailable *bool `json:"nameAvailable,omitempty"` + // Reason - Specifies the reason if the name is not available. + Reason *string `json:"reason,omitempty"` + // Message - Specifies the detailed reason if the name is not available. 
+ Message *string `json:"message,omitempty"` +} + +// EnvelopeEncryption class for EnvelopeEncryption encryption scheme +type EnvelopeEncryption struct { + // EnabledProtocols - Representing supported protocols + EnabledProtocols *EnabledProtocols `json:"enabledProtocols,omitempty"` + // ClearTracks - Representing which tracks should not be encrypted + ClearTracks *[]TrackSelection `json:"clearTracks,omitempty"` + // ContentKeys - Representing default content key for each encryption scheme and separate content keys for specific tracks + ContentKeys *StreamingPolicyContentKeys `json:"contentKeys,omitempty"` + // CustomKeyAcquisitionURLTemplate - KeyAcquisitionUrlTemplate is used to point to user specified service to delivery content keys + CustomKeyAcquisitionURLTemplate *string `json:"customKeyAcquisitionUrlTemplate,omitempty"` +} + +// FilterProperties the Media Filter properties. +type FilterProperties struct { + // PresentationTimeRange - The presentation time range. + PresentationTimeRange *PresentationTimeRange `json:"presentationTimeRange,omitempty"` + // FirstQuality - The first quality. + FirstQuality *FirstQuality `json:"firstQuality,omitempty"` + // Tracks - The tracks selection conditions. + Tracks *[]FilterTrackSelection `json:"tracks,omitempty"` +} + +// Filters describes all the filtering operations, such as de-interlacing, rotation etc. that are to be +// applied to the input media before encoding. +type Filters struct { + // Deinterlace - The de-interlacing settings. + Deinterlace *Deinterlace `json:"deinterlace,omitempty"` + // Rotation - The rotation, if any, to be applied to the input video, before it is encoded. Default is Auto. Possible values include: 'RotationAuto', 'RotationNone', 'RotationRotate0', 'RotationRotate90', 'RotationRotate180', 'RotationRotate270' + Rotation Rotation `json:"rotation,omitempty"` + // Crop - The parameters for the rectangular window with which to crop the input video. 
+	Crop *Rectangle `json:"crop,omitempty"`
+	// Overlays - The properties of overlays to be applied to the input video. These could be audio, image or video overlays.
+	Overlays *[]BasicOverlay `json:"overlays,omitempty"`
+}
+
+// UnmarshalJSON is the custom unmarshaler for Filters struct.
+func (f *Filters) UnmarshalJSON(body []byte) error {
+	var m map[string]*json.RawMessage
+	err := json.Unmarshal(body, &m)
+	if err != nil {
+		return err
+	}
+	for k, v := range m {
+		switch k {
+		case "deinterlace":
+			if v != nil {
+				var deinterlace Deinterlace
+				err = json.Unmarshal(*v, &deinterlace)
+				if err != nil {
+					return err
+				}
+				f.Deinterlace = &deinterlace
+			}
+		case "rotation":
+			if v != nil {
+				var rotation Rotation
+				err = json.Unmarshal(*v, &rotation)
+				if err != nil {
+					return err
+				}
+				f.Rotation = rotation
+			}
+		case "crop":
+			if v != nil {
+				var crop Rectangle
+				err = json.Unmarshal(*v, &crop)
+				if err != nil {
+					return err
+				}
+				f.Crop = &crop
+			}
+		case "overlays":
+			if v != nil {
+				overlays, err := unmarshalBasicOverlayArray(*v)
+				if err != nil {
+					return err
+				}
+				f.Overlays = &overlays
+			}
+		}
+	}
+
+	return nil
+}
+
+// FilterTrackPropertyCondition the class to specify one track property condition.
+type FilterTrackPropertyCondition struct {
+	// Property - The track property type. Possible values include: 'FilterTrackPropertyTypeUnknown', 'FilterTrackPropertyTypeType', 'FilterTrackPropertyTypeName', 'FilterTrackPropertyTypeLanguage', 'FilterTrackPropertyTypeFourCC', 'FilterTrackPropertyTypeBitrate'
+	Property FilterTrackPropertyType `json:"property,omitempty"`
+	// Value - The track property value.
+	Value *string `json:"value,omitempty"`
+	// Operation - The track property condition operation. Possible values include: 'Equal', 'NotEqual'
+	Operation FilterTrackPropertyCompareOperation `json:"operation,omitempty"`
+}
+
+// FilterTrackSelection representing a list of FilterTrackPropertyConditions to select a track. The
+// filters are combined using a logical AND operation.
+type FilterTrackSelection struct {
+	// TrackSelections - The track selections.
+	TrackSelections *[]FilterTrackPropertyCondition `json:"trackSelections,omitempty"`
+}
+
+// FirstQuality filter First Quality
+type FirstQuality struct {
+	// Bitrate - The first quality bitrate.
+	Bitrate *int32 `json:"bitrate,omitempty"`
+}
+
+// BasicFormat base class for output.
+type BasicFormat interface {
+	AsImageFormat() (*ImageFormat, bool)
+	AsBasicImageFormat() (BasicImageFormat, bool)
+	AsJpgFormat() (*JpgFormat, bool)
+	AsPngFormat() (*PngFormat, bool)
+	AsMultiBitrateFormat() (*MultiBitrateFormat, bool)
+	AsBasicMultiBitrateFormat() (BasicMultiBitrateFormat, bool)
+	AsMp4Format() (*Mp4Format, bool)
+	AsTransportStreamFormat() (*TransportStreamFormat, bool)
+	AsFormat() (*Format, bool)
+}
+
+// Format base class for output.
+type Format struct {
+	// FilenamePattern - The pattern of the file names for the generated output files. The following macros are supported in the file name: {Basename} - The base name of the input video {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. Any unsubstituted macros will be collapsed and removed from the filename.
+ FilenamePattern *string `json:"filenamePattern,omitempty"` + // OdataType - Possible values include: 'OdataTypeFormat', 'OdataTypeMicrosoftMediaImageFormat', 'OdataTypeMicrosoftMediaJpgFormat', 'OdataTypeMicrosoftMediaPngFormat', 'OdataTypeMicrosoftMediaMultiBitrateFormat', 'OdataTypeMicrosoftMediaMp4Format', 'OdataTypeMicrosoftMediaTransportStreamFormat' + OdataType OdataTypeBasicFormat `json:"@odata.type,omitempty"` +} + +func unmarshalBasicFormat(body []byte) (BasicFormat, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["@odata.type"] { + case string(OdataTypeMicrosoftMediaImageFormat): + var ifVar ImageFormat + err := json.Unmarshal(body, &ifVar) + return ifVar, err + case string(OdataTypeMicrosoftMediaJpgFormat): + var jf JpgFormat + err := json.Unmarshal(body, &jf) + return jf, err + case string(OdataTypeMicrosoftMediaPngFormat): + var pf PngFormat + err := json.Unmarshal(body, &pf) + return pf, err + case string(OdataTypeMicrosoftMediaMultiBitrateFormat): + var mbf MultiBitrateFormat + err := json.Unmarshal(body, &mbf) + return mbf, err + case string(OdataTypeMicrosoftMediaMp4Format): + var m4f Mp4Format + err := json.Unmarshal(body, &m4f) + return m4f, err + case string(OdataTypeMicrosoftMediaTransportStreamFormat): + var tsf TransportStreamFormat + err := json.Unmarshal(body, &tsf) + return tsf, err + default: + var f Format + err := json.Unmarshal(body, &f) + return f, err + } +} +func unmarshalBasicFormatArray(body []byte) ([]BasicFormat, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + fArray := make([]BasicFormat, len(rawMessages)) + + for index, rawMessage := range rawMessages { + f, err := unmarshalBasicFormat(*rawMessage) + if err != nil { + return nil, err + } + fArray[index] = f + } + return fArray, nil +} + +// MarshalJSON is the custom marshaler for Format. 
+func (f Format) MarshalJSON() ([]byte, error) { + f.OdataType = OdataTypeFormat + objectMap := make(map[string]interface{}) + if f.FilenamePattern != nil { + objectMap["filenamePattern"] = f.FilenamePattern + } + if f.OdataType != "" { + objectMap["@odata.type"] = f.OdataType + } + return json.Marshal(objectMap) +} + +// AsImageFormat is the BasicFormat implementation for Format. +func (f Format) AsImageFormat() (*ImageFormat, bool) { + return nil, false +} + +// AsBasicImageFormat is the BasicFormat implementation for Format. +func (f Format) AsBasicImageFormat() (BasicImageFormat, bool) { + return nil, false +} + +// AsJpgFormat is the BasicFormat implementation for Format. +func (f Format) AsJpgFormat() (*JpgFormat, bool) { + return nil, false +} + +// AsPngFormat is the BasicFormat implementation for Format. +func (f Format) AsPngFormat() (*PngFormat, bool) { + return nil, false +} + +// AsMultiBitrateFormat is the BasicFormat implementation for Format. +func (f Format) AsMultiBitrateFormat() (*MultiBitrateFormat, bool) { + return nil, false +} + +// AsBasicMultiBitrateFormat is the BasicFormat implementation for Format. +func (f Format) AsBasicMultiBitrateFormat() (BasicMultiBitrateFormat, bool) { + return nil, false +} + +// AsMp4Format is the BasicFormat implementation for Format. +func (f Format) AsMp4Format() (*Mp4Format, bool) { + return nil, false +} + +// AsTransportStreamFormat is the BasicFormat implementation for Format. +func (f Format) AsTransportStreamFormat() (*TransportStreamFormat, bool) { + return nil, false +} + +// AsFormat is the BasicFormat implementation for Format. +func (f Format) AsFormat() (*Format, bool) { + return &f, true +} + +// AsBasicFormat is the BasicFormat implementation for Format. +func (f Format) AsBasicFormat() (BasicFormat, bool) { + return &f, true +} + +// H264Layer describes the settings to be used when encoding the input video into a desired output bitrate +// layer with the H.264 video codec. 
+type H264Layer struct { + // Profile - Which profile of the H.264 standard should be used when encoding this layer. Default is Auto. Possible values include: 'H264VideoProfileAuto', 'H264VideoProfileBaseline', 'H264VideoProfileMain', 'H264VideoProfileHigh', 'H264VideoProfileHigh422', 'H264VideoProfileHigh444' + Profile H264VideoProfile `json:"profile,omitempty"` + // Level - Which level of the H.264 standard should be used when encoding this layer. The value can be Auto, or a number that matches the H.264 profile. If not specified, the default is Auto, which lets the encoder choose the Level that is appropriate for this layer. + Level *string `json:"level,omitempty"` + // BufferWindow - The VBV buffer window length. The value should be in ISO 8601 format. The value should be in the range [0.1-100] seconds. The default is 5 seconds (for example, PT5S). + BufferWindow *string `json:"bufferWindow,omitempty"` + // ReferenceFrames - The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting. + ReferenceFrames *int32 `json:"referenceFrames,omitempty"` + // EntropyMode - The entropy mode to be used for this layer. If not specified, the encoder chooses the mode that is appropriate for the profile and level. Possible values include: 'Cabac', 'Cavlc' + EntropyMode EntropyMode `json:"entropyMode,omitempty"` + // Bitrate - The average bitrate in bits per second at which to encode the input video when generating this layer. This is a required field. + Bitrate *int32 `json:"bitrate,omitempty"` + // MaxBitrate - The maximum bitrate (in bits per second), at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate. + MaxBitrate *int32 `json:"maxBitrate,omitempty"` + // BFrames - The number of B-frames to be used when encoding this layer. 
If not specified, the encoder chooses an appropriate number based on the video profile and level. + BFrames *int32 `json:"bFrames,omitempty"` + // FrameRate - The frame rate (in frames per second) at which to encode this layer. The value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video. + FrameRate *string `json:"frameRate,omitempty"` + // Slices - The number of slices to be used when encoding this layer. If not specified, default is zero, which means that encoder will use a single slice for each frame. + Slices *int32 `json:"slices,omitempty"` + // AdaptiveBFrame - Whether or not adaptive B-frames are to be used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. + AdaptiveBFrame *bool `json:"adaptiveBFrame,omitempty"` + // Width - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input. + Width *string `json:"width,omitempty"` + // Height - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input. + Height *string `json:"height,omitempty"` + // Label - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file. 
+ Label *string `json:"label,omitempty"` + // OdataType - Possible values include: 'OdataTypeLayer', 'OdataTypeMicrosoftMediaVideoLayer', 'OdataTypeMicrosoftMediaH264Layer', 'OdataTypeMicrosoftMediaJpgLayer', 'OdataTypeMicrosoftMediaPngLayer' + OdataType OdataTypeBasicLayer `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for H264Layer. +func (hl H264Layer) MarshalJSON() ([]byte, error) { + hl.OdataType = OdataTypeMicrosoftMediaH264Layer + objectMap := make(map[string]interface{}) + if hl.Profile != "" { + objectMap["profile"] = hl.Profile + } + if hl.Level != nil { + objectMap["level"] = hl.Level + } + if hl.BufferWindow != nil { + objectMap["bufferWindow"] = hl.BufferWindow + } + if hl.ReferenceFrames != nil { + objectMap["referenceFrames"] = hl.ReferenceFrames + } + if hl.EntropyMode != "" { + objectMap["entropyMode"] = hl.EntropyMode + } + if hl.Bitrate != nil { + objectMap["bitrate"] = hl.Bitrate + } + if hl.MaxBitrate != nil { + objectMap["maxBitrate"] = hl.MaxBitrate + } + if hl.BFrames != nil { + objectMap["bFrames"] = hl.BFrames + } + if hl.FrameRate != nil { + objectMap["frameRate"] = hl.FrameRate + } + if hl.Slices != nil { + objectMap["slices"] = hl.Slices + } + if hl.AdaptiveBFrame != nil { + objectMap["adaptiveBFrame"] = hl.AdaptiveBFrame + } + if hl.Width != nil { + objectMap["width"] = hl.Width + } + if hl.Height != nil { + objectMap["height"] = hl.Height + } + if hl.Label != nil { + objectMap["label"] = hl.Label + } + if hl.OdataType != "" { + objectMap["@odata.type"] = hl.OdataType + } + return json.Marshal(objectMap) +} + +// AsVideoLayer is the BasicLayer implementation for H264Layer. +func (hl H264Layer) AsVideoLayer() (*VideoLayer, bool) { + return nil, false +} + +// AsBasicVideoLayer is the BasicLayer implementation for H264Layer. +func (hl H264Layer) AsBasicVideoLayer() (BasicVideoLayer, bool) { + return &hl, true +} + +// AsH264Layer is the BasicLayer implementation for H264Layer. 
+func (hl H264Layer) AsH264Layer() (*H264Layer, bool) { + return &hl, true +} + +// AsJpgLayer is the BasicLayer implementation for H264Layer. +func (hl H264Layer) AsJpgLayer() (*JpgLayer, bool) { + return nil, false +} + +// AsPngLayer is the BasicLayer implementation for H264Layer. +func (hl H264Layer) AsPngLayer() (*PngLayer, bool) { + return nil, false +} + +// AsLayer is the BasicLayer implementation for H264Layer. +func (hl H264Layer) AsLayer() (*Layer, bool) { + return nil, false +} + +// AsBasicLayer is the BasicLayer implementation for H264Layer. +func (hl H264Layer) AsBasicLayer() (BasicLayer, bool) { + return &hl, true +} + +// H264Video describes all the properties for encoding a video with the H.264 codec. +type H264Video struct { + // SceneChangeDetection - Whether or not the encoder should insert key frames at scene changes. If not specified, the default is false. This flag should be set to true only when the encoder is being configured to produce a single output video. + SceneChangeDetection *bool `json:"sceneChangeDetection,omitempty"` + // Complexity - Tells the encoder how to choose its encoding settings. The default value is Balanced. Possible values include: 'Speed', 'Balanced', 'Quality' + Complexity H264Complexity `json:"complexity,omitempty"` + // Layers - The collection of output H.264 layers to be produced by the encoder. + Layers *[]H264Layer `json:"layers,omitempty"` + // KeyFrameInterval - The distance between two key frames, thereby defining a group of pictures (GOP). The value should be a non-zero integer in the range [1, 30] seconds, specified in ISO 8601 format. The default is 2 seconds (PT2S). + KeyFrameInterval *string `json:"keyFrameInterval,omitempty"` + // StretchMode - The resizing mode - how the input video will be resized to fit the desired output resolution(s). Default is AutoSize. 
Possible values include: 'StretchModeNone', 'StretchModeAutoSize', 'StretchModeAutoFit' + StretchMode StretchMode `json:"stretchMode,omitempty"` + // Label - An optional label for the codec. The label can be used to control muxing behavior. + Label *string `json:"label,omitempty"` + // OdataType - Possible values include: 'OdataTypeCodec', 'OdataTypeMicrosoftMediaAudio', 'OdataTypeMicrosoftMediaAacAudio', 'OdataTypeMicrosoftMediaCopyVideo', 'OdataTypeMicrosoftMediaVideo', 'OdataTypeMicrosoftMediaImage', 'OdataTypeMicrosoftMediaCopyAudio', 'OdataTypeMicrosoftMediaH264Video', 'OdataTypeMicrosoftMediaJpgImage', 'OdataTypeMicrosoftMediaPngImage' + OdataType OdataTypeBasicCodec `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for H264Video. +func (hv H264Video) MarshalJSON() ([]byte, error) { + hv.OdataType = OdataTypeMicrosoftMediaH264Video + objectMap := make(map[string]interface{}) + if hv.SceneChangeDetection != nil { + objectMap["sceneChangeDetection"] = hv.SceneChangeDetection + } + if hv.Complexity != "" { + objectMap["complexity"] = hv.Complexity + } + if hv.Layers != nil { + objectMap["layers"] = hv.Layers + } + if hv.KeyFrameInterval != nil { + objectMap["keyFrameInterval"] = hv.KeyFrameInterval + } + if hv.StretchMode != "" { + objectMap["stretchMode"] = hv.StretchMode + } + if hv.Label != nil { + objectMap["label"] = hv.Label + } + if hv.OdataType != "" { + objectMap["@odata.type"] = hv.OdataType + } + return json.Marshal(objectMap) +} + +// AsAudio is the BasicCodec implementation for H264Video. +func (hv H264Video) AsAudio() (*Audio, bool) { + return nil, false +} + +// AsBasicAudio is the BasicCodec implementation for H264Video. +func (hv H264Video) AsBasicAudio() (BasicAudio, bool) { + return nil, false +} + +// AsAacAudio is the BasicCodec implementation for H264Video. +func (hv H264Video) AsAacAudio() (*AacAudio, bool) { + return nil, false +} + +// AsCopyVideo is the BasicCodec implementation for H264Video. 
+func (hv H264Video) AsCopyVideo() (*CopyVideo, bool) { + return nil, false +} + +// AsVideo is the BasicCodec implementation for H264Video. +func (hv H264Video) AsVideo() (*Video, bool) { + return nil, false +} + +// AsBasicVideo is the BasicCodec implementation for H264Video. +func (hv H264Video) AsBasicVideo() (BasicVideo, bool) { + return &hv, true +} + +// AsImage is the BasicCodec implementation for H264Video. +func (hv H264Video) AsImage() (*Image, bool) { + return nil, false +} + +// AsBasicImage is the BasicCodec implementation for H264Video. +func (hv H264Video) AsBasicImage() (BasicImage, bool) { + return nil, false +} + +// AsCopyAudio is the BasicCodec implementation for H264Video. +func (hv H264Video) AsCopyAudio() (*CopyAudio, bool) { + return nil, false +} + +// AsH264Video is the BasicCodec implementation for H264Video. +func (hv H264Video) AsH264Video() (*H264Video, bool) { + return &hv, true +} + +// AsJpgImage is the BasicCodec implementation for H264Video. +func (hv H264Video) AsJpgImage() (*JpgImage, bool) { + return nil, false +} + +// AsPngImage is the BasicCodec implementation for H264Video. +func (hv H264Video) AsPngImage() (*PngImage, bool) { + return nil, false +} + +// AsCodec is the BasicCodec implementation for H264Video. +func (hv H264Video) AsCodec() (*Codec, bool) { + return nil, false +} + +// AsBasicCodec is the BasicCodec implementation for H264Video. +func (hv H264Video) AsBasicCodec() (BasicCodec, bool) { + return &hv, true +} + +// Hls the HLS configuration. +type Hls struct { + // FragmentsPerTsSegment - The amount of fragments per HTTP Live Streaming (HLS) segment. 
+ FragmentsPerTsSegment *int32 `json:"fragmentsPerTsSegment,omitempty"` +} + +// BasicImage describes the basic properties for generating thumbnails from the input video +type BasicImage interface { + AsJpgImage() (*JpgImage, bool) + AsPngImage() (*PngImage, bool) + AsImage() (*Image, bool) +} + +// Image describes the basic properties for generating thumbnails from the input video +type Image struct { + // Start - The position in the input video from where to start generating thumbnails. The value can be in absolute timestamp (ISO 8601, e.g: PT05S), or a frame count (For example, 10 for the 10th frame), or a relative value (For example, 1%). Also supports a macro {Best}, which tells the encoder to select the best thumbnail from the first few seconds of the video. + Start *string `json:"start,omitempty"` + // Step - The intervals at which thumbnails are generated. The value can be in absolute timestamp (ISO 8601, e.g: PT05S for one image every 5 seconds), or a frame count (For example, 30 for every 30 frames), or a relative value (For example, 1%). + Step *string `json:"step,omitempty"` + // Range - The position in the input video at which to stop generating thumbnails. The value can be in absolute timestamp (ISO 8601, e.g: PT5M30S to stop at 5 minutes and 30 seconds), or a frame count (For example, 300 to stop at the 300th frame), or a relative value (For example, 100%). + Range *string `json:"range,omitempty"` + // KeyFrameInterval - The distance between two key frames, thereby defining a group of pictures (GOP). The value should be a non-zero integer in the range [1, 30] seconds, specified in ISO 8601 format. The default is 2 seconds (PT2S). + KeyFrameInterval *string `json:"keyFrameInterval,omitempty"` + // StretchMode - The resizing mode - how the input video will be resized to fit the desired output resolution(s). Default is AutoSize. 
Possible values include: 'StretchModeNone', 'StretchModeAutoSize', 'StretchModeAutoFit' + StretchMode StretchMode `json:"stretchMode,omitempty"` + // Label - An optional label for the codec. The label can be used to control muxing behavior. + Label *string `json:"label,omitempty"` + // OdataType - Possible values include: 'OdataTypeCodec', 'OdataTypeMicrosoftMediaAudio', 'OdataTypeMicrosoftMediaAacAudio', 'OdataTypeMicrosoftMediaCopyVideo', 'OdataTypeMicrosoftMediaVideo', 'OdataTypeMicrosoftMediaImage', 'OdataTypeMicrosoftMediaCopyAudio', 'OdataTypeMicrosoftMediaH264Video', 'OdataTypeMicrosoftMediaJpgImage', 'OdataTypeMicrosoftMediaPngImage' + OdataType OdataTypeBasicCodec `json:"@odata.type,omitempty"` +} + +func unmarshalBasicImage(body []byte) (BasicImage, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["@odata.type"] { + case string(OdataTypeMicrosoftMediaJpgImage): + var ji JpgImage + err := json.Unmarshal(body, &ji) + return ji, err + case string(OdataTypeMicrosoftMediaPngImage): + var pi PngImage + err := json.Unmarshal(body, &pi) + return pi, err + default: + var i Image + err := json.Unmarshal(body, &i) + return i, err + } +} +func unmarshalBasicImageArray(body []byte) ([]BasicImage, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + iArray := make([]BasicImage, len(rawMessages)) + + for index, rawMessage := range rawMessages { + i, err := unmarshalBasicImage(*rawMessage) + if err != nil { + return nil, err + } + iArray[index] = i + } + return iArray, nil +} + +// MarshalJSON is the custom marshaler for Image. 
+func (i Image) MarshalJSON() ([]byte, error) { + i.OdataType = OdataTypeMicrosoftMediaImage + objectMap := make(map[string]interface{}) + if i.Start != nil { + objectMap["start"] = i.Start + } + if i.Step != nil { + objectMap["step"] = i.Step + } + if i.Range != nil { + objectMap["range"] = i.Range + } + if i.KeyFrameInterval != nil { + objectMap["keyFrameInterval"] = i.KeyFrameInterval + } + if i.StretchMode != "" { + objectMap["stretchMode"] = i.StretchMode + } + if i.Label != nil { + objectMap["label"] = i.Label + } + if i.OdataType != "" { + objectMap["@odata.type"] = i.OdataType + } + return json.Marshal(objectMap) +} + +// AsAudio is the BasicCodec implementation for Image. +func (i Image) AsAudio() (*Audio, bool) { + return nil, false +} + +// AsBasicAudio is the BasicCodec implementation for Image. +func (i Image) AsBasicAudio() (BasicAudio, bool) { + return nil, false +} + +// AsAacAudio is the BasicCodec implementation for Image. +func (i Image) AsAacAudio() (*AacAudio, bool) { + return nil, false +} + +// AsCopyVideo is the BasicCodec implementation for Image. +func (i Image) AsCopyVideo() (*CopyVideo, bool) { + return nil, false +} + +// AsVideo is the BasicCodec implementation for Image. +func (i Image) AsVideo() (*Video, bool) { + return nil, false +} + +// AsBasicVideo is the BasicCodec implementation for Image. +func (i Image) AsBasicVideo() (BasicVideo, bool) { + return &i, true +} + +// AsImage is the BasicCodec implementation for Image. +func (i Image) AsImage() (*Image, bool) { + return &i, true +} + +// AsBasicImage is the BasicCodec implementation for Image. +func (i Image) AsBasicImage() (BasicImage, bool) { + return &i, true +} + +// AsCopyAudio is the BasicCodec implementation for Image. +func (i Image) AsCopyAudio() (*CopyAudio, bool) { + return nil, false +} + +// AsH264Video is the BasicCodec implementation for Image. 
+func (i Image) AsH264Video() (*H264Video, bool) { + return nil, false +} + +// AsJpgImage is the BasicCodec implementation for Image. +func (i Image) AsJpgImage() (*JpgImage, bool) { + return nil, false +} + +// AsPngImage is the BasicCodec implementation for Image. +func (i Image) AsPngImage() (*PngImage, bool) { + return nil, false +} + +// AsCodec is the BasicCodec implementation for Image. +func (i Image) AsCodec() (*Codec, bool) { + return nil, false +} + +// AsBasicCodec is the BasicCodec implementation for Image. +func (i Image) AsBasicCodec() (BasicCodec, bool) { + return &i, true +} + +// BasicImageFormat describes the properties for an output image file. +type BasicImageFormat interface { + AsJpgFormat() (*JpgFormat, bool) + AsPngFormat() (*PngFormat, bool) + AsImageFormat() (*ImageFormat, bool) +} + +// ImageFormat describes the properties for an output image file. +type ImageFormat struct { + // FilenamePattern - The pattern of the file names for the generated output files. The following macros are supported in the file name: {Basename} - The base name of the input video {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. Any unsubstituted macros will be collapsed and removed from the filename. 
+ FilenamePattern *string `json:"filenamePattern,omitempty"` + // OdataType - Possible values include: 'OdataTypeFormat', 'OdataTypeMicrosoftMediaImageFormat', 'OdataTypeMicrosoftMediaJpgFormat', 'OdataTypeMicrosoftMediaPngFormat', 'OdataTypeMicrosoftMediaMultiBitrateFormat', 'OdataTypeMicrosoftMediaMp4Format', 'OdataTypeMicrosoftMediaTransportStreamFormat' + OdataType OdataTypeBasicFormat `json:"@odata.type,omitempty"` +} + +func unmarshalBasicImageFormat(body []byte) (BasicImageFormat, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["@odata.type"] { + case string(OdataTypeMicrosoftMediaJpgFormat): + var jf JpgFormat + err := json.Unmarshal(body, &jf) + return jf, err + case string(OdataTypeMicrosoftMediaPngFormat): + var pf PngFormat + err := json.Unmarshal(body, &pf) + return pf, err + default: + var ifVar ImageFormat + err := json.Unmarshal(body, &ifVar) + return ifVar, err + } +} +func unmarshalBasicImageFormatArray(body []byte) ([]BasicImageFormat, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + ifVarArray := make([]BasicImageFormat, len(rawMessages)) + + for index, rawMessage := range rawMessages { + ifVar, err := unmarshalBasicImageFormat(*rawMessage) + if err != nil { + return nil, err + } + ifVarArray[index] = ifVar + } + return ifVarArray, nil +} + +// MarshalJSON is the custom marshaler for ImageFormat. +func (ifVar ImageFormat) MarshalJSON() ([]byte, error) { + ifVar.OdataType = OdataTypeMicrosoftMediaImageFormat + objectMap := make(map[string]interface{}) + if ifVar.FilenamePattern != nil { + objectMap["filenamePattern"] = ifVar.FilenamePattern + } + if ifVar.OdataType != "" { + objectMap["@odata.type"] = ifVar.OdataType + } + return json.Marshal(objectMap) +} + +// AsImageFormat is the BasicFormat implementation for ImageFormat. 
+func (ifVar ImageFormat) AsImageFormat() (*ImageFormat, bool) { + return &ifVar, true +} + +// AsBasicImageFormat is the BasicFormat implementation for ImageFormat. +func (ifVar ImageFormat) AsBasicImageFormat() (BasicImageFormat, bool) { + return &ifVar, true +} + +// AsJpgFormat is the BasicFormat implementation for ImageFormat. +func (ifVar ImageFormat) AsJpgFormat() (*JpgFormat, bool) { + return nil, false +} + +// AsPngFormat is the BasicFormat implementation for ImageFormat. +func (ifVar ImageFormat) AsPngFormat() (*PngFormat, bool) { + return nil, false +} + +// AsMultiBitrateFormat is the BasicFormat implementation for ImageFormat. +func (ifVar ImageFormat) AsMultiBitrateFormat() (*MultiBitrateFormat, bool) { + return nil, false +} + +// AsBasicMultiBitrateFormat is the BasicFormat implementation for ImageFormat. +func (ifVar ImageFormat) AsBasicMultiBitrateFormat() (BasicMultiBitrateFormat, bool) { + return nil, false +} + +// AsMp4Format is the BasicFormat implementation for ImageFormat. +func (ifVar ImageFormat) AsMp4Format() (*Mp4Format, bool) { + return nil, false +} + +// AsTransportStreamFormat is the BasicFormat implementation for ImageFormat. +func (ifVar ImageFormat) AsTransportStreamFormat() (*TransportStreamFormat, bool) { + return nil, false +} + +// AsFormat is the BasicFormat implementation for ImageFormat. +func (ifVar ImageFormat) AsFormat() (*Format, bool) { + return nil, false +} + +// AsBasicFormat is the BasicFormat implementation for ImageFormat. +func (ifVar ImageFormat) AsBasicFormat() (BasicFormat, bool) { + return &ifVar, true +} + +// IPAccessControl the IP access control. +type IPAccessControl struct { + // Allow - The IP allow list. + Allow *[]IPRange `json:"allow,omitempty"` +} + +// IPRange the IP address range in the CIDR scheme. +type IPRange struct { + // Name - The friendly name for the IP address range. + Name *string `json:"name,omitempty"` + // Address - The IP address. 
+ Address *string `json:"address,omitempty"` + // SubnetPrefixLength - The subnet mask prefix length (see CIDR notation). + SubnetPrefixLength *int32 `json:"subnetPrefixLength,omitempty"` +} + +// Job a Job resource type. The progress and state can be obtained by polling a Job or subscribing to +// events using EventGrid. +type Job struct { + autorest.Response `json:"-"` + // JobProperties - The resource properties. + *JobProperties `json:"properties,omitempty"` + // ID - Fully qualified resource ID for the resource. + ID *string `json:"id,omitempty"` + // Name - The name of the resource. + Name *string `json:"name,omitempty"` + // Type - The type of the resource. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for Job. +func (j Job) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if j.JobProperties != nil { + objectMap["properties"] = j.JobProperties + } + if j.ID != nil { + objectMap["id"] = j.ID + } + if j.Name != nil { + objectMap["name"] = j.Name + } + if j.Type != nil { + objectMap["type"] = j.Type + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Job struct. 
+func (j *Job) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var jobProperties JobProperties + err = json.Unmarshal(*v, &jobProperties) + if err != nil { + return err + } + j.JobProperties = &jobProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + j.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + j.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + j.Type = &typeVar + } + } + } + + return nil +} + +// JobCollection a collection of Job items. +type JobCollection struct { + autorest.Response `json:"-"` + // Value - A collection of Job items. + Value *[]Job `json:"value,omitempty"` + // OdataNextLink - A link to the next page of the collection (when the collection contains too many results to return in one response). + OdataNextLink *string `json:"@odata.nextLink,omitempty"` +} + +// JobCollectionIterator provides access to a complete listing of Job values. +type JobCollectionIterator struct { + i int + page JobCollectionPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
+func (iter *JobCollectionIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobCollectionIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *JobCollectionIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter JobCollectionIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter JobCollectionIterator) Response() JobCollection { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter JobCollectionIterator) Value() Job { + if !iter.page.NotDone() { + return Job{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the JobCollectionIterator type. +func NewJobCollectionIterator(page JobCollectionPage) JobCollectionIterator { + return JobCollectionIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (jc JobCollection) IsEmpty() bool { + return jc.Value == nil || len(*jc.Value) == 0 +} + +// jobCollectionPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. 
+func (jc JobCollection) jobCollectionPreparer(ctx context.Context) (*http.Request, error) { + if jc.OdataNextLink == nil || len(to.String(jc.OdataNextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(jc.OdataNextLink))) +} + +// JobCollectionPage contains a page of Job values. +type JobCollectionPage struct { + fn func(context.Context, JobCollection) (JobCollection, error) + jc JobCollection +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *JobCollectionPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobCollectionPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.jc) + if err != nil { + return err + } + page.jc = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *JobCollectionPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page JobCollectionPage) NotDone() bool { + return !page.jc.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page JobCollectionPage) Response() JobCollection { + return page.jc +} + +// Values returns the slice of values for the current page or nil if there are no values. 
+func (page JobCollectionPage) Values() []Job { + if page.jc.IsEmpty() { + return nil + } + return *page.jc.Value +} + +// Creates a new instance of the JobCollectionPage type. +func NewJobCollectionPage(getNextPage func(context.Context, JobCollection) (JobCollection, error)) JobCollectionPage { + return JobCollectionPage{fn: getNextPage} +} + +// JobError details of JobOutput errors. +type JobError struct { + // Code - Error code describing the error. Possible values include: 'ServiceError', 'ServiceTransientError', 'DownloadNotAccessible', 'DownloadTransientError', 'UploadNotAccessible', 'UploadTransientError', 'ConfigurationUnsupported', 'ContentMalformed', 'ContentUnsupported' + Code JobErrorCode `json:"code,omitempty"` + // Message - A human-readable language-dependent representation of the error. + Message *string `json:"message,omitempty"` + // Category - Helps with categorization of errors. Possible values include: 'JobErrorCategoryService', 'JobErrorCategoryDownload', 'JobErrorCategoryUpload', 'JobErrorCategoryConfiguration', 'JobErrorCategoryContent' + Category JobErrorCategory `json:"category,omitempty"` + // Retry - Indicates that it may be possible to retry the Job. If retry is unsuccessful, please contact Azure support via Azure Portal. Possible values include: 'DoNotRetry', 'MayRetry' + Retry JobRetry `json:"retry,omitempty"` + // Details - An array of details about specific errors that led to this reported error. + Details *[]JobErrorDetail `json:"details,omitempty"` +} + +// JobErrorDetail details of JobOutput errors. +type JobErrorDetail struct { + // Code - Code describing the error detail. + Code *string `json:"code,omitempty"` + // Message - A human-readable representation of the error. + Message *string `json:"message,omitempty"` +} + +// BasicJobInput base class for inputs to a Job. 
+type BasicJobInput interface { + AsJobInputClip() (*JobInputClip, bool) + AsBasicJobInputClip() (BasicJobInputClip, bool) + AsJobInputs() (*JobInputs, bool) + AsJobInputAsset() (*JobInputAsset, bool) + AsJobInputHTTP() (*JobInputHTTP, bool) + AsJobInput() (*JobInput, bool) +} + +// JobInput base class for inputs to a Job. +type JobInput struct { + // OdataType - Possible values include: 'OdataTypeJobInput', 'OdataTypeMicrosoftMediaJobInputClip', 'OdataTypeMicrosoftMediaJobInputs', 'OdataTypeMicrosoftMediaJobInputAsset', 'OdataTypeMicrosoftMediaJobInputHTTP' + OdataType OdataTypeBasicJobInput `json:"@odata.type,omitempty"` +} + +func unmarshalBasicJobInput(body []byte) (BasicJobInput, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["@odata.type"] { + case string(OdataTypeMicrosoftMediaJobInputClip): + var jic JobInputClip + err := json.Unmarshal(body, &jic) + return jic, err + case string(OdataTypeMicrosoftMediaJobInputs): + var ji JobInputs + err := json.Unmarshal(body, &ji) + return ji, err + case string(OdataTypeMicrosoftMediaJobInputAsset): + var jia JobInputAsset + err := json.Unmarshal(body, &jia) + return jia, err + case string(OdataTypeMicrosoftMediaJobInputHTTP): + var jih JobInputHTTP + err := json.Unmarshal(body, &jih) + return jih, err + default: + var ji JobInput + err := json.Unmarshal(body, &ji) + return ji, err + } +} +func unmarshalBasicJobInputArray(body []byte) ([]BasicJobInput, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + jiArray := make([]BasicJobInput, len(rawMessages)) + + for index, rawMessage := range rawMessages { + ji, err := unmarshalBasicJobInput(*rawMessage) + if err != nil { + return nil, err + } + jiArray[index] = ji + } + return jiArray, nil +} + +// MarshalJSON is the custom marshaler for JobInput. 
+func (ji JobInput) MarshalJSON() ([]byte, error) { + ji.OdataType = OdataTypeJobInput + objectMap := make(map[string]interface{}) + if ji.OdataType != "" { + objectMap["@odata.type"] = ji.OdataType + } + return json.Marshal(objectMap) +} + +// AsJobInputClip is the BasicJobInput implementation for JobInput. +func (ji JobInput) AsJobInputClip() (*JobInputClip, bool) { + return nil, false +} + +// AsBasicJobInputClip is the BasicJobInput implementation for JobInput. +func (ji JobInput) AsBasicJobInputClip() (BasicJobInputClip, bool) { + return nil, false +} + +// AsJobInputs is the BasicJobInput implementation for JobInput. +func (ji JobInput) AsJobInputs() (*JobInputs, bool) { + return nil, false +} + +// AsJobInputAsset is the BasicJobInput implementation for JobInput. +func (ji JobInput) AsJobInputAsset() (*JobInputAsset, bool) { + return nil, false +} + +// AsJobInputHTTP is the BasicJobInput implementation for JobInput. +func (ji JobInput) AsJobInputHTTP() (*JobInputHTTP, bool) { + return nil, false +} + +// AsJobInput is the BasicJobInput implementation for JobInput. +func (ji JobInput) AsJobInput() (*JobInput, bool) { + return &ji, true +} + +// AsBasicJobInput is the BasicJobInput implementation for JobInput. +func (ji JobInput) AsBasicJobInput() (BasicJobInput, bool) { + return &ji, true +} + +// JobInputAsset represents an Asset for input into a Job. +type JobInputAsset struct { + // AssetName - The name of the input Asset. + AssetName *string `json:"assetName,omitempty"` + // Files - List of files. Required for JobInputHttp. + Files *[]string `json:"files,omitempty"` + // Label - A label that is assigned to a JobInputClip, that is used to satisfy a reference used in the Transform. For example, a Transform can be authored so as to take an image file with the label 'xyz' and apply it as an overlay onto the input video before it is encoded. When submitting a Job, exactly one of the JobInputs should be the image file, and it should have the label 'xyz'. 
+ Label *string `json:"label,omitempty"` + // OdataType - Possible values include: 'OdataTypeJobInput', 'OdataTypeMicrosoftMediaJobInputClip', 'OdataTypeMicrosoftMediaJobInputs', 'OdataTypeMicrosoftMediaJobInputAsset', 'OdataTypeMicrosoftMediaJobInputHTTP' + OdataType OdataTypeBasicJobInput `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for JobInputAsset. +func (jia JobInputAsset) MarshalJSON() ([]byte, error) { + jia.OdataType = OdataTypeMicrosoftMediaJobInputAsset + objectMap := make(map[string]interface{}) + if jia.AssetName != nil { + objectMap["assetName"] = jia.AssetName + } + if jia.Files != nil { + objectMap["files"] = jia.Files + } + if jia.Label != nil { + objectMap["label"] = jia.Label + } + if jia.OdataType != "" { + objectMap["@odata.type"] = jia.OdataType + } + return json.Marshal(objectMap) +} + +// AsJobInputClip is the BasicJobInput implementation for JobInputAsset. +func (jia JobInputAsset) AsJobInputClip() (*JobInputClip, bool) { + return nil, false +} + +// AsBasicJobInputClip is the BasicJobInput implementation for JobInputAsset. +func (jia JobInputAsset) AsBasicJobInputClip() (BasicJobInputClip, bool) { + return &jia, true +} + +// AsJobInputs is the BasicJobInput implementation for JobInputAsset. +func (jia JobInputAsset) AsJobInputs() (*JobInputs, bool) { + return nil, false +} + +// AsJobInputAsset is the BasicJobInput implementation for JobInputAsset. +func (jia JobInputAsset) AsJobInputAsset() (*JobInputAsset, bool) { + return &jia, true +} + +// AsJobInputHTTP is the BasicJobInput implementation for JobInputAsset. +func (jia JobInputAsset) AsJobInputHTTP() (*JobInputHTTP, bool) { + return nil, false +} + +// AsJobInput is the BasicJobInput implementation for JobInputAsset. +func (jia JobInputAsset) AsJobInput() (*JobInput, bool) { + return nil, false +} + +// AsBasicJobInput is the BasicJobInput implementation for JobInputAsset. 
+func (jia JobInputAsset) AsBasicJobInput() (BasicJobInput, bool) { + return &jia, true +} + +// BasicJobInputClip represents input files for a Job. +type BasicJobInputClip interface { + AsJobInputAsset() (*JobInputAsset, bool) + AsJobInputHTTP() (*JobInputHTTP, bool) + AsJobInputClip() (*JobInputClip, bool) +} + +// JobInputClip represents input files for a Job. +type JobInputClip struct { + // Files - List of files. Required for JobInputHttp. + Files *[]string `json:"files,omitempty"` + // Label - A label that is assigned to a JobInputClip, that is used to satisfy a reference used in the Transform. For example, a Transform can be authored so as to take an image file with the label 'xyz' and apply it as an overlay onto the input video before it is encoded. When submitting a Job, exactly one of the JobInputs should be the image file, and it should have the label 'xyz'. + Label *string `json:"label,omitempty"` + // OdataType - Possible values include: 'OdataTypeJobInput', 'OdataTypeMicrosoftMediaJobInputClip', 'OdataTypeMicrosoftMediaJobInputs', 'OdataTypeMicrosoftMediaJobInputAsset', 'OdataTypeMicrosoftMediaJobInputHTTP' + OdataType OdataTypeBasicJobInput `json:"@odata.type,omitempty"` +} + +func unmarshalBasicJobInputClip(body []byte) (BasicJobInputClip, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["@odata.type"] { + case string(OdataTypeMicrosoftMediaJobInputAsset): + var jia JobInputAsset + err := json.Unmarshal(body, &jia) + return jia, err + case string(OdataTypeMicrosoftMediaJobInputHTTP): + var jih JobInputHTTP + err := json.Unmarshal(body, &jih) + return jih, err + default: + var jic JobInputClip + err := json.Unmarshal(body, &jic) + return jic, err + } +} +func unmarshalBasicJobInputClipArray(body []byte) ([]BasicJobInputClip, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + jicArray := 
make([]BasicJobInputClip, len(rawMessages)) + + for index, rawMessage := range rawMessages { + jic, err := unmarshalBasicJobInputClip(*rawMessage) + if err != nil { + return nil, err + } + jicArray[index] = jic + } + return jicArray, nil +} + +// MarshalJSON is the custom marshaler for JobInputClip. +func (jic JobInputClip) MarshalJSON() ([]byte, error) { + jic.OdataType = OdataTypeMicrosoftMediaJobInputClip + objectMap := make(map[string]interface{}) + if jic.Files != nil { + objectMap["files"] = jic.Files + } + if jic.Label != nil { + objectMap["label"] = jic.Label + } + if jic.OdataType != "" { + objectMap["@odata.type"] = jic.OdataType + } + return json.Marshal(objectMap) +} + +// AsJobInputClip is the BasicJobInput implementation for JobInputClip. +func (jic JobInputClip) AsJobInputClip() (*JobInputClip, bool) { + return &jic, true +} + +// AsBasicJobInputClip is the BasicJobInput implementation for JobInputClip. +func (jic JobInputClip) AsBasicJobInputClip() (BasicJobInputClip, bool) { + return &jic, true +} + +// AsJobInputs is the BasicJobInput implementation for JobInputClip. +func (jic JobInputClip) AsJobInputs() (*JobInputs, bool) { + return nil, false +} + +// AsJobInputAsset is the BasicJobInput implementation for JobInputClip. +func (jic JobInputClip) AsJobInputAsset() (*JobInputAsset, bool) { + return nil, false +} + +// AsJobInputHTTP is the BasicJobInput implementation for JobInputClip. +func (jic JobInputClip) AsJobInputHTTP() (*JobInputHTTP, bool) { + return nil, false +} + +// AsJobInput is the BasicJobInput implementation for JobInputClip. +func (jic JobInputClip) AsJobInput() (*JobInput, bool) { + return nil, false +} + +// AsBasicJobInput is the BasicJobInput implementation for JobInputClip. +func (jic JobInputClip) AsBasicJobInput() (BasicJobInput, bool) { + return &jic, true +} + +// JobInputHTTP represents HTTPS job input. +type JobInputHTTP struct { + // BaseURI - Base URI for HTTPS job input. 
It will be concatenated with provided file names. If no base uri is given, then the provided file list is assumed to be fully qualified uris. + BaseURI *string `json:"baseUri,omitempty"` + // Files - List of files. Required for JobInputHttp. + Files *[]string `json:"files,omitempty"` + // Label - A label that is assigned to a JobInputClip, that is used to satisfy a reference used in the Transform. For example, a Transform can be authored so as to take an image file with the label 'xyz' and apply it as an overlay onto the input video before it is encoded. When submitting a Job, exactly one of the JobInputs should be the image file, and it should have the label 'xyz'. + Label *string `json:"label,omitempty"` + // OdataType - Possible values include: 'OdataTypeJobInput', 'OdataTypeMicrosoftMediaJobInputClip', 'OdataTypeMicrosoftMediaJobInputs', 'OdataTypeMicrosoftMediaJobInputAsset', 'OdataTypeMicrosoftMediaJobInputHTTP' + OdataType OdataTypeBasicJobInput `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for JobInputHTTP. +func (jih JobInputHTTP) MarshalJSON() ([]byte, error) { + jih.OdataType = OdataTypeMicrosoftMediaJobInputHTTP + objectMap := make(map[string]interface{}) + if jih.BaseURI != nil { + objectMap["baseUri"] = jih.BaseURI + } + if jih.Files != nil { + objectMap["files"] = jih.Files + } + if jih.Label != nil { + objectMap["label"] = jih.Label + } + if jih.OdataType != "" { + objectMap["@odata.type"] = jih.OdataType + } + return json.Marshal(objectMap) +} + +// AsJobInputClip is the BasicJobInput implementation for JobInputHTTP. +func (jih JobInputHTTP) AsJobInputClip() (*JobInputClip, bool) { + return nil, false +} + +// AsBasicJobInputClip is the BasicJobInput implementation for JobInputHTTP. +func (jih JobInputHTTP) AsBasicJobInputClip() (BasicJobInputClip, bool) { + return &jih, true +} + +// AsJobInputs is the BasicJobInput implementation for JobInputHTTP. 
+func (jih JobInputHTTP) AsJobInputs() (*JobInputs, bool) { + return nil, false +} + +// AsJobInputAsset is the BasicJobInput implementation for JobInputHTTP. +func (jih JobInputHTTP) AsJobInputAsset() (*JobInputAsset, bool) { + return nil, false +} + +// AsJobInputHTTP is the BasicJobInput implementation for JobInputHTTP. +func (jih JobInputHTTP) AsJobInputHTTP() (*JobInputHTTP, bool) { + return &jih, true +} + +// AsJobInput is the BasicJobInput implementation for JobInputHTTP. +func (jih JobInputHTTP) AsJobInput() (*JobInput, bool) { + return nil, false +} + +// AsBasicJobInput is the BasicJobInput implementation for JobInputHTTP. +func (jih JobInputHTTP) AsBasicJobInput() (BasicJobInput, bool) { + return &jih, true +} + +// JobInputs describes a list of inputs to a Job. +type JobInputs struct { + // Inputs - List of inputs to a Job. + Inputs *[]BasicJobInput `json:"inputs,omitempty"` + // OdataType - Possible values include: 'OdataTypeJobInput', 'OdataTypeMicrosoftMediaJobInputClip', 'OdataTypeMicrosoftMediaJobInputs', 'OdataTypeMicrosoftMediaJobInputAsset', 'OdataTypeMicrosoftMediaJobInputHTTP' + OdataType OdataTypeBasicJobInput `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for JobInputs. +func (ji JobInputs) MarshalJSON() ([]byte, error) { + ji.OdataType = OdataTypeMicrosoftMediaJobInputs + objectMap := make(map[string]interface{}) + if ji.Inputs != nil { + objectMap["inputs"] = ji.Inputs + } + if ji.OdataType != "" { + objectMap["@odata.type"] = ji.OdataType + } + return json.Marshal(objectMap) +} + +// AsJobInputClip is the BasicJobInput implementation for JobInputs. +func (ji JobInputs) AsJobInputClip() (*JobInputClip, bool) { + return nil, false +} + +// AsBasicJobInputClip is the BasicJobInput implementation for JobInputs. +func (ji JobInputs) AsBasicJobInputClip() (BasicJobInputClip, bool) { + return nil, false +} + +// AsJobInputs is the BasicJobInput implementation for JobInputs. 
+func (ji JobInputs) AsJobInputs() (*JobInputs, bool) { + return &ji, true +} + +// AsJobInputAsset is the BasicJobInput implementation for JobInputs. +func (ji JobInputs) AsJobInputAsset() (*JobInputAsset, bool) { + return nil, false +} + +// AsJobInputHTTP is the BasicJobInput implementation for JobInputs. +func (ji JobInputs) AsJobInputHTTP() (*JobInputHTTP, bool) { + return nil, false +} + +// AsJobInput is the BasicJobInput implementation for JobInputs. +func (ji JobInputs) AsJobInput() (*JobInput, bool) { + return nil, false +} + +// AsBasicJobInput is the BasicJobInput implementation for JobInputs. +func (ji JobInputs) AsBasicJobInput() (BasicJobInput, bool) { + return &ji, true +} + +// UnmarshalJSON is the custom unmarshaler for JobInputs struct. +func (ji *JobInputs) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "inputs": + if v != nil { + inputs, err := unmarshalBasicJobInputArray(*v) + if err != nil { + return err + } + ji.Inputs = &inputs + } + case "@odata.type": + if v != nil { + var odataType OdataTypeBasicJobInput + err = json.Unmarshal(*v, &odataType) + if err != nil { + return err + } + ji.OdataType = odataType + } + } + } + + return nil +} + +// BasicJobOutput describes all the properties of a JobOutput. +type BasicJobOutput interface { + AsJobOutputAsset() (*JobOutputAsset, bool) + AsJobOutput() (*JobOutput, bool) +} + +// JobOutput describes all the properties of a JobOutput. +type JobOutput struct { + // Error - If the JobOutput is in the Error state, it contains the details of the error. + Error *JobError `json:"error,omitempty"` + // State - Describes the state of the JobOutput. 
Possible values include: 'Canceled', 'Canceling', 'Error', 'Finished', 'Processing', 'Queued', 'Scheduled' + State JobState `json:"state,omitempty"` + // Progress - If the JobOutput is in a Processing state, this contains the Job completion percentage. The value is an estimate and not intended to be used to predict Job completion times. To determine if the JobOutput is complete, use the State property. + Progress *int32 `json:"progress,omitempty"` + // Label - A label that is assigned to a JobOutput in order to help uniquely identify it. This is useful when your Transform has more than one TransformOutput, whereby your Job has more than one JobOutput. In such cases, when you submit the Job, you will add two or more JobOutputs, in the same order as TransformOutputs in the Transform. Subsequently, when you retrieve the Job, either through events or on a GET request, you can use the label to easily identify the JobOutput. If a label is not provided, a default value of '{presetName}_{outputIndex}' will be used, where the preset name is the name of the preset in the corresponding TransformOutput and the output index is the relative index of the this JobOutput within the Job. Note that this index is the same as the relative index of the corresponding TransformOutput within its Transform. 
+ Label *string `json:"label,omitempty"` + // OdataType - Possible values include: 'OdataTypeJobOutput', 'OdataTypeMicrosoftMediaJobOutputAsset' + OdataType OdataTypeBasicJobOutput `json:"@odata.type,omitempty"` +} + +func unmarshalBasicJobOutput(body []byte) (BasicJobOutput, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["@odata.type"] { + case string(OdataTypeMicrosoftMediaJobOutputAsset): + var joa JobOutputAsset + err := json.Unmarshal(body, &joa) + return joa, err + default: + var jo JobOutput + err := json.Unmarshal(body, &jo) + return jo, err + } +} +func unmarshalBasicJobOutputArray(body []byte) ([]BasicJobOutput, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + joArray := make([]BasicJobOutput, len(rawMessages)) + + for index, rawMessage := range rawMessages { + jo, err := unmarshalBasicJobOutput(*rawMessage) + if err != nil { + return nil, err + } + joArray[index] = jo + } + return joArray, nil +} + +// MarshalJSON is the custom marshaler for JobOutput. +func (jo JobOutput) MarshalJSON() ([]byte, error) { + jo.OdataType = OdataTypeJobOutput + objectMap := make(map[string]interface{}) + if jo.Error != nil { + objectMap["error"] = jo.Error + } + if jo.State != "" { + objectMap["state"] = jo.State + } + if jo.Progress != nil { + objectMap["progress"] = jo.Progress + } + if jo.Label != nil { + objectMap["label"] = jo.Label + } + if jo.OdataType != "" { + objectMap["@odata.type"] = jo.OdataType + } + return json.Marshal(objectMap) +} + +// AsJobOutputAsset is the BasicJobOutput implementation for JobOutput. +func (jo JobOutput) AsJobOutputAsset() (*JobOutputAsset, bool) { + return nil, false +} + +// AsJobOutput is the BasicJobOutput implementation for JobOutput. 
+func (jo JobOutput) AsJobOutput() (*JobOutput, bool) { + return &jo, true +} + +// AsBasicJobOutput is the BasicJobOutput implementation for JobOutput. +func (jo JobOutput) AsBasicJobOutput() (BasicJobOutput, bool) { + return &jo, true +} + +// JobOutputAsset represents an Asset used as a JobOutput. +type JobOutputAsset struct { + // AssetName - The name of the output Asset. + AssetName *string `json:"assetName,omitempty"` + // Error - If the JobOutput is in the Error state, it contains the details of the error. + Error *JobError `json:"error,omitempty"` + // State - Describes the state of the JobOutput. Possible values include: 'Canceled', 'Canceling', 'Error', 'Finished', 'Processing', 'Queued', 'Scheduled' + State JobState `json:"state,omitempty"` + // Progress - If the JobOutput is in a Processing state, this contains the Job completion percentage. The value is an estimate and not intended to be used to predict Job completion times. To determine if the JobOutput is complete, use the State property. + Progress *int32 `json:"progress,omitempty"` + // Label - A label that is assigned to a JobOutput in order to help uniquely identify it. This is useful when your Transform has more than one TransformOutput, whereby your Job has more than one JobOutput. In such cases, when you submit the Job, you will add two or more JobOutputs, in the same order as TransformOutputs in the Transform. Subsequently, when you retrieve the Job, either through events or on a GET request, you can use the label to easily identify the JobOutput. If a label is not provided, a default value of '{presetName}_{outputIndex}' will be used, where the preset name is the name of the preset in the corresponding TransformOutput and the output index is the relative index of the this JobOutput within the Job. Note that this index is the same as the relative index of the corresponding TransformOutput within its Transform. 
+ Label *string `json:"label,omitempty"` + // OdataType - Possible values include: 'OdataTypeJobOutput', 'OdataTypeMicrosoftMediaJobOutputAsset' + OdataType OdataTypeBasicJobOutput `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for JobOutputAsset. +func (joa JobOutputAsset) MarshalJSON() ([]byte, error) { + joa.OdataType = OdataTypeMicrosoftMediaJobOutputAsset + objectMap := make(map[string]interface{}) + if joa.AssetName != nil { + objectMap["assetName"] = joa.AssetName + } + if joa.Error != nil { + objectMap["error"] = joa.Error + } + if joa.State != "" { + objectMap["state"] = joa.State + } + if joa.Progress != nil { + objectMap["progress"] = joa.Progress + } + if joa.Label != nil { + objectMap["label"] = joa.Label + } + if joa.OdataType != "" { + objectMap["@odata.type"] = joa.OdataType + } + return json.Marshal(objectMap) +} + +// AsJobOutputAsset is the BasicJobOutput implementation for JobOutputAsset. +func (joa JobOutputAsset) AsJobOutputAsset() (*JobOutputAsset, bool) { + return &joa, true +} + +// AsJobOutput is the BasicJobOutput implementation for JobOutputAsset. +func (joa JobOutputAsset) AsJobOutput() (*JobOutput, bool) { + return nil, false +} + +// AsBasicJobOutput is the BasicJobOutput implementation for JobOutputAsset. +func (joa JobOutputAsset) AsBasicJobOutput() (BasicJobOutput, bool) { + return &joa, true +} + +// JobProperties properties of the Job. +type JobProperties struct { + // Created - The UTC date and time when the Job was created, in 'YYYY-MM-DDThh:mm:ssZ' format. + Created *date.Time `json:"created,omitempty"` + // State - The current state of the job. Possible values include: 'Canceled', 'Canceling', 'Error', 'Finished', 'Processing', 'Queued', 'Scheduled' + State JobState `json:"state,omitempty"` + // Description - Optional customer supplied description of the Job. + Description *string `json:"description,omitempty"` + // Input - The inputs for the Job. 
+ Input BasicJobInput `json:"input,omitempty"` + // LastModified - The UTC date and time when the Job was last updated, in 'YYYY-MM-DDThh:mm:ssZ' format. + LastModified *date.Time `json:"lastModified,omitempty"` + // Outputs - The outputs for the Job. + Outputs *[]BasicJobOutput `json:"outputs,omitempty"` + // Priority - Priority with which the job should be processed. Higher priority jobs are processed before lower priority jobs. If not set, the default is normal. Possible values include: 'Low', 'Normal', 'High' + Priority Priority `json:"priority,omitempty"` + // CorrelationData - Customer provided correlation data that will be returned in Job and JobOutput state events. + CorrelationData map[string]*string `json:"correlationData"` +} + +// MarshalJSON is the custom marshaler for JobProperties. +func (jp JobProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if jp.Created != nil { + objectMap["created"] = jp.Created + } + if jp.State != "" { + objectMap["state"] = jp.State + } + if jp.Description != nil { + objectMap["description"] = jp.Description + } + objectMap["input"] = jp.Input + if jp.LastModified != nil { + objectMap["lastModified"] = jp.LastModified + } + if jp.Outputs != nil { + objectMap["outputs"] = jp.Outputs + } + if jp.Priority != "" { + objectMap["priority"] = jp.Priority + } + if jp.CorrelationData != nil { + objectMap["correlationData"] = jp.CorrelationData + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for JobProperties struct. 
+func (jp *JobProperties) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "created": + if v != nil { + var created date.Time + err = json.Unmarshal(*v, &created) + if err != nil { + return err + } + jp.Created = &created + } + case "state": + if v != nil { + var state JobState + err = json.Unmarshal(*v, &state) + if err != nil { + return err + } + jp.State = state + } + case "description": + if v != nil { + var description string + err = json.Unmarshal(*v, &description) + if err != nil { + return err + } + jp.Description = &description + } + case "input": + if v != nil { + input, err := unmarshalBasicJobInput(*v) + if err != nil { + return err + } + jp.Input = input + } + case "lastModified": + if v != nil { + var lastModified date.Time + err = json.Unmarshal(*v, &lastModified) + if err != nil { + return err + } + jp.LastModified = &lastModified + } + case "outputs": + if v != nil { + outputs, err := unmarshalBasicJobOutputArray(*v) + if err != nil { + return err + } + jp.Outputs = &outputs + } + case "priority": + if v != nil { + var priority Priority + err = json.Unmarshal(*v, &priority) + if err != nil { + return err + } + jp.Priority = priority + } + case "correlationData": + if v != nil { + var correlationData map[string]*string + err = json.Unmarshal(*v, &correlationData) + if err != nil { + return err + } + jp.CorrelationData = correlationData + } + } + } + + return nil +} + +// JpgFormat describes the settings for producing JPEG thumbnails. +type JpgFormat struct { + // FilenamePattern - The pattern of the file names for the generated output files. The following macros are supported in the file name: {Basename} - The base name of the input video {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. 
{Bitrate} - The audio/video bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. Any unsubstituted macros will be collapsed and removed from the filename. + FilenamePattern *string `json:"filenamePattern,omitempty"` + // OdataType - Possible values include: 'OdataTypeFormat', 'OdataTypeMicrosoftMediaImageFormat', 'OdataTypeMicrosoftMediaJpgFormat', 'OdataTypeMicrosoftMediaPngFormat', 'OdataTypeMicrosoftMediaMultiBitrateFormat', 'OdataTypeMicrosoftMediaMp4Format', 'OdataTypeMicrosoftMediaTransportStreamFormat' + OdataType OdataTypeBasicFormat `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for JpgFormat. +func (jf JpgFormat) MarshalJSON() ([]byte, error) { + jf.OdataType = OdataTypeMicrosoftMediaJpgFormat + objectMap := make(map[string]interface{}) + if jf.FilenamePattern != nil { + objectMap["filenamePattern"] = jf.FilenamePattern + } + if jf.OdataType != "" { + objectMap["@odata.type"] = jf.OdataType + } + return json.Marshal(objectMap) +} + +// AsImageFormat is the BasicFormat implementation for JpgFormat. +func (jf JpgFormat) AsImageFormat() (*ImageFormat, bool) { + return nil, false +} + +// AsBasicImageFormat is the BasicFormat implementation for JpgFormat. +func (jf JpgFormat) AsBasicImageFormat() (BasicImageFormat, bool) { + return &jf, true +} + +// AsJpgFormat is the BasicFormat implementation for JpgFormat. +func (jf JpgFormat) AsJpgFormat() (*JpgFormat, bool) { + return &jf, true +} + +// AsPngFormat is the BasicFormat implementation for JpgFormat. +func (jf JpgFormat) AsPngFormat() (*PngFormat, bool) { + return nil, false +} + +// AsMultiBitrateFormat is the BasicFormat implementation for JpgFormat. +func (jf JpgFormat) AsMultiBitrateFormat() (*MultiBitrateFormat, bool) { + return nil, false +} + +// AsBasicMultiBitrateFormat is the BasicFormat implementation for JpgFormat. 
+func (jf JpgFormat) AsBasicMultiBitrateFormat() (BasicMultiBitrateFormat, bool) { + return nil, false +} + +// AsMp4Format is the BasicFormat implementation for JpgFormat. +func (jf JpgFormat) AsMp4Format() (*Mp4Format, bool) { + return nil, false +} + +// AsTransportStreamFormat is the BasicFormat implementation for JpgFormat. +func (jf JpgFormat) AsTransportStreamFormat() (*TransportStreamFormat, bool) { + return nil, false +} + +// AsFormat is the BasicFormat implementation for JpgFormat. +func (jf JpgFormat) AsFormat() (*Format, bool) { + return nil, false +} + +// AsBasicFormat is the BasicFormat implementation for JpgFormat. +func (jf JpgFormat) AsBasicFormat() (BasicFormat, bool) { + return &jf, true +} + +// JpgImage describes the properties for producing a series of JPEG images from the input video. +type JpgImage struct { + // Layers - A collection of output JPEG image layers to be produced by the encoder. + Layers *[]JpgLayer `json:"layers,omitempty"` + // Start - The position in the input video from where to start generating thumbnails. The value can be in absolute timestamp (ISO 8601, e.g: PT05S), or a frame count (For example, 10 for the 10th frame), or a relative value (For example, 1%). Also supports a macro {Best}, which tells the encoder to select the best thumbnail from the first few seconds of the video. + Start *string `json:"start,omitempty"` + // Step - The intervals at which thumbnails are generated. The value can be in absolute timestamp (ISO 8601, e.g: PT05S for one image every 5 seconds), or a frame count (For example, 30 for every 30 frames), or a relative value (For example, 1%). + Step *string `json:"step,omitempty"` + // Range - The position in the input video at which to stop generating thumbnails. The value can be in absolute timestamp (ISO 8601, e.g: PT5M30S to stop at 5 minutes and 30 seconds), or a frame count (For example, 300 to stop at the 300th frame), or a relative value (For example, 100%). 
+ Range *string `json:"range,omitempty"` + // KeyFrameInterval - The distance between two key frames, thereby defining a group of pictures (GOP). The value should be a non-zero integer in the range [1, 30] seconds, specified in ISO 8601 format. The default is 2 seconds (PT2S). + KeyFrameInterval *string `json:"keyFrameInterval,omitempty"` + // StretchMode - The resizing mode - how the input video will be resized to fit the desired output resolution(s). Default is AutoSize. Possible values include: 'StretchModeNone', 'StretchModeAutoSize', 'StretchModeAutoFit' + StretchMode StretchMode `json:"stretchMode,omitempty"` + // Label - An optional label for the codec. The label can be used to control muxing behavior. + Label *string `json:"label,omitempty"` + // OdataType - Possible values include: 'OdataTypeCodec', 'OdataTypeMicrosoftMediaAudio', 'OdataTypeMicrosoftMediaAacAudio', 'OdataTypeMicrosoftMediaCopyVideo', 'OdataTypeMicrosoftMediaVideo', 'OdataTypeMicrosoftMediaImage', 'OdataTypeMicrosoftMediaCopyAudio', 'OdataTypeMicrosoftMediaH264Video', 'OdataTypeMicrosoftMediaJpgImage', 'OdataTypeMicrosoftMediaPngImage' + OdataType OdataTypeBasicCodec `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for JpgImage. 
+func (ji JpgImage) MarshalJSON() ([]byte, error) { + ji.OdataType = OdataTypeMicrosoftMediaJpgImage + objectMap := make(map[string]interface{}) + if ji.Layers != nil { + objectMap["layers"] = ji.Layers + } + if ji.Start != nil { + objectMap["start"] = ji.Start + } + if ji.Step != nil { + objectMap["step"] = ji.Step + } + if ji.Range != nil { + objectMap["range"] = ji.Range + } + if ji.KeyFrameInterval != nil { + objectMap["keyFrameInterval"] = ji.KeyFrameInterval + } + if ji.StretchMode != "" { + objectMap["stretchMode"] = ji.StretchMode + } + if ji.Label != nil { + objectMap["label"] = ji.Label + } + if ji.OdataType != "" { + objectMap["@odata.type"] = ji.OdataType + } + return json.Marshal(objectMap) +} + +// AsAudio is the BasicCodec implementation for JpgImage. +func (ji JpgImage) AsAudio() (*Audio, bool) { + return nil, false +} + +// AsBasicAudio is the BasicCodec implementation for JpgImage. +func (ji JpgImage) AsBasicAudio() (BasicAudio, bool) { + return nil, false +} + +// AsAacAudio is the BasicCodec implementation for JpgImage. +func (ji JpgImage) AsAacAudio() (*AacAudio, bool) { + return nil, false +} + +// AsCopyVideo is the BasicCodec implementation for JpgImage. +func (ji JpgImage) AsCopyVideo() (*CopyVideo, bool) { + return nil, false +} + +// AsVideo is the BasicCodec implementation for JpgImage. +func (ji JpgImage) AsVideo() (*Video, bool) { + return nil, false +} + +// AsBasicVideo is the BasicCodec implementation for JpgImage. +func (ji JpgImage) AsBasicVideo() (BasicVideo, bool) { + return &ji, true +} + +// AsImage is the BasicCodec implementation for JpgImage. +func (ji JpgImage) AsImage() (*Image, bool) { + return nil, false +} + +// AsBasicImage is the BasicCodec implementation for JpgImage. +func (ji JpgImage) AsBasicImage() (BasicImage, bool) { + return &ji, true +} + +// AsCopyAudio is the BasicCodec implementation for JpgImage. 
+func (ji JpgImage) AsCopyAudio() (*CopyAudio, bool) { + return nil, false +} + +// AsH264Video is the BasicCodec implementation for JpgImage. +func (ji JpgImage) AsH264Video() (*H264Video, bool) { + return nil, false +} + +// AsJpgImage is the BasicCodec implementation for JpgImage. +func (ji JpgImage) AsJpgImage() (*JpgImage, bool) { + return &ji, true +} + +// AsPngImage is the BasicCodec implementation for JpgImage. +func (ji JpgImage) AsPngImage() (*PngImage, bool) { + return nil, false +} + +// AsCodec is the BasicCodec implementation for JpgImage. +func (ji JpgImage) AsCodec() (*Codec, bool) { + return nil, false +} + +// AsBasicCodec is the BasicCodec implementation for JpgImage. +func (ji JpgImage) AsBasicCodec() (BasicCodec, bool) { + return &ji, true +} + +// JpgLayer describes the settings to produce a JPEG image from the input video. +type JpgLayer struct { + // Quality - The compression quality of the JPEG output. Range is from 0-100 and the default is 70. + Quality *int32 `json:"quality,omitempty"` + // Width - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input. + Width *string `json:"width,omitempty"` + // Height - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input. + Height *string `json:"height,omitempty"` + // Label - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file. 
+ Label *string `json:"label,omitempty"` + // OdataType - Possible values include: 'OdataTypeLayer', 'OdataTypeMicrosoftMediaVideoLayer', 'OdataTypeMicrosoftMediaH264Layer', 'OdataTypeMicrosoftMediaJpgLayer', 'OdataTypeMicrosoftMediaPngLayer' + OdataType OdataTypeBasicLayer `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for JpgLayer. +func (jl JpgLayer) MarshalJSON() ([]byte, error) { + jl.OdataType = OdataTypeMicrosoftMediaJpgLayer + objectMap := make(map[string]interface{}) + if jl.Quality != nil { + objectMap["quality"] = jl.Quality + } + if jl.Width != nil { + objectMap["width"] = jl.Width + } + if jl.Height != nil { + objectMap["height"] = jl.Height + } + if jl.Label != nil { + objectMap["label"] = jl.Label + } + if jl.OdataType != "" { + objectMap["@odata.type"] = jl.OdataType + } + return json.Marshal(objectMap) +} + +// AsVideoLayer is the BasicLayer implementation for JpgLayer. +func (jl JpgLayer) AsVideoLayer() (*VideoLayer, bool) { + return nil, false +} + +// AsBasicVideoLayer is the BasicLayer implementation for JpgLayer. +func (jl JpgLayer) AsBasicVideoLayer() (BasicVideoLayer, bool) { + return nil, false +} + +// AsH264Layer is the BasicLayer implementation for JpgLayer. +func (jl JpgLayer) AsH264Layer() (*H264Layer, bool) { + return nil, false +} + +// AsJpgLayer is the BasicLayer implementation for JpgLayer. +func (jl JpgLayer) AsJpgLayer() (*JpgLayer, bool) { + return &jl, true +} + +// AsPngLayer is the BasicLayer implementation for JpgLayer. +func (jl JpgLayer) AsPngLayer() (*PngLayer, bool) { + return nil, false +} + +// AsLayer is the BasicLayer implementation for JpgLayer. +func (jl JpgLayer) AsLayer() (*Layer, bool) { + return nil, false +} + +// AsBasicLayer is the BasicLayer implementation for JpgLayer. 
+func (jl JpgLayer) AsBasicLayer() (BasicLayer, bool) { + return &jl, true +} + +// BasicLayer the encoder can be configured to produce video and/or images (thumbnails) at different resolutions, by +// specifying a layer for each desired resolution. A layer represents the properties for the video or image at a +// resolution. +type BasicLayer interface { + AsVideoLayer() (*VideoLayer, bool) + AsBasicVideoLayer() (BasicVideoLayer, bool) + AsH264Layer() (*H264Layer, bool) + AsJpgLayer() (*JpgLayer, bool) + AsPngLayer() (*PngLayer, bool) + AsLayer() (*Layer, bool) +} + +// Layer the encoder can be configured to produce video and/or images (thumbnails) at different resolutions, by +// specifying a layer for each desired resolution. A layer represents the properties for the video or image at +// a resolution. +type Layer struct { + // Width - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input. + Width *string `json:"width,omitempty"` + // Height - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input. + Height *string `json:"height,omitempty"` + // Label - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file. 
+ Label *string `json:"label,omitempty"` + // OdataType - Possible values include: 'OdataTypeLayer', 'OdataTypeMicrosoftMediaVideoLayer', 'OdataTypeMicrosoftMediaH264Layer', 'OdataTypeMicrosoftMediaJpgLayer', 'OdataTypeMicrosoftMediaPngLayer' + OdataType OdataTypeBasicLayer `json:"@odata.type,omitempty"` +} + +func unmarshalBasicLayer(body []byte) (BasicLayer, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["@odata.type"] { + case string(OdataTypeMicrosoftMediaVideoLayer): + var vl VideoLayer + err := json.Unmarshal(body, &vl) + return vl, err + case string(OdataTypeMicrosoftMediaH264Layer): + var hl H264Layer + err := json.Unmarshal(body, &hl) + return hl, err + case string(OdataTypeMicrosoftMediaJpgLayer): + var jl JpgLayer + err := json.Unmarshal(body, &jl) + return jl, err + case string(OdataTypeMicrosoftMediaPngLayer): + var pl PngLayer + err := json.Unmarshal(body, &pl) + return pl, err + default: + var l Layer + err := json.Unmarshal(body, &l) + return l, err + } +} +func unmarshalBasicLayerArray(body []byte) ([]BasicLayer, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + lArray := make([]BasicLayer, len(rawMessages)) + + for index, rawMessage := range rawMessages { + l, err := unmarshalBasicLayer(*rawMessage) + if err != nil { + return nil, err + } + lArray[index] = l + } + return lArray, nil +} + +// MarshalJSON is the custom marshaler for Layer. 
+func (l Layer) MarshalJSON() ([]byte, error) { + l.OdataType = OdataTypeLayer + objectMap := make(map[string]interface{}) + if l.Width != nil { + objectMap["width"] = l.Width + } + if l.Height != nil { + objectMap["height"] = l.Height + } + if l.Label != nil { + objectMap["label"] = l.Label + } + if l.OdataType != "" { + objectMap["@odata.type"] = l.OdataType + } + return json.Marshal(objectMap) +} + +// AsVideoLayer is the BasicLayer implementation for Layer. +func (l Layer) AsVideoLayer() (*VideoLayer, bool) { + return nil, false +} + +// AsBasicVideoLayer is the BasicLayer implementation for Layer. +func (l Layer) AsBasicVideoLayer() (BasicVideoLayer, bool) { + return nil, false +} + +// AsH264Layer is the BasicLayer implementation for Layer. +func (l Layer) AsH264Layer() (*H264Layer, bool) { + return nil, false +} + +// AsJpgLayer is the BasicLayer implementation for Layer. +func (l Layer) AsJpgLayer() (*JpgLayer, bool) { + return nil, false +} + +// AsPngLayer is the BasicLayer implementation for Layer. +func (l Layer) AsPngLayer() (*PngLayer, bool) { + return nil, false +} + +// AsLayer is the BasicLayer implementation for Layer. +func (l Layer) AsLayer() (*Layer, bool) { + return &l, true +} + +// AsBasicLayer is the BasicLayer implementation for Layer. +func (l Layer) AsBasicLayer() (BasicLayer, bool) { + return &l, true +} + +// ListContainerSasInput the parameters to the list SAS request. +type ListContainerSasInput struct { + // Permissions - The permissions to set on the SAS URL. Possible values include: 'Read', 'ReadWrite', 'ReadWriteDelete' + Permissions AssetContainerPermission `json:"permissions,omitempty"` + // ExpiryTime - The SAS URL expiration time. This must be less than 24 hours from the current time. 
+ ExpiryTime *date.Time `json:"expiryTime,omitempty"` +} + +// ListContentKeysResponse class of response for listContentKeys action +type ListContentKeysResponse struct { + autorest.Response `json:"-"` + // ContentKeys - ContentKeys used by current Streaming Locator + ContentKeys *[]StreamingLocatorContentKey `json:"contentKeys,omitempty"` +} + +// ListPathsResponse class of response for listPaths action +type ListPathsResponse struct { + autorest.Response `json:"-"` + // StreamingPaths - Streaming Paths supported by current Streaming Locator + StreamingPaths *[]StreamingPath `json:"streamingPaths,omitempty"` + // DownloadPaths - Download Paths supported by current Streaming Locator + DownloadPaths *[]string `json:"downloadPaths,omitempty"` +} + +// ListStreamingLocatorsResponse the Streaming Locators associated with this Asset. +type ListStreamingLocatorsResponse struct { + autorest.Response `json:"-"` + // StreamingLocators - The list of Streaming Locators. + StreamingLocators *[]AssetStreamingLocator `json:"streamingLocators,omitempty"` +} + +// LiveEvent the Live Event. +type LiveEvent struct { + autorest.Response `json:"-"` + // LiveEventProperties - The Live Event properties. + *LiveEventProperties `json:"properties,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` + // Location - The Azure Region of the resource. + Location *string `json:"location,omitempty"` + // ID - Fully qualified resource ID for the resource. + ID *string `json:"id,omitempty"` + // Name - The name of the resource. + Name *string `json:"name,omitempty"` + // Type - The type of the resource. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for LiveEvent. 
+func (le LiveEvent) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if le.LiveEventProperties != nil { + objectMap["properties"] = le.LiveEventProperties + } + if le.Tags != nil { + objectMap["tags"] = le.Tags + } + if le.Location != nil { + objectMap["location"] = le.Location + } + if le.ID != nil { + objectMap["id"] = le.ID + } + if le.Name != nil { + objectMap["name"] = le.Name + } + if le.Type != nil { + objectMap["type"] = le.Type + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for LiveEvent struct. +func (le *LiveEvent) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var liveEventProperties LiveEventProperties + err = json.Unmarshal(*v, &liveEventProperties) + if err != nil { + return err + } + le.LiveEventProperties = &liveEventProperties + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + le.Tags = tags + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + le.Location = &location + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + le.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + le.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + le.Type = &typeVar + } + } + } + + return nil +} + +// LiveEventActionInput the LiveEvent action input parameter definition. +type LiveEventActionInput struct { + // RemoveOutputsOnStop - The flag indicates if remove LiveOutputs on Stop. 
+ RemoveOutputsOnStop *bool `json:"removeOutputsOnStop,omitempty"` +} + +// LiveEventEncoding the Live Event encoding. +type LiveEventEncoding struct { + // EncodingType - The encoding type for Live Event. This value is specified at creation time and cannot be updated. Possible values include: 'LiveEventEncodingTypeNone', 'LiveEventEncodingTypeBasic', 'LiveEventEncodingTypeStandard' + EncodingType LiveEventEncodingType `json:"encodingType,omitempty"` + // PresetName - The encoding preset name. This value is specified at creation time and cannot be updated. + PresetName *string `json:"presetName,omitempty"` +} + +// LiveEventEndpoint the Live Event endpoint. +type LiveEventEndpoint struct { + // Protocol - The endpoint protocol. + Protocol *string `json:"protocol,omitempty"` + // URL - The endpoint URL. + URL *string `json:"url,omitempty"` +} + +// LiveEventInput the Live Event input. +type LiveEventInput struct { + // StreamingProtocol - The streaming protocol for the Live Event. This is specified at creation time and cannot be updated. Possible values include: 'FragmentedMP4', 'RTMP' + StreamingProtocol LiveEventInputProtocol `json:"streamingProtocol,omitempty"` + // AccessControl - The access control for LiveEvent Input. + AccessControl *LiveEventInputAccessControl `json:"accessControl,omitempty"` + // KeyFrameIntervalDuration - ISO 8601 timespan duration of the key frame interval duration. + KeyFrameIntervalDuration *string `json:"keyFrameIntervalDuration,omitempty"` + // AccessToken - A unique identifier for a stream. This can be specified at creation time but cannot be updated. If omitted, the service will generate a unique value. + AccessToken *string `json:"accessToken,omitempty"` + // Endpoints - The input endpoints for the Live Event. + Endpoints *[]LiveEventEndpoint `json:"endpoints,omitempty"` +} + +// LiveEventInputAccessControl the IP access control for Live Event Input. 
+type LiveEventInputAccessControl struct {
+	// IP - The IP access control properties.
+	IP *IPAccessControl `json:"ip,omitempty"`
+}
+
+// LiveEventListResult the LiveEvent list result.
+type LiveEventListResult struct {
+	autorest.Response `json:"-"`
+	// Value - The result of the List Live Event operation.
+	Value *[]LiveEvent `json:"value,omitempty"`
+	// OdataCount - The number of results.
+	OdataCount *int32 `json:"@odata.count,omitempty"`
+	// OdataNextLink - The link to the next set of results. Not empty if value contains incomplete list of Live Outputs.
+	OdataNextLink *string `json:"@odata.nextLink,omitempty"`
+}
+
+// LiveEventListResultIterator provides access to a complete listing of LiveEvent values.
+type LiveEventListResultIterator struct {
+	i    int
+	page LiveEventListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *LiveEventListResultIterator) NextWithContext(ctx context.Context) (err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/LiveEventListResultIterator.NextWithContext")
+		defer func() {
+			sc := -1
+			if iter.Response().Response.Response != nil {
+				sc = iter.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	iter.i++
+	if iter.i < len(iter.page.Values()) {
+		return nil
+	}
+	err = iter.page.NextWithContext(ctx)
+	if err != nil {
+		iter.i--
+		return err
+	}
+	iter.i = 0
+	return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *LiveEventListResultIterator) Next() error {
+	return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter LiveEventListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter LiveEventListResultIterator) Response() LiveEventListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter LiveEventListResultIterator) Value() LiveEvent { + if !iter.page.NotDone() { + return LiveEvent{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the LiveEventListResultIterator type. +func NewLiveEventListResultIterator(page LiveEventListResultPage) LiveEventListResultIterator { + return LiveEventListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (lelr LiveEventListResult) IsEmpty() bool { + return lelr.Value == nil || len(*lelr.Value) == 0 +} + +// liveEventListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (lelr LiveEventListResult) liveEventListResultPreparer(ctx context.Context) (*http.Request, error) { + if lelr.OdataNextLink == nil || len(to.String(lelr.OdataNextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(lelr.OdataNextLink))) +} + +// LiveEventListResultPage contains a page of LiveEvent values. +type LiveEventListResultPage struct { + fn func(context.Context, LiveEventListResult) (LiveEventListResult, error) + lelr LiveEventListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *LiveEventListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LiveEventListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.lelr) + if err != nil { + return err + } + page.lelr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *LiveEventListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page LiveEventListResultPage) NotDone() bool { + return !page.lelr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page LiveEventListResultPage) Response() LiveEventListResult { + return page.lelr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page LiveEventListResultPage) Values() []LiveEvent { + if page.lelr.IsEmpty() { + return nil + } + return *page.lelr.Value +} + +// Creates a new instance of the LiveEventListResultPage type. +func NewLiveEventListResultPage(getNextPage func(context.Context, LiveEventListResult) (LiveEventListResult, error)) LiveEventListResultPage { + return LiveEventListResultPage{fn: getNextPage} +} + +// LiveEventPreview the Live Event preview. +type LiveEventPreview struct { + // Endpoints - The endpoints for preview. + Endpoints *[]LiveEventEndpoint `json:"endpoints,omitempty"` + // AccessControl - The access control for LiveEvent preview. 
+ AccessControl *LiveEventPreviewAccessControl `json:"accessControl,omitempty"` + // PreviewLocator - The identifier of the preview locator in Guid format. Specifying this at creation time allows the caller to know the preview locator url before the event is created. If omitted, the service will generate a random identifier. This value cannot be updated once the live event is created. + PreviewLocator *string `json:"previewLocator,omitempty"` + // StreamingPolicyName - The name of streaming policy used for the LiveEvent preview. This value is specified at creation time and cannot be updated. + StreamingPolicyName *string `json:"streamingPolicyName,omitempty"` + // AlternativeMediaID - An Alternative Media Identifier associated with the StreamingLocator created for the preview. This value is specified at creation time and cannot be updated. The identifier can be used in the CustomLicenseAcquisitionUrlTemplate or the CustomKeyAcquisitionUrlTemplate of the StreamingPolicy specified in the StreamingPolicyName field. + AlternativeMediaID *string `json:"alternativeMediaId,omitempty"` +} + +// LiveEventPreviewAccessControl the IP access control for Live Event preview. +type LiveEventPreviewAccessControl struct { + // IP - The IP access control properties. + IP *IPAccessControl `json:"ip,omitempty"` +} + +// LiveEventProperties the Live Event properties. +type LiveEventProperties struct { + // Description - The Live Event description. + Description *string `json:"description,omitempty"` + // Input - The Live Event input. + Input *LiveEventInput `json:"input,omitempty"` + // Preview - The Live Event preview. + Preview *LiveEventPreview `json:"preview,omitempty"` + // Encoding - The Live Event encoding. + Encoding *LiveEventEncoding `json:"encoding,omitempty"` + // ProvisioningState - The provisioning state of the Live Event. + ProvisioningState *string `json:"provisioningState,omitempty"` + // ResourceState - The resource state of the Live Event. 
Possible values include: 'Stopped', 'Starting', 'Running', 'Stopping', 'Deleting' + ResourceState LiveEventResourceState `json:"resourceState,omitempty"` + // CrossSiteAccessPolicies - The Live Event access policies. + CrossSiteAccessPolicies *CrossSiteAccessPolicies `json:"crossSiteAccessPolicies,omitempty"` + // VanityURL - Specifies whether to use a vanity url with the Live Event. This value is specified at creation time and cannot be updated. + VanityURL *bool `json:"vanityUrl,omitempty"` + // StreamOptions - The options to use for the LiveEvent. This value is specified at creation time and cannot be updated. + StreamOptions *[]StreamOptionsFlag `json:"streamOptions,omitempty"` + // Created - The exact time the Live Event was created. + Created *date.Time `json:"created,omitempty"` + // LastModified - The exact time the Live Event was last modified. + LastModified *date.Time `json:"lastModified,omitempty"` +} + +// LiveEventsCreateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type LiveEventsCreateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *LiveEventsCreateFuture) Result(client LiveEventsClient) (le LiveEvent, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveEventsCreateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("media.LiveEventsCreateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if le.Response.Response, err = future.GetResult(sender); err == nil && le.Response.Response.StatusCode != http.StatusNoContent { + le, err = client.CreateResponder(le.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveEventsCreateFuture", "Result", le.Response.Response, "Failure responding to request") + } + } + return +} + +// LiveEventsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type LiveEventsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *LiveEventsDeleteFuture) Result(client LiveEventsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveEventsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("media.LiveEventsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// LiveEventsResetFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type LiveEventsResetFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *LiveEventsResetFuture) Result(client LiveEventsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveEventsResetFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("media.LiveEventsResetFuture") + return + } + ar.Response = future.Response() + return +} + +// LiveEventsStartFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type LiveEventsStartFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *LiveEventsStartFuture) Result(client LiveEventsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveEventsStartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("media.LiveEventsStartFuture") + return + } + ar.Response = future.Response() + return +} + +// LiveEventsStopFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type LiveEventsStopFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *LiveEventsStopFuture) Result(client LiveEventsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveEventsStopFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("media.LiveEventsStopFuture") + return + } + ar.Response = future.Response() + return +} + +// LiveEventsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type LiveEventsUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *LiveEventsUpdateFuture) Result(client LiveEventsClient) (le LiveEvent, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveEventsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("media.LiveEventsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if le.Response.Response, err = future.GetResult(sender); err == nil && le.Response.Response.StatusCode != http.StatusNoContent { + le, err = client.UpdateResponder(le.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveEventsUpdateFuture", "Result", le.Response.Response, "Failure responding to request") + } + } + return +} + +// LiveOutput the Live Output. +type LiveOutput struct { + autorest.Response `json:"-"` + // LiveOutputProperties - The Live Output properties. + *LiveOutputProperties `json:"properties,omitempty"` + // ID - Fully qualified resource ID for the resource. + ID *string `json:"id,omitempty"` + // Name - The name of the resource. 
+ Name *string `json:"name,omitempty"` + // Type - The type of the resource. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for LiveOutput. +func (lo LiveOutput) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if lo.LiveOutputProperties != nil { + objectMap["properties"] = lo.LiveOutputProperties + } + if lo.ID != nil { + objectMap["id"] = lo.ID + } + if lo.Name != nil { + objectMap["name"] = lo.Name + } + if lo.Type != nil { + objectMap["type"] = lo.Type + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for LiveOutput struct. +func (lo *LiveOutput) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var liveOutputProperties LiveOutputProperties + err = json.Unmarshal(*v, &liveOutputProperties) + if err != nil { + return err + } + lo.LiveOutputProperties = &liveOutputProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + lo.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + lo.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + lo.Type = &typeVar + } + } + } + + return nil +} + +// LiveOutputListResult the LiveOutput list result. +type LiveOutputListResult struct { + autorest.Response `json:"-"` + // Value - The result of the List Live Output operation. + Value *[]LiveOutput `json:"value,omitempty"` + // OdataCount - The number of result. + OdataCount *int32 `json:"@odata.count,omitempty"` + // OdataNextLink - Th link to the next set of results. Not empty if value contains incomplete list of Live Outputs. 
+ OdataNextLink *string `json:"@odata.nextLink,omitempty"` +} + +// LiveOutputListResultIterator provides access to a complete listing of LiveOutput values. +type LiveOutputListResultIterator struct { + i int + page LiveOutputListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *LiveOutputListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LiveOutputListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *LiveOutputListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter LiveOutputListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter LiveOutputListResultIterator) Response() LiveOutputListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. 
+func (iter LiveOutputListResultIterator) Value() LiveOutput { + if !iter.page.NotDone() { + return LiveOutput{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the LiveOutputListResultIterator type. +func NewLiveOutputListResultIterator(page LiveOutputListResultPage) LiveOutputListResultIterator { + return LiveOutputListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (lolr LiveOutputListResult) IsEmpty() bool { + return lolr.Value == nil || len(*lolr.Value) == 0 +} + +// liveOutputListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (lolr LiveOutputListResult) liveOutputListResultPreparer(ctx context.Context) (*http.Request, error) { + if lolr.OdataNextLink == nil || len(to.String(lolr.OdataNextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(lolr.OdataNextLink))) +} + +// LiveOutputListResultPage contains a page of LiveOutput values. +type LiveOutputListResultPage struct { + fn func(context.Context, LiveOutputListResult) (LiveOutputListResult, error) + lolr LiveOutputListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *LiveOutputListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LiveOutputListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.lolr) + if err != nil { + return err + } + page.lolr = next + return nil +} + +// Next advances to the next page of values. 
If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *LiveOutputListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page LiveOutputListResultPage) NotDone() bool { + return !page.lolr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page LiveOutputListResultPage) Response() LiveOutputListResult { + return page.lolr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page LiveOutputListResultPage) Values() []LiveOutput { + if page.lolr.IsEmpty() { + return nil + } + return *page.lolr.Value +} + +// Creates a new instance of the LiveOutputListResultPage type. +func NewLiveOutputListResultPage(getNextPage func(context.Context, LiveOutputListResult) (LiveOutputListResult, error)) LiveOutputListResultPage { + return LiveOutputListResultPage{fn: getNextPage} +} + +// LiveOutputProperties the JSON object that contains the properties required to create a Live Output. +type LiveOutputProperties struct { + // Description - The description of the Live Output. + Description *string `json:"description,omitempty"` + // AssetName - The asset name. + AssetName *string `json:"assetName,omitempty"` + // ArchiveWindowLength - ISO 8601 timespan duration of the archive window length. This is duration that customer want to retain the recorded content. + ArchiveWindowLength *string `json:"archiveWindowLength,omitempty"` + // ManifestName - The manifest file name. If not provided, the service will generate one automatically. + ManifestName *string `json:"manifestName,omitempty"` + // Hls - The HLS configuration. + Hls *Hls `json:"hls,omitempty"` + // OutputSnapTime - The output snapshot time. 
+ OutputSnapTime *int64 `json:"outputSnapTime,omitempty"` + // Created - The exact time the Live Output was created. + Created *date.Time `json:"created,omitempty"` + // LastModified - The exact time the Live Output was last modified. + LastModified *date.Time `json:"lastModified,omitempty"` + // ProvisioningState - The provisioning state of the Live Output. + ProvisioningState *string `json:"provisioningState,omitempty"` + // ResourceState - The resource state of the Live Output. Possible values include: 'LiveOutputResourceStateCreating', 'LiveOutputResourceStateRunning', 'LiveOutputResourceStateDeleting' + ResourceState LiveOutputResourceState `json:"resourceState,omitempty"` +} + +// LiveOutputsCreateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type LiveOutputsCreateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *LiveOutputsCreateFuture) Result(client LiveOutputsClient) (lo LiveOutput, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveOutputsCreateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("media.LiveOutputsCreateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if lo.Response.Response, err = future.GetResult(sender); err == nil && lo.Response.Response.StatusCode != http.StatusNoContent { + lo, err = client.CreateResponder(lo.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveOutputsCreateFuture", "Result", lo.Response.Response, "Failure responding to request") + } + } + return +} + +// LiveOutputsDeleteFuture an abstraction for monitoring and retrieving the 
results of a long-running +// operation. +type LiveOutputsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *LiveOutputsDeleteFuture) Result(client LiveOutputsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "media.LiveOutputsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("media.LiveOutputsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// Location ... +type Location struct { + Name *string `json:"name,omitempty"` +} + +// Metric a metric emitted by service. +type Metric struct { + // Name - The metric name. + Name *string `json:"name,omitempty"` + // DisplayName - The metric display name. + DisplayName *string `json:"displayName,omitempty"` + // DisplayDescription - The metric display description. + DisplayDescription *string `json:"displayDescription,omitempty"` + // Unit - The metric unit. Possible values include: 'MetricUnitBytes', 'MetricUnitCount', 'MetricUnitMilliseconds' + Unit MetricUnit `json:"unit,omitempty"` + // AggregationType - The metric aggregation type. Possible values include: 'Average', 'Count', 'Total' + AggregationType MetricAggregationType `json:"aggregationType,omitempty"` + // Dimensions - The metric dimensions. + Dimensions *[]MetricDimension `json:"dimensions,omitempty"` +} + +// MetricDimension a metric dimension. +type MetricDimension struct { + // Name - The metric dimension name. + Name *string `json:"name,omitempty"` + // DisplayName - The display name for the dimension. + DisplayName *string `json:"displayName,omitempty"` + // ToBeExportedForShoebox - Whether to export metric to shoebox. 
+ ToBeExportedForShoebox *bool `json:"toBeExportedForShoebox,omitempty"` +} + +// MetricProperties metric properties. +type MetricProperties struct { + // ServiceSpecification - The service specifications. + ServiceSpecification *ServiceSpecification `json:"serviceSpecification,omitempty"` +} + +// Mp4Format describes the properties for an output ISO MP4 file. +type Mp4Format struct { + // OutputFiles - The list of output files to produce. Each entry in the list is a set of audio and video layer labels to be muxed together . + OutputFiles *[]OutputFile `json:"outputFiles,omitempty"` + // FilenamePattern - The pattern of the file names for the generated output files. The following macros are supported in the file name: {Basename} - The base name of the input video {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. Any unsubstituted macros will be collapsed and removed from the filename. + FilenamePattern *string `json:"filenamePattern,omitempty"` + // OdataType - Possible values include: 'OdataTypeFormat', 'OdataTypeMicrosoftMediaImageFormat', 'OdataTypeMicrosoftMediaJpgFormat', 'OdataTypeMicrosoftMediaPngFormat', 'OdataTypeMicrosoftMediaMultiBitrateFormat', 'OdataTypeMicrosoftMediaMp4Format', 'OdataTypeMicrosoftMediaTransportStreamFormat' + OdataType OdataTypeBasicFormat `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for Mp4Format. 
+func (m4f Mp4Format) MarshalJSON() ([]byte, error) { + m4f.OdataType = OdataTypeMicrosoftMediaMp4Format + objectMap := make(map[string]interface{}) + if m4f.OutputFiles != nil { + objectMap["outputFiles"] = m4f.OutputFiles + } + if m4f.FilenamePattern != nil { + objectMap["filenamePattern"] = m4f.FilenamePattern + } + if m4f.OdataType != "" { + objectMap["@odata.type"] = m4f.OdataType + } + return json.Marshal(objectMap) +} + +// AsImageFormat is the BasicFormat implementation for Mp4Format. +func (m4f Mp4Format) AsImageFormat() (*ImageFormat, bool) { + return nil, false +} + +// AsBasicImageFormat is the BasicFormat implementation for Mp4Format. +func (m4f Mp4Format) AsBasicImageFormat() (BasicImageFormat, bool) { + return nil, false +} + +// AsJpgFormat is the BasicFormat implementation for Mp4Format. +func (m4f Mp4Format) AsJpgFormat() (*JpgFormat, bool) { + return nil, false +} + +// AsPngFormat is the BasicFormat implementation for Mp4Format. +func (m4f Mp4Format) AsPngFormat() (*PngFormat, bool) { + return nil, false +} + +// AsMultiBitrateFormat is the BasicFormat implementation for Mp4Format. +func (m4f Mp4Format) AsMultiBitrateFormat() (*MultiBitrateFormat, bool) { + return nil, false +} + +// AsBasicMultiBitrateFormat is the BasicFormat implementation for Mp4Format. +func (m4f Mp4Format) AsBasicMultiBitrateFormat() (BasicMultiBitrateFormat, bool) { + return &m4f, true +} + +// AsMp4Format is the BasicFormat implementation for Mp4Format. +func (m4f Mp4Format) AsMp4Format() (*Mp4Format, bool) { + return &m4f, true +} + +// AsTransportStreamFormat is the BasicFormat implementation for Mp4Format. +func (m4f Mp4Format) AsTransportStreamFormat() (*TransportStreamFormat, bool) { + return nil, false +} + +// AsFormat is the BasicFormat implementation for Mp4Format. +func (m4f Mp4Format) AsFormat() (*Format, bool) { + return nil, false +} + +// AsBasicFormat is the BasicFormat implementation for Mp4Format. 
+func (m4f Mp4Format) AsBasicFormat() (BasicFormat, bool) { + return &m4f, true +} + +// BasicMultiBitrateFormat describes the properties for producing a collection of GOP aligned multi-bitrate files. The +// default behavior is to produce one output file for each video layer which is muxed together with all the audios. The +// exact output files produced can be controlled by specifying the outputFiles collection. +type BasicMultiBitrateFormat interface { + AsMp4Format() (*Mp4Format, bool) + AsTransportStreamFormat() (*TransportStreamFormat, bool) + AsMultiBitrateFormat() (*MultiBitrateFormat, bool) +} + +// MultiBitrateFormat describes the properties for producing a collection of GOP aligned multi-bitrate files. +// The default behavior is to produce one output file for each video layer which is muxed together with all the +// audios. The exact output files produced can be controlled by specifying the outputFiles collection. +type MultiBitrateFormat struct { + // OutputFiles - The list of output files to produce. Each entry in the list is a set of audio and video layer labels to be muxed together . + OutputFiles *[]OutputFile `json:"outputFiles,omitempty"` + // FilenamePattern - The pattern of the file names for the generated output files. The following macros are supported in the file name: {Basename} - The base name of the input video {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. Any unsubstituted macros will be collapsed and removed from the filename. 
+ FilenamePattern *string `json:"filenamePattern,omitempty"` + // OdataType - Possible values include: 'OdataTypeFormat', 'OdataTypeMicrosoftMediaImageFormat', 'OdataTypeMicrosoftMediaJpgFormat', 'OdataTypeMicrosoftMediaPngFormat', 'OdataTypeMicrosoftMediaMultiBitrateFormat', 'OdataTypeMicrosoftMediaMp4Format', 'OdataTypeMicrosoftMediaTransportStreamFormat' + OdataType OdataTypeBasicFormat `json:"@odata.type,omitempty"` +} + +func unmarshalBasicMultiBitrateFormat(body []byte) (BasicMultiBitrateFormat, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["@odata.type"] { + case string(OdataTypeMicrosoftMediaMp4Format): + var m4f Mp4Format + err := json.Unmarshal(body, &m4f) + return m4f, err + case string(OdataTypeMicrosoftMediaTransportStreamFormat): + var tsf TransportStreamFormat + err := json.Unmarshal(body, &tsf) + return tsf, err + default: + var mbf MultiBitrateFormat + err := json.Unmarshal(body, &mbf) + return mbf, err + } +} +func unmarshalBasicMultiBitrateFormatArray(body []byte) ([]BasicMultiBitrateFormat, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + mbfArray := make([]BasicMultiBitrateFormat, len(rawMessages)) + + for index, rawMessage := range rawMessages { + mbf, err := unmarshalBasicMultiBitrateFormat(*rawMessage) + if err != nil { + return nil, err + } + mbfArray[index] = mbf + } + return mbfArray, nil +} + +// MarshalJSON is the custom marshaler for MultiBitrateFormat. 
+func (mbf MultiBitrateFormat) MarshalJSON() ([]byte, error) { + mbf.OdataType = OdataTypeMicrosoftMediaMultiBitrateFormat + objectMap := make(map[string]interface{}) + if mbf.OutputFiles != nil { + objectMap["outputFiles"] = mbf.OutputFiles + } + if mbf.FilenamePattern != nil { + objectMap["filenamePattern"] = mbf.FilenamePattern + } + if mbf.OdataType != "" { + objectMap["@odata.type"] = mbf.OdataType + } + return json.Marshal(objectMap) +} + +// AsImageFormat is the BasicFormat implementation for MultiBitrateFormat. +func (mbf MultiBitrateFormat) AsImageFormat() (*ImageFormat, bool) { + return nil, false +} + +// AsBasicImageFormat is the BasicFormat implementation for MultiBitrateFormat. +func (mbf MultiBitrateFormat) AsBasicImageFormat() (BasicImageFormat, bool) { + return nil, false +} + +// AsJpgFormat is the BasicFormat implementation for MultiBitrateFormat. +func (mbf MultiBitrateFormat) AsJpgFormat() (*JpgFormat, bool) { + return nil, false +} + +// AsPngFormat is the BasicFormat implementation for MultiBitrateFormat. +func (mbf MultiBitrateFormat) AsPngFormat() (*PngFormat, bool) { + return nil, false +} + +// AsMultiBitrateFormat is the BasicFormat implementation for MultiBitrateFormat. +func (mbf MultiBitrateFormat) AsMultiBitrateFormat() (*MultiBitrateFormat, bool) { + return &mbf, true +} + +// AsBasicMultiBitrateFormat is the BasicFormat implementation for MultiBitrateFormat. +func (mbf MultiBitrateFormat) AsBasicMultiBitrateFormat() (BasicMultiBitrateFormat, bool) { + return &mbf, true +} + +// AsMp4Format is the BasicFormat implementation for MultiBitrateFormat. +func (mbf MultiBitrateFormat) AsMp4Format() (*Mp4Format, bool) { + return nil, false +} + +// AsTransportStreamFormat is the BasicFormat implementation for MultiBitrateFormat. +func (mbf MultiBitrateFormat) AsTransportStreamFormat() (*TransportStreamFormat, bool) { + return nil, false +} + +// AsFormat is the BasicFormat implementation for MultiBitrateFormat. 
+func (mbf MultiBitrateFormat) AsFormat() (*Format, bool) { + return nil, false +} + +// AsBasicFormat is the BasicFormat implementation for MultiBitrateFormat. +func (mbf MultiBitrateFormat) AsBasicFormat() (BasicFormat, bool) { + return &mbf, true +} + +// NoEncryption class for NoEncryption scheme +type NoEncryption struct { + // EnabledProtocols - Representing supported protocols + EnabledProtocols *EnabledProtocols `json:"enabledProtocols,omitempty"` +} + +// ODataError information about an error. +type ODataError struct { + // Code - A language-independent error name. + Code *string `json:"code,omitempty"` + // Message - The error message. + Message *string `json:"message,omitempty"` + // Target - The target of the error (for example, the name of the property in error). + Target *string `json:"target,omitempty"` + // Details - The error details. + Details *[]ODataError `json:"details,omitempty"` +} + +// Operation an operation. +type Operation struct { + // Name - The operation name. + Name *string `json:"name,omitempty"` + // Display - The operation display name. + Display *OperationDisplay `json:"display,omitempty"` + // Origin - Origin of the operation. + Origin *string `json:"origin,omitempty"` + // Properties - Operation properties format. + Properties *MetricProperties `json:"properties,omitempty"` +} + +// OperationCollection a collection of Operation items. +type OperationCollection struct { + autorest.Response `json:"-"` + // Value - A collection of Operation items. + Value *[]Operation `json:"value,omitempty"` + // OdataNextLink - A link to the next page of the collection (when the collection contains too many results to return in one response). + OdataNextLink *string `json:"@odata.nextLink,omitempty"` +} + +// OperationCollectionIterator provides access to a complete listing of Operation values. +type OperationCollectionIterator struct { + i int + page OperationCollectionPage +} + +// NextWithContext advances to the next value. 
If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *OperationCollectionIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OperationCollectionIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *OperationCollectionIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter OperationCollectionIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter OperationCollectionIterator) Response() OperationCollection { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter OperationCollectionIterator) Value() Operation { + if !iter.page.NotDone() { + return Operation{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the OperationCollectionIterator type. +func NewOperationCollectionIterator(page OperationCollectionPage) OperationCollectionIterator { + return OperationCollectionIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. 
+func (oc OperationCollection) IsEmpty() bool { + return oc.Value == nil || len(*oc.Value) == 0 +} + +// operationCollectionPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (oc OperationCollection) operationCollectionPreparer(ctx context.Context) (*http.Request, error) { + if oc.OdataNextLink == nil || len(to.String(oc.OdataNextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(oc.OdataNextLink))) +} + +// OperationCollectionPage contains a page of Operation values. +type OperationCollectionPage struct { + fn func(context.Context, OperationCollection) (OperationCollection, error) + oc OperationCollection +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *OperationCollectionPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OperationCollectionPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.oc) + if err != nil { + return err + } + page.oc = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *OperationCollectionPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page OperationCollectionPage) NotDone() bool { + return !page.oc.IsEmpty() +} + +// Response returns the raw server response from the last page request. 
+func (page OperationCollectionPage) Response() OperationCollection { + return page.oc +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page OperationCollectionPage) Values() []Operation { + if page.oc.IsEmpty() { + return nil + } + return *page.oc.Value +} + +// Creates a new instance of the OperationCollectionPage type. +func NewOperationCollectionPage(getNextPage func(context.Context, OperationCollection) (OperationCollection, error)) OperationCollectionPage { + return OperationCollectionPage{fn: getNextPage} +} + +// OperationDisplay operation details. +type OperationDisplay struct { + // Provider - The service provider. + Provider *string `json:"provider,omitempty"` + // Resource - Resource on which the operation is performed. + Resource *string `json:"resource,omitempty"` + // Operation - The operation type. + Operation *string `json:"operation,omitempty"` + // Description - The operation description. + Description *string `json:"description,omitempty"` +} + +// OutputFile represents an output file produced. +type OutputFile struct { + // Labels - The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels v1 and v2, and one audio layer with label a1, then an array like '[v1, a1]' tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1. + Labels *[]string `json:"labels,omitempty"` +} + +// BasicOverlay base type for all overlays - image, audio or video. +type BasicOverlay interface { + AsAudioOverlay() (*AudioOverlay, bool) + AsVideoOverlay() (*VideoOverlay, bool) + AsOverlay() (*Overlay, bool) +} + +// Overlay base type for all overlays - image, audio or video. +type Overlay struct { + // InputLabel - The label of the job input which is to be used as an overlay. The Input must specify exactly one file. 
You can specify an image file in JPG or PNG formats, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file. See https://aka.ms/mesformats for the complete list of supported audio and video file formats. + InputLabel *string `json:"inputLabel,omitempty"` + // Start - The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds in to the input video. If not specified the overlay starts from the beginning of the input video. + Start *string `json:"start,omitempty"` + // End - The position in the input video at which the overlay ends. The value should be in ISO 8601 duration format. For example, PT30S to end the overlay at 30 seconds in to the input video. If not specified the overlay will be applied until the end of the input video if inputLoop is true. Else, if inputLoop is false, then overlay will last as long as the duration of the overlay media. + End *string `json:"end,omitempty"` + // FadeInDuration - The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S). + FadeInDuration *string `json:"fadeInDuration,omitempty"` + // FadeOutDuration - The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S). + FadeOutDuration *string `json:"fadeOutDuration,omitempty"` + // AudioGainLevel - The gain level of audio in the overlay. The value should be in the range [0, 1.0]. The default is 1.0. 
+ AudioGainLevel *float64 `json:"audioGainLevel,omitempty"` + // OdataType - Possible values include: 'OdataTypeOverlay', 'OdataTypeMicrosoftMediaAudioOverlay', 'OdataTypeMicrosoftMediaVideoOverlay' + OdataType OdataTypeBasicOverlay `json:"@odata.type,omitempty"` +} + +func unmarshalBasicOverlay(body []byte) (BasicOverlay, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["@odata.type"] { + case string(OdataTypeMicrosoftMediaAudioOverlay): + var ao AudioOverlay + err := json.Unmarshal(body, &ao) + return ao, err + case string(OdataTypeMicrosoftMediaVideoOverlay): + var vo VideoOverlay + err := json.Unmarshal(body, &vo) + return vo, err + default: + var o Overlay + err := json.Unmarshal(body, &o) + return o, err + } +} +func unmarshalBasicOverlayArray(body []byte) ([]BasicOverlay, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + oArray := make([]BasicOverlay, len(rawMessages)) + + for index, rawMessage := range rawMessages { + o, err := unmarshalBasicOverlay(*rawMessage) + if err != nil { + return nil, err + } + oArray[index] = o + } + return oArray, nil +} + +// MarshalJSON is the custom marshaler for Overlay. 
+func (o Overlay) MarshalJSON() ([]byte, error) { + o.OdataType = OdataTypeOverlay + objectMap := make(map[string]interface{}) + if o.InputLabel != nil { + objectMap["inputLabel"] = o.InputLabel + } + if o.Start != nil { + objectMap["start"] = o.Start + } + if o.End != nil { + objectMap["end"] = o.End + } + if o.FadeInDuration != nil { + objectMap["fadeInDuration"] = o.FadeInDuration + } + if o.FadeOutDuration != nil { + objectMap["fadeOutDuration"] = o.FadeOutDuration + } + if o.AudioGainLevel != nil { + objectMap["audioGainLevel"] = o.AudioGainLevel + } + if o.OdataType != "" { + objectMap["@odata.type"] = o.OdataType + } + return json.Marshal(objectMap) +} + +// AsAudioOverlay is the BasicOverlay implementation for Overlay. +func (o Overlay) AsAudioOverlay() (*AudioOverlay, bool) { + return nil, false +} + +// AsVideoOverlay is the BasicOverlay implementation for Overlay. +func (o Overlay) AsVideoOverlay() (*VideoOverlay, bool) { + return nil, false +} + +// AsOverlay is the BasicOverlay implementation for Overlay. +func (o Overlay) AsOverlay() (*Overlay, bool) { + return &o, true +} + +// AsBasicOverlay is the BasicOverlay implementation for Overlay. +func (o Overlay) AsBasicOverlay() (BasicOverlay, bool) { + return &o, true +} + +// PngFormat describes the settings for producing PNG thumbnails. +type PngFormat struct { + // FilenamePattern - The pattern of the file names for the generated output files. The following macros are supported in the file name: {Basename} - The base name of the input video {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. Any unsubstituted macros will be collapsed and removed from the filename. 
+ FilenamePattern *string `json:"filenamePattern,omitempty"` + // OdataType - Possible values include: 'OdataTypeFormat', 'OdataTypeMicrosoftMediaImageFormat', 'OdataTypeMicrosoftMediaJpgFormat', 'OdataTypeMicrosoftMediaPngFormat', 'OdataTypeMicrosoftMediaMultiBitrateFormat', 'OdataTypeMicrosoftMediaMp4Format', 'OdataTypeMicrosoftMediaTransportStreamFormat' + OdataType OdataTypeBasicFormat `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for PngFormat. +func (pf PngFormat) MarshalJSON() ([]byte, error) { + pf.OdataType = OdataTypeMicrosoftMediaPngFormat + objectMap := make(map[string]interface{}) + if pf.FilenamePattern != nil { + objectMap["filenamePattern"] = pf.FilenamePattern + } + if pf.OdataType != "" { + objectMap["@odata.type"] = pf.OdataType + } + return json.Marshal(objectMap) +} + +// AsImageFormat is the BasicFormat implementation for PngFormat. +func (pf PngFormat) AsImageFormat() (*ImageFormat, bool) { + return nil, false +} + +// AsBasicImageFormat is the BasicFormat implementation for PngFormat. +func (pf PngFormat) AsBasicImageFormat() (BasicImageFormat, bool) { + return &pf, true +} + +// AsJpgFormat is the BasicFormat implementation for PngFormat. +func (pf PngFormat) AsJpgFormat() (*JpgFormat, bool) { + return nil, false +} + +// AsPngFormat is the BasicFormat implementation for PngFormat. +func (pf PngFormat) AsPngFormat() (*PngFormat, bool) { + return &pf, true +} + +// AsMultiBitrateFormat is the BasicFormat implementation for PngFormat. +func (pf PngFormat) AsMultiBitrateFormat() (*MultiBitrateFormat, bool) { + return nil, false +} + +// AsBasicMultiBitrateFormat is the BasicFormat implementation for PngFormat. +func (pf PngFormat) AsBasicMultiBitrateFormat() (BasicMultiBitrateFormat, bool) { + return nil, false +} + +// AsMp4Format is the BasicFormat implementation for PngFormat. 
+func (pf PngFormat) AsMp4Format() (*Mp4Format, bool) { + return nil, false +} + +// AsTransportStreamFormat is the BasicFormat implementation for PngFormat. +func (pf PngFormat) AsTransportStreamFormat() (*TransportStreamFormat, bool) { + return nil, false +} + +// AsFormat is the BasicFormat implementation for PngFormat. +func (pf PngFormat) AsFormat() (*Format, bool) { + return nil, false +} + +// AsBasicFormat is the BasicFormat implementation for PngFormat. +func (pf PngFormat) AsBasicFormat() (BasicFormat, bool) { + return &pf, true +} + +// PngImage describes the properties for producing a series of PNG images from the input video. +type PngImage struct { + // Layers - A collection of output PNG image layers to be produced by the encoder. + Layers *[]PngLayer `json:"layers,omitempty"` + // Start - The position in the input video from where to start generating thumbnails. The value can be in absolute timestamp (ISO 8601, e.g: PT05S), or a frame count (For example, 10 for the 10th frame), or a relative value (For example, 1%). Also supports a macro {Best}, which tells the encoder to select the best thumbnail from the first few seconds of the video. + Start *string `json:"start,omitempty"` + // Step - The intervals at which thumbnails are generated. The value can be in absolute timestamp (ISO 8601, e.g: PT05S for one image every 5 seconds), or a frame count (For example, 30 for every 30 frames), or a relative value (For example, 1%). + Step *string `json:"step,omitempty"` + // Range - The position in the input video at which to stop generating thumbnails. The value can be in absolute timestamp (ISO 8601, e.g: PT5M30S to stop at 5 minutes and 30 seconds), or a frame count (For example, 300 to stop at the 300th frame), or a relative value (For example, 100%). + Range *string `json:"range,omitempty"` + // KeyFrameInterval - The distance between two key frames, thereby defining a group of pictures (GOP). 
The value should be a non-zero integer in the range [1, 30] seconds, specified in ISO 8601 format. The default is 2 seconds (PT2S). + KeyFrameInterval *string `json:"keyFrameInterval,omitempty"` + // StretchMode - The resizing mode - how the input video will be resized to fit the desired output resolution(s). Default is AutoSize. Possible values include: 'StretchModeNone', 'StretchModeAutoSize', 'StretchModeAutoFit' + StretchMode StretchMode `json:"stretchMode,omitempty"` + // Label - An optional label for the codec. The label can be used to control muxing behavior. + Label *string `json:"label,omitempty"` + // OdataType - Possible values include: 'OdataTypeCodec', 'OdataTypeMicrosoftMediaAudio', 'OdataTypeMicrosoftMediaAacAudio', 'OdataTypeMicrosoftMediaCopyVideo', 'OdataTypeMicrosoftMediaVideo', 'OdataTypeMicrosoftMediaImage', 'OdataTypeMicrosoftMediaCopyAudio', 'OdataTypeMicrosoftMediaH264Video', 'OdataTypeMicrosoftMediaJpgImage', 'OdataTypeMicrosoftMediaPngImage' + OdataType OdataTypeBasicCodec `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for PngImage. +func (pi PngImage) MarshalJSON() ([]byte, error) { + pi.OdataType = OdataTypeMicrosoftMediaPngImage + objectMap := make(map[string]interface{}) + if pi.Layers != nil { + objectMap["layers"] = pi.Layers + } + if pi.Start != nil { + objectMap["start"] = pi.Start + } + if pi.Step != nil { + objectMap["step"] = pi.Step + } + if pi.Range != nil { + objectMap["range"] = pi.Range + } + if pi.KeyFrameInterval != nil { + objectMap["keyFrameInterval"] = pi.KeyFrameInterval + } + if pi.StretchMode != "" { + objectMap["stretchMode"] = pi.StretchMode + } + if pi.Label != nil { + objectMap["label"] = pi.Label + } + if pi.OdataType != "" { + objectMap["@odata.type"] = pi.OdataType + } + return json.Marshal(objectMap) +} + +// AsAudio is the BasicCodec implementation for PngImage. 
+func (pi PngImage) AsAudio() (*Audio, bool) { + return nil, false +} + +// AsBasicAudio is the BasicCodec implementation for PngImage. +func (pi PngImage) AsBasicAudio() (BasicAudio, bool) { + return nil, false +} + +// AsAacAudio is the BasicCodec implementation for PngImage. +func (pi PngImage) AsAacAudio() (*AacAudio, bool) { + return nil, false +} + +// AsCopyVideo is the BasicCodec implementation for PngImage. +func (pi PngImage) AsCopyVideo() (*CopyVideo, bool) { + return nil, false +} + +// AsVideo is the BasicCodec implementation for PngImage. +func (pi PngImage) AsVideo() (*Video, bool) { + return nil, false +} + +// AsBasicVideo is the BasicCodec implementation for PngImage. +func (pi PngImage) AsBasicVideo() (BasicVideo, bool) { + return &pi, true +} + +// AsImage is the BasicCodec implementation for PngImage. +func (pi PngImage) AsImage() (*Image, bool) { + return nil, false +} + +// AsBasicImage is the BasicCodec implementation for PngImage. +func (pi PngImage) AsBasicImage() (BasicImage, bool) { + return &pi, true +} + +// AsCopyAudio is the BasicCodec implementation for PngImage. +func (pi PngImage) AsCopyAudio() (*CopyAudio, bool) { + return nil, false +} + +// AsH264Video is the BasicCodec implementation for PngImage. +func (pi PngImage) AsH264Video() (*H264Video, bool) { + return nil, false +} + +// AsJpgImage is the BasicCodec implementation for PngImage. +func (pi PngImage) AsJpgImage() (*JpgImage, bool) { + return nil, false +} + +// AsPngImage is the BasicCodec implementation for PngImage. +func (pi PngImage) AsPngImage() (*PngImage, bool) { + return &pi, true +} + +// AsCodec is the BasicCodec implementation for PngImage. +func (pi PngImage) AsCodec() (*Codec, bool) { + return nil, false +} + +// AsBasicCodec is the BasicCodec implementation for PngImage. +func (pi PngImage) AsBasicCodec() (BasicCodec, bool) { + return &pi, true +} + +// PngLayer describes the settings to produce a PNG image from the input video. 
+type PngLayer struct { + // Width - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input. + Width *string `json:"width,omitempty"` + // Height - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input. + Height *string `json:"height,omitempty"` + // Label - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file. + Label *string `json:"label,omitempty"` + // OdataType - Possible values include: 'OdataTypeLayer', 'OdataTypeMicrosoftMediaVideoLayer', 'OdataTypeMicrosoftMediaH264Layer', 'OdataTypeMicrosoftMediaJpgLayer', 'OdataTypeMicrosoftMediaPngLayer' + OdataType OdataTypeBasicLayer `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for PngLayer. +func (pl PngLayer) MarshalJSON() ([]byte, error) { + pl.OdataType = OdataTypeMicrosoftMediaPngLayer + objectMap := make(map[string]interface{}) + if pl.Width != nil { + objectMap["width"] = pl.Width + } + if pl.Height != nil { + objectMap["height"] = pl.Height + } + if pl.Label != nil { + objectMap["label"] = pl.Label + } + if pl.OdataType != "" { + objectMap["@odata.type"] = pl.OdataType + } + return json.Marshal(objectMap) +} + +// AsVideoLayer is the BasicLayer implementation for PngLayer. +func (pl PngLayer) AsVideoLayer() (*VideoLayer, bool) { + return nil, false +} + +// AsBasicVideoLayer is the BasicLayer implementation for PngLayer. +func (pl PngLayer) AsBasicVideoLayer() (BasicVideoLayer, bool) { + return nil, false +} + +// AsH264Layer is the BasicLayer implementation for PngLayer. +func (pl PngLayer) AsH264Layer() (*H264Layer, bool) { + return nil, false +} + +// AsJpgLayer is the BasicLayer implementation for PngLayer. 
+func (pl PngLayer) AsJpgLayer() (*JpgLayer, bool) { + return nil, false +} + +// AsPngLayer is the BasicLayer implementation for PngLayer. +func (pl PngLayer) AsPngLayer() (*PngLayer, bool) { + return &pl, true +} + +// AsLayer is the BasicLayer implementation for PngLayer. +func (pl PngLayer) AsLayer() (*Layer, bool) { + return nil, false +} + +// AsBasicLayer is the BasicLayer implementation for PngLayer. +func (pl PngLayer) AsBasicLayer() (BasicLayer, bool) { + return &pl, true +} + +// PresentationTimeRange the presentation time range, this is asset related and not recommended for Account +// Filter. +type PresentationTimeRange struct { + // StartTimestamp - The absolute start time boundary. + StartTimestamp *int64 `json:"startTimestamp,omitempty"` + // EndTimestamp - The absolute end time boundary. + EndTimestamp *int64 `json:"endTimestamp,omitempty"` + // PresentationWindowDuration - The relative to end sliding window. + PresentationWindowDuration *int64 `json:"presentationWindowDuration,omitempty"` + // LiveBackoffDuration - The relative to end right edge. + LiveBackoffDuration *int64 `json:"liveBackoffDuration,omitempty"` + // Timescale - The time scale of time stamps. + Timescale *int64 `json:"timescale,omitempty"` + // ForceEndTimestamp - The indicator of forcing exsiting of end time stamp. + ForceEndTimestamp *bool `json:"forceEndTimestamp,omitempty"` +} + +// BasicPreset base type for all Presets, which define the recipe or instructions on how the input media files should +// be processed. 
+type BasicPreset interface { + AsAudioAnalyzerPreset() (*AudioAnalyzerPreset, bool) + AsBasicAudioAnalyzerPreset() (BasicAudioAnalyzerPreset, bool) + AsBuiltInStandardEncoderPreset() (*BuiltInStandardEncoderPreset, bool) + AsStandardEncoderPreset() (*StandardEncoderPreset, bool) + AsVideoAnalyzerPreset() (*VideoAnalyzerPreset, bool) + AsPreset() (*Preset, bool) +} + +// Preset base type for all Presets, which define the recipe or instructions on how the input media files +// should be processed. +type Preset struct { + // OdataType - Possible values include: 'OdataTypePreset', 'OdataTypeMicrosoftMediaAudioAnalyzerPreset', 'OdataTypeMicrosoftMediaBuiltInStandardEncoderPreset', 'OdataTypeMicrosoftMediaStandardEncoderPreset', 'OdataTypeMicrosoftMediaVideoAnalyzerPreset' + OdataType OdataTypeBasicPreset `json:"@odata.type,omitempty"` +} + +func unmarshalBasicPreset(body []byte) (BasicPreset, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["@odata.type"] { + case string(OdataTypeMicrosoftMediaAudioAnalyzerPreset): + var aap AudioAnalyzerPreset + err := json.Unmarshal(body, &aap) + return aap, err + case string(OdataTypeMicrosoftMediaBuiltInStandardEncoderPreset): + var bisep BuiltInStandardEncoderPreset + err := json.Unmarshal(body, &bisep) + return bisep, err + case string(OdataTypeMicrosoftMediaStandardEncoderPreset): + var sep StandardEncoderPreset + err := json.Unmarshal(body, &sep) + return sep, err + case string(OdataTypeMicrosoftMediaVideoAnalyzerPreset): + var vap VideoAnalyzerPreset + err := json.Unmarshal(body, &vap) + return vap, err + default: + var p Preset + err := json.Unmarshal(body, &p) + return p, err + } +} +func unmarshalBasicPresetArray(body []byte) ([]BasicPreset, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + pArray := make([]BasicPreset, len(rawMessages)) + + for index, rawMessage 
:= range rawMessages { + p, err := unmarshalBasicPreset(*rawMessage) + if err != nil { + return nil, err + } + pArray[index] = p + } + return pArray, nil +} + +// MarshalJSON is the custom marshaler for Preset. +func (p Preset) MarshalJSON() ([]byte, error) { + p.OdataType = OdataTypePreset + objectMap := make(map[string]interface{}) + if p.OdataType != "" { + objectMap["@odata.type"] = p.OdataType + } + return json.Marshal(objectMap) +} + +// AsAudioAnalyzerPreset is the BasicPreset implementation for Preset. +func (p Preset) AsAudioAnalyzerPreset() (*AudioAnalyzerPreset, bool) { + return nil, false +} + +// AsBasicAudioAnalyzerPreset is the BasicPreset implementation for Preset. +func (p Preset) AsBasicAudioAnalyzerPreset() (BasicAudioAnalyzerPreset, bool) { + return nil, false +} + +// AsBuiltInStandardEncoderPreset is the BasicPreset implementation for Preset. +func (p Preset) AsBuiltInStandardEncoderPreset() (*BuiltInStandardEncoderPreset, bool) { + return nil, false +} + +// AsStandardEncoderPreset is the BasicPreset implementation for Preset. +func (p Preset) AsStandardEncoderPreset() (*StandardEncoderPreset, bool) { + return nil, false +} + +// AsVideoAnalyzerPreset is the BasicPreset implementation for Preset. +func (p Preset) AsVideoAnalyzerPreset() (*VideoAnalyzerPreset, bool) { + return nil, false +} + +// AsPreset is the BasicPreset implementation for Preset. +func (p Preset) AsPreset() (*Preset, bool) { + return &p, true +} + +// AsBasicPreset is the BasicPreset implementation for Preset. +func (p Preset) AsBasicPreset() (BasicPreset, bool) { + return &p, true +} + +// Provider a resource provider. +type Provider struct { + // ProviderName - The provider name. + ProviderName *string `json:"providerName,omitempty"` +} + +// ProxyResource the resource model definition for a ARM proxy resource. +type ProxyResource struct { + // ID - Fully qualified resource ID for the resource. + ID *string `json:"id,omitempty"` + // Name - The name of the resource. 
+ Name *string `json:"name,omitempty"` + // Type - The type of the resource. + Type *string `json:"type,omitempty"` +} + +// Rectangle describes the properties of a rectangular window applied to the input media before processing +// it. +type Rectangle struct { + // Left - The number of pixels from the left-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Left *string `json:"left,omitempty"` + // Top - The number of pixels from the top-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Top *string `json:"top,omitempty"` + // Width - The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Width *string `json:"width,omitempty"` + // Height - The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Height *string `json:"height,omitempty"` +} + +// Resource the core properties of ARM resources. +type Resource struct { + // ID - Fully qualified resource ID for the resource. + ID *string `json:"id,omitempty"` + // Name - The name of the resource. + Name *string `json:"name,omitempty"` + // Type - The type of the resource. + Type *string `json:"type,omitempty"` +} + +// Service a Media Services account. +type Service struct { + autorest.Response `json:"-"` + // ServiceProperties - The resource properties. + *ServiceProperties `json:"properties,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` + // Location - The Azure Region of the resource. + Location *string `json:"location,omitempty"` + // ID - Fully qualified resource ID for the resource. + ID *string `json:"id,omitempty"` + // Name - The name of the resource. + Name *string `json:"name,omitempty"` + // Type - The type of the resource. 
+ Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for Service. +func (s Service) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if s.ServiceProperties != nil { + objectMap["properties"] = s.ServiceProperties + } + if s.Tags != nil { + objectMap["tags"] = s.Tags + } + if s.Location != nil { + objectMap["location"] = s.Location + } + if s.ID != nil { + objectMap["id"] = s.ID + } + if s.Name != nil { + objectMap["name"] = s.Name + } + if s.Type != nil { + objectMap["type"] = s.Type + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Service struct. +func (s *Service) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var serviceProperties ServiceProperties + err = json.Unmarshal(*v, &serviceProperties) + if err != nil { + return err + } + s.ServiceProperties = &serviceProperties + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + s.Tags = tags + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + s.Location = &location + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + s.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + s.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + s.Type = &typeVar + } + } + } + + return nil +} + +// ServiceCollection a collection of MediaService items. +type ServiceCollection struct { + autorest.Response `json:"-"` + // Value - A collection of MediaService items. 
+ Value *[]Service `json:"value,omitempty"` + // OdataNextLink - A link to the next page of the collection (when the collection contains too many results to return in one response). + OdataNextLink *string `json:"@odata.nextLink,omitempty"` +} + +// ServiceCollectionIterator provides access to a complete listing of Service values. +type ServiceCollectionIterator struct { + i int + page ServiceCollectionPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ServiceCollectionIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ServiceCollectionIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ServiceCollectionIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ServiceCollectionIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ServiceCollectionIterator) Response() ServiceCollection { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. 
+func (iter ServiceCollectionIterator) Value() Service { + if !iter.page.NotDone() { + return Service{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ServiceCollectionIterator type. +func NewServiceCollectionIterator(page ServiceCollectionPage) ServiceCollectionIterator { + return ServiceCollectionIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (sc ServiceCollection) IsEmpty() bool { + return sc.Value == nil || len(*sc.Value) == 0 +} + +// serviceCollectionPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (sc ServiceCollection) serviceCollectionPreparer(ctx context.Context) (*http.Request, error) { + if sc.OdataNextLink == nil || len(to.String(sc.OdataNextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(sc.OdataNextLink))) +} + +// ServiceCollectionPage contains a page of Service values. +type ServiceCollectionPage struct { + fn func(context.Context, ServiceCollection) (ServiceCollection, error) + sc ServiceCollection +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ServiceCollectionPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ServiceCollectionPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.sc) + if err != nil { + return err + } + page.sc = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+// Deprecated: Use NextWithContext() instead. +func (page *ServiceCollectionPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ServiceCollectionPage) NotDone() bool { + return !page.sc.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ServiceCollectionPage) Response() ServiceCollection { + return page.sc +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ServiceCollectionPage) Values() []Service { + if page.sc.IsEmpty() { + return nil + } + return *page.sc.Value +} + +// Creates a new instance of the ServiceCollectionPage type. +func NewServiceCollectionPage(getNextPage func(context.Context, ServiceCollection) (ServiceCollection, error)) ServiceCollectionPage { + return ServiceCollectionPage{fn: getNextPage} +} + +// ServiceProperties properties of the Media Services account. +type ServiceProperties struct { + // MediaServiceID - The Media Services account ID. + MediaServiceID *uuid.UUID `json:"mediaServiceId,omitempty"` + // StorageAccounts - The storage accounts for this resource. + StorageAccounts *[]StorageAccount `json:"storageAccounts,omitempty"` +} + +// ServiceSpecification the service metric specifications. +type ServiceSpecification struct { + // MetricSpecifications - List of metric specifications. + MetricSpecifications *[]Metric `json:"metricSpecifications,omitempty"` +} + +// StandardEncoderPreset describes all the settings to be used when encoding the input video with the +// Standard Encoder. +type StandardEncoderPreset struct { + // Filters - One or more filtering operations that are applied to the input media before encoding. + Filters *Filters `json:"filters,omitempty"` + // Codecs - The list of codecs to be used when encoding the input video. 
+ Codecs *[]BasicCodec `json:"codecs,omitempty"` + // Formats - The list of outputs to be produced by the encoder. + Formats *[]BasicFormat `json:"formats,omitempty"` + // OdataType - Possible values include: 'OdataTypePreset', 'OdataTypeMicrosoftMediaAudioAnalyzerPreset', 'OdataTypeMicrosoftMediaBuiltInStandardEncoderPreset', 'OdataTypeMicrosoftMediaStandardEncoderPreset', 'OdataTypeMicrosoftMediaVideoAnalyzerPreset' + OdataType OdataTypeBasicPreset `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for StandardEncoderPreset. +func (sep StandardEncoderPreset) MarshalJSON() ([]byte, error) { + sep.OdataType = OdataTypeMicrosoftMediaStandardEncoderPreset + objectMap := make(map[string]interface{}) + if sep.Filters != nil { + objectMap["filters"] = sep.Filters + } + if sep.Codecs != nil { + objectMap["codecs"] = sep.Codecs + } + if sep.Formats != nil { + objectMap["formats"] = sep.Formats + } + if sep.OdataType != "" { + objectMap["@odata.type"] = sep.OdataType + } + return json.Marshal(objectMap) +} + +// AsAudioAnalyzerPreset is the BasicPreset implementation for StandardEncoderPreset. +func (sep StandardEncoderPreset) AsAudioAnalyzerPreset() (*AudioAnalyzerPreset, bool) { + return nil, false +} + +// AsBasicAudioAnalyzerPreset is the BasicPreset implementation for StandardEncoderPreset. +func (sep StandardEncoderPreset) AsBasicAudioAnalyzerPreset() (BasicAudioAnalyzerPreset, bool) { + return nil, false +} + +// AsBuiltInStandardEncoderPreset is the BasicPreset implementation for StandardEncoderPreset. +func (sep StandardEncoderPreset) AsBuiltInStandardEncoderPreset() (*BuiltInStandardEncoderPreset, bool) { + return nil, false +} + +// AsStandardEncoderPreset is the BasicPreset implementation for StandardEncoderPreset. +func (sep StandardEncoderPreset) AsStandardEncoderPreset() (*StandardEncoderPreset, bool) { + return &sep, true +} + +// AsVideoAnalyzerPreset is the BasicPreset implementation for StandardEncoderPreset. 
+func (sep StandardEncoderPreset) AsVideoAnalyzerPreset() (*VideoAnalyzerPreset, bool) { + return nil, false +} + +// AsPreset is the BasicPreset implementation for StandardEncoderPreset. +func (sep StandardEncoderPreset) AsPreset() (*Preset, bool) { + return nil, false +} + +// AsBasicPreset is the BasicPreset implementation for StandardEncoderPreset. +func (sep StandardEncoderPreset) AsBasicPreset() (BasicPreset, bool) { + return &sep, true +} + +// UnmarshalJSON is the custom unmarshaler for StandardEncoderPreset struct. +func (sep *StandardEncoderPreset) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "filters": + if v != nil { + var filters Filters + err = json.Unmarshal(*v, &filters) + if err != nil { + return err + } + sep.Filters = &filters + } + case "codecs": + if v != nil { + codecs, err := unmarshalBasicCodecArray(*v) + if err != nil { + return err + } + sep.Codecs = &codecs + } + case "formats": + if v != nil { + formats, err := unmarshalBasicFormatArray(*v) + if err != nil { + return err + } + sep.Formats = &formats + } + case "@odata.type": + if v != nil { + var odataType OdataTypeBasicPreset + err = json.Unmarshal(*v, &odataType) + if err != nil { + return err + } + sep.OdataType = odataType + } + } + } + + return nil +} + +// StorageAccount the storage account details. +type StorageAccount struct { + // ID - The ID of the storage account resource. Media Services relies on tables and queues as well as blobs, so the primary storage account must be a Standard Storage account (either Microsoft.ClassicStorage or Microsoft.Storage). Blob only storage accounts can be added as secondary storage accounts. + ID *string `json:"id,omitempty"` + // Type - The type of the storage account. 
Possible values include: 'Primary', 'Secondary' + Type StorageAccountType `json:"type,omitempty"` +} + +// StorageEncryptedAssetDecryptionData data needed to decrypt asset files encrypted with legacy storage +// encryption. +type StorageEncryptedAssetDecryptionData struct { + autorest.Response `json:"-"` + // Key - The Asset File storage encryption key. + Key *[]byte `json:"key,omitempty"` + // AssetFileEncryptionMetadata - Asset File encryption metadata. + AssetFileEncryptionMetadata *[]AssetFileEncryptionMetadata `json:"assetFileEncryptionMetadata,omitempty"` +} + +// StreamingEndpoint the StreamingEndpoint. +type StreamingEndpoint struct { + autorest.Response `json:"-"` + // StreamingEndpointProperties - The StreamingEndpoint properties. + *StreamingEndpointProperties `json:"properties,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` + // Location - The Azure Region of the resource. + Location *string `json:"location,omitempty"` + // ID - Fully qualified resource ID for the resource. + ID *string `json:"id,omitempty"` + // Name - The name of the resource. + Name *string `json:"name,omitempty"` + // Type - The type of the resource. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for StreamingEndpoint. +func (se StreamingEndpoint) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if se.StreamingEndpointProperties != nil { + objectMap["properties"] = se.StreamingEndpointProperties + } + if se.Tags != nil { + objectMap["tags"] = se.Tags + } + if se.Location != nil { + objectMap["location"] = se.Location + } + if se.ID != nil { + objectMap["id"] = se.ID + } + if se.Name != nil { + objectMap["name"] = se.Name + } + if se.Type != nil { + objectMap["type"] = se.Type + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for StreamingEndpoint struct. 
func (se *StreamingEndpoint) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	err := json.Unmarshal(body, &m)
	if err != nil {
		return err
	}
	for k, v := range m {
		switch k {
		case "properties":
			if v != nil {
				// The wire format nests properties under "properties"; flatten into the embedded struct.
				var streamingEndpointProperties StreamingEndpointProperties
				err = json.Unmarshal(*v, &streamingEndpointProperties)
				if err != nil {
					return err
				}
				se.StreamingEndpointProperties = &streamingEndpointProperties
			}
		case "tags":
			if v != nil {
				var tags map[string]*string
				err = json.Unmarshal(*v, &tags)
				if err != nil {
					return err
				}
				se.Tags = tags
			}
		case "location":
			if v != nil {
				var location string
				err = json.Unmarshal(*v, &location)
				if err != nil {
					return err
				}
				se.Location = &location
			}
		case "id":
			if v != nil {
				var ID string
				err = json.Unmarshal(*v, &ID)
				if err != nil {
					return err
				}
				se.ID = &ID
			}
		case "name":
			if v != nil {
				var name string
				err = json.Unmarshal(*v, &name)
				if err != nil {
					return err
				}
				se.Name = &name
			}
		case "type":
			if v != nil {
				var typeVar string
				err = json.Unmarshal(*v, &typeVar)
				if err != nil {
					return err
				}
				se.Type = &typeVar
			}
		}
	}

	return nil
}

// StreamingEndpointAccessControl streamingEndpoint access control definition.
type StreamingEndpointAccessControl struct {
	// Akamai - The access control of Akamai
	Akamai *AkamaiAccessControl `json:"akamai,omitempty"`
	// IP - The IP access control of the StreamingEndpoint.
	IP *IPAccessControl `json:"ip,omitempty"`
}

// StreamingEndpointListResult the StreamingEndpoint list result.
type StreamingEndpointListResult struct {
	autorest.Response `json:"-"`
	// Value - The result of the List StreamingEndpoint operation.
	Value *[]StreamingEndpoint `json:"value,omitempty"`
	// OdataCount - The number of results.
	OdataCount *int32 `json:"@odata.count,omitempty"`
	// OdataNextLink - The link to the next set of results. Not empty if value contains incomplete list of StreamingEndpoints.
	OdataNextLink *string `json:"@odata.nextLink,omitempty"`
}

// StreamingEndpointListResultIterator provides access to a complete listing of StreamingEndpoint values.
type StreamingEndpointListResultIterator struct {
	i    int
	page StreamingEndpointListResultPage
}

// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *StreamingEndpointListResultIterator) NextWithContext(ctx context.Context) (err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/StreamingEndpointListResultIterator.NextWithContext")
		defer func() {
			sc := -1
			if iter.Response().Response.Response != nil {
				sc = iter.Response().Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	iter.i++
	if iter.i < len(iter.page.Values()) {
		return nil
	}
	err = iter.page.NextWithContext(ctx)
	if err != nil {
		// Roll the index back so the iterator stays on the current value on failure.
		iter.i--
		return err
	}
	iter.i = 0
	return nil
}

// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *StreamingEndpointListResultIterator) Next() error {
	return iter.NextWithContext(context.Background())
}

// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter StreamingEndpointListResultIterator) NotDone() bool {
	return iter.page.NotDone() && iter.i < len(iter.page.Values())
}

// Response returns the raw server response from the last page request.
func (iter StreamingEndpointListResultIterator) Response() StreamingEndpointListResult {
	return iter.page.Response()
}

// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
+func (iter StreamingEndpointListResultIterator) Value() StreamingEndpoint { + if !iter.page.NotDone() { + return StreamingEndpoint{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the StreamingEndpointListResultIterator type. +func NewStreamingEndpointListResultIterator(page StreamingEndpointListResultPage) StreamingEndpointListResultIterator { + return StreamingEndpointListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (selr StreamingEndpointListResult) IsEmpty() bool { + return selr.Value == nil || len(*selr.Value) == 0 +} + +// streamingEndpointListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (selr StreamingEndpointListResult) streamingEndpointListResultPreparer(ctx context.Context) (*http.Request, error) { + if selr.OdataNextLink == nil || len(to.String(selr.OdataNextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(selr.OdataNextLink))) +} + +// StreamingEndpointListResultPage contains a page of StreamingEndpoint values. +type StreamingEndpointListResultPage struct { + fn func(context.Context, StreamingEndpointListResult) (StreamingEndpointListResult, error) + selr StreamingEndpointListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
func (page *StreamingEndpointListResultPage) NextWithContext(ctx context.Context) (err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/StreamingEndpointListResultPage.NextWithContext")
		defer func() {
			sc := -1
			if page.Response().Response.Response != nil {
				sc = page.Response().Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	next, err := page.fn(ctx, page.selr)
	if err != nil {
		return err
	}
	page.selr = next
	return nil
}

// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *StreamingEndpointListResultPage) Next() error {
	return page.NextWithContext(context.Background())
}

// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page StreamingEndpointListResultPage) NotDone() bool {
	return !page.selr.IsEmpty()
}

// Response returns the raw server response from the last page request.
func (page StreamingEndpointListResultPage) Response() StreamingEndpointListResult {
	return page.selr
}

// Values returns the slice of values for the current page or nil if there are no values.
func (page StreamingEndpointListResultPage) Values() []StreamingEndpoint {
	if page.selr.IsEmpty() {
		return nil
	}
	return *page.selr.Value
}

// Creates a new instance of the StreamingEndpointListResultPage type.
func NewStreamingEndpointListResultPage(getNextPage func(context.Context, StreamingEndpointListResult) (StreamingEndpointListResult, error)) StreamingEndpointListResultPage {
	return StreamingEndpointListResultPage{fn: getNextPage}
}

// StreamingEndpointProperties the StreamingEndpoint properties.
type StreamingEndpointProperties struct {
	// Description - The StreamingEndpoint description.
	Description *string `json:"description,omitempty"`
	// ScaleUnits - The number of scale units. Use the Scale operation to adjust this value.
	ScaleUnits *int32 `json:"scaleUnits,omitempty"`
	// AvailabilitySetName - The name of the AvailabilitySet used with this StreamingEndpoint for high availability streaming. This value can only be set at creation time.
	AvailabilitySetName *string `json:"availabilitySetName,omitempty"`
	// AccessControl - The access control definition of the StreamingEndpoint.
	AccessControl *StreamingEndpointAccessControl `json:"accessControl,omitempty"`
	// MaxCacheAge - Max cache age
	MaxCacheAge *int64 `json:"maxCacheAge,omitempty"`
	// CustomHostNames - The custom host names of the StreamingEndpoint
	CustomHostNames *[]string `json:"customHostNames,omitempty"`
	// HostName - The StreamingEndpoint host name.
	HostName *string `json:"hostName,omitempty"`
	// CdnEnabled - The CDN enabled flag.
	CdnEnabled *bool `json:"cdnEnabled,omitempty"`
	// CdnProvider - The CDN provider name.
	CdnProvider *string `json:"cdnProvider,omitempty"`
	// CdnProfile - The CDN profile name.
	CdnProfile *string `json:"cdnProfile,omitempty"`
	// ProvisioningState - The provisioning state of the StreamingEndpoint.
	ProvisioningState *string `json:"provisioningState,omitempty"`
	// ResourceState - The resource state of the StreamingEndpoint. Possible values include: 'StreamingEndpointResourceStateStopped', 'StreamingEndpointResourceStateStarting', 'StreamingEndpointResourceStateRunning', 'StreamingEndpointResourceStateStopping', 'StreamingEndpointResourceStateDeleting', 'StreamingEndpointResourceStateScaling'
	ResourceState StreamingEndpointResourceState `json:"resourceState,omitempty"`
	// CrossSiteAccessPolicies - The StreamingEndpoint access policies.
	CrossSiteAccessPolicies *CrossSiteAccessPolicies `json:"crossSiteAccessPolicies,omitempty"`
	// FreeTrialEndTime - The free trial expiration time.
	FreeTrialEndTime *date.Time `json:"freeTrialEndTime,omitempty"`
	// Created - The exact time the StreamingEndpoint was created.
	Created *date.Time `json:"created,omitempty"`
	// LastModified - The exact time the StreamingEndpoint was last modified.
	LastModified *date.Time `json:"lastModified,omitempty"`
}

// StreamingEndpointsCreateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type StreamingEndpointsCreateFuture struct {
	azure.Future
}

// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *StreamingEndpointsCreateFuture) Result(client StreamingEndpointsClient) (se StreamingEndpoint, err error) {
	var done bool
	done, err = future.Done(client)
	if err != nil {
		err = autorest.NewErrorWithError(err, "media.StreamingEndpointsCreateFuture", "Result", future.Response(), "Polling failure")
		return
	}
	if !done {
		err = azure.NewAsyncOpIncompleteError("media.StreamingEndpointsCreateFuture")
		return
	}
	// The operation is done: fetch the final result and decode it unless the
	// service returned 204 No Content (nothing to decode).
	sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
	if se.Response.Response, err = future.GetResult(sender); err == nil && se.Response.Response.StatusCode != http.StatusNoContent {
		se, err = client.CreateResponder(se.Response.Response)
		if err != nil {
			err = autorest.NewErrorWithError(err, "media.StreamingEndpointsCreateFuture", "Result", se.Response.Response, "Failure responding to request")
		}
	}
	return
}

// StreamingEndpointsDeleteFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type StreamingEndpointsDeleteFuture struct {
	azure.Future
}

// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
+func (future *StreamingEndpointsDeleteFuture) Result(client StreamingEndpointsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingEndpointsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("media.StreamingEndpointsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// StreamingEndpointsScaleFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type StreamingEndpointsScaleFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *StreamingEndpointsScaleFuture) Result(client StreamingEndpointsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingEndpointsScaleFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("media.StreamingEndpointsScaleFuture") + return + } + ar.Response = future.Response() + return +} + +// StreamingEndpointsStartFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type StreamingEndpointsStartFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *StreamingEndpointsStartFuture) Result(client StreamingEndpointsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingEndpointsStartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("media.StreamingEndpointsStartFuture") + return + } + ar.Response = future.Response() + return +} + +// StreamingEndpointsStopFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type StreamingEndpointsStopFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *StreamingEndpointsStopFuture) Result(client StreamingEndpointsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingEndpointsStopFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("media.StreamingEndpointsStopFuture") + return + } + ar.Response = future.Response() + return +} + +// StreamingEndpointsUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type StreamingEndpointsUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *StreamingEndpointsUpdateFuture) Result(client StreamingEndpointsClient) (se StreamingEndpoint, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingEndpointsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("media.StreamingEndpointsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if se.Response.Response, err = future.GetResult(sender); err == nil && se.Response.Response.StatusCode != http.StatusNoContent { + se, err = client.UpdateResponder(se.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingEndpointsUpdateFuture", "Result", se.Response.Response, "Failure responding to request") + } + } + return +} + +// StreamingEntityScaleUnit scale units definition +type StreamingEntityScaleUnit struct { + // ScaleUnit - The scale unit number of the StreamingEndpoint. + ScaleUnit *int32 `json:"scaleUnit,omitempty"` +} + +// StreamingLocator a Streaming Locator resource +type StreamingLocator struct { + autorest.Response `json:"-"` + *StreamingLocatorProperties `json:"properties,omitempty"` + // ID - Fully qualified resource ID for the resource. + ID *string `json:"id,omitempty"` + // Name - The name of the resource. + Name *string `json:"name,omitempty"` + // Type - The type of the resource. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for StreamingLocator. 
+func (sl StreamingLocator) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if sl.StreamingLocatorProperties != nil { + objectMap["properties"] = sl.StreamingLocatorProperties + } + if sl.ID != nil { + objectMap["id"] = sl.ID + } + if sl.Name != nil { + objectMap["name"] = sl.Name + } + if sl.Type != nil { + objectMap["type"] = sl.Type + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for StreamingLocator struct. +func (sl *StreamingLocator) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var streamingLocatorProperties StreamingLocatorProperties + err = json.Unmarshal(*v, &streamingLocatorProperties) + if err != nil { + return err + } + sl.StreamingLocatorProperties = &streamingLocatorProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + sl.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + sl.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + sl.Type = &typeVar + } + } + } + + return nil +} + +// StreamingLocatorCollection a collection of StreamingLocator items. +type StreamingLocatorCollection struct { + autorest.Response `json:"-"` + // Value - A collection of StreamingLocator items. + Value *[]StreamingLocator `json:"value,omitempty"` + // OdataNextLink - A link to the next page of the collection (when the collection contains too many results to return in one response). + OdataNextLink *string `json:"@odata.nextLink,omitempty"` +} + +// StreamingLocatorCollectionIterator provides access to a complete listing of StreamingLocator values. 
+type StreamingLocatorCollectionIterator struct { + i int + page StreamingLocatorCollectionPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *StreamingLocatorCollectionIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StreamingLocatorCollectionIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *StreamingLocatorCollectionIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter StreamingLocatorCollectionIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter StreamingLocatorCollectionIterator) Response() StreamingLocatorCollection { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter StreamingLocatorCollectionIterator) Value() StreamingLocator { + if !iter.page.NotDone() { + return StreamingLocator{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the StreamingLocatorCollectionIterator type. 
+func NewStreamingLocatorCollectionIterator(page StreamingLocatorCollectionPage) StreamingLocatorCollectionIterator { + return StreamingLocatorCollectionIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (slc StreamingLocatorCollection) IsEmpty() bool { + return slc.Value == nil || len(*slc.Value) == 0 +} + +// streamingLocatorCollectionPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (slc StreamingLocatorCollection) streamingLocatorCollectionPreparer(ctx context.Context) (*http.Request, error) { + if slc.OdataNextLink == nil || len(to.String(slc.OdataNextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(slc.OdataNextLink))) +} + +// StreamingLocatorCollectionPage contains a page of StreamingLocator values. +type StreamingLocatorCollectionPage struct { + fn func(context.Context, StreamingLocatorCollection) (StreamingLocatorCollection, error) + slc StreamingLocatorCollection +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *StreamingLocatorCollectionPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StreamingLocatorCollectionPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.slc) + if err != nil { + return err + } + page.slc = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. 
func (page *StreamingLocatorCollectionPage) Next() error {
	return page.NextWithContext(context.Background())
}

// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page StreamingLocatorCollectionPage) NotDone() bool {
	return !page.slc.IsEmpty()
}

// Response returns the raw server response from the last page request.
func (page StreamingLocatorCollectionPage) Response() StreamingLocatorCollection {
	return page.slc
}

// Values returns the slice of values for the current page or nil if there are no values.
func (page StreamingLocatorCollectionPage) Values() []StreamingLocator {
	if page.slc.IsEmpty() {
		return nil
	}
	return *page.slc.Value
}

// Creates a new instance of the StreamingLocatorCollectionPage type.
func NewStreamingLocatorCollectionPage(getNextPage func(context.Context, StreamingLocatorCollection) (StreamingLocatorCollection, error)) StreamingLocatorCollectionPage {
	return StreamingLocatorCollectionPage{fn: getNextPage}
}

// StreamingLocatorContentKey class for content key in Streaming Locator
type StreamingLocatorContentKey struct {
	// ID - ID of Content Key
	ID *uuid.UUID `json:"id,omitempty"`
	// Type - Encryption type of Content Key. Possible values include: 'StreamingLocatorContentKeyTypeCommonEncryptionCenc', 'StreamingLocatorContentKeyTypeCommonEncryptionCbcs', 'StreamingLocatorContentKeyTypeEnvelopeEncryption'
	Type StreamingLocatorContentKeyType `json:"type,omitempty"`
	// LabelReferenceInStreamingPolicy - Label of Content Key as specified in the Streaming Policy
	LabelReferenceInStreamingPolicy *string `json:"labelReferenceInStreamingPolicy,omitempty"`
	// Value - Value of Content Key
	Value *string `json:"value,omitempty"`
	// PolicyName - ContentKeyPolicy used by Content Key
	PolicyName *string `json:"policyName,omitempty"`
	// Tracks - Tracks which use this Content Key
	Tracks *[]TrackSelection `json:"tracks,omitempty"`
}

// StreamingLocatorProperties properties of the Streaming Locator.
type StreamingLocatorProperties struct {
	// AssetName - Asset Name
	AssetName *string `json:"assetName,omitempty"`
	// Created - The creation time of the Streaming Locator.
	Created *date.Time `json:"created,omitempty"`
	// StartTime - The start time of the Streaming Locator.
	StartTime *date.Time `json:"startTime,omitempty"`
	// EndTime - The end time of the Streaming Locator.
	EndTime *date.Time `json:"endTime,omitempty"`
	// StreamingLocatorID - The StreamingLocatorId of the Streaming Locator.
	StreamingLocatorID *uuid.UUID `json:"streamingLocatorId,omitempty"`
	// StreamingPolicyName - Name of the Streaming Policy used by this Streaming Locator. Either specify the name of Streaming Policy you created or use one of the predefined Streaming Policies. The predefined Streaming Policies available are: 'Predefined_DownloadOnly', 'Predefined_ClearStreamingOnly', 'Predefined_DownloadAndClearStreaming', 'Predefined_ClearKey', 'Predefined_MultiDrmCencStreaming' and 'Predefined_MultiDrmStreaming'
	StreamingPolicyName *string `json:"streamingPolicyName,omitempty"`
	// DefaultContentKeyPolicyName - Name of the default ContentKeyPolicy used by this Streaming Locator.
	DefaultContentKeyPolicyName *string `json:"defaultContentKeyPolicyName,omitempty"`
	// ContentKeys - The ContentKeys used by this Streaming Locator.
	ContentKeys *[]StreamingLocatorContentKey `json:"contentKeys,omitempty"`
	// AlternativeMediaID - Alternative Media ID of this Streaming Locator
	AlternativeMediaID *string `json:"alternativeMediaId,omitempty"`
}

// StreamingPath class of paths for streaming
type StreamingPath struct {
	// StreamingProtocol - Streaming protocol. Possible values include: 'StreamingPolicyStreamingProtocolHls', 'StreamingPolicyStreamingProtocolDash', 'StreamingPolicyStreamingProtocolSmoothStreaming', 'StreamingPolicyStreamingProtocolDownload'
	StreamingProtocol StreamingPolicyStreamingProtocol `json:"streamingProtocol,omitempty"`
	// EncryptionScheme - Encryption scheme. Possible values include: 'EncryptionSchemeNoEncryption', 'EncryptionSchemeEnvelopeEncryption', 'EncryptionSchemeCommonEncryptionCenc', 'EncryptionSchemeCommonEncryptionCbcs'
	EncryptionScheme EncryptionScheme `json:"encryptionScheme,omitempty"`
	// Paths - Streaming paths for each protocol and encryptionScheme pair
	Paths *[]string `json:"paths,omitempty"`
}

// StreamingPolicy a Streaming Policy resource
type StreamingPolicy struct {
	autorest.Response `json:"-"`
	// StreamingPolicyProperties - Properties of the Streaming Policy.
	*StreamingPolicyProperties `json:"properties,omitempty"`
	// ID - Fully qualified resource ID for the resource.
	ID *string `json:"id,omitempty"`
	// Name - The name of the resource.
	Name *string `json:"name,omitempty"`
	// Type - The type of the resource.
	Type *string `json:"type,omitempty"`
}

// MarshalJSON is the custom marshaler for StreamingPolicy.
+func (sp StreamingPolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if sp.StreamingPolicyProperties != nil { + objectMap["properties"] = sp.StreamingPolicyProperties + } + if sp.ID != nil { + objectMap["id"] = sp.ID + } + if sp.Name != nil { + objectMap["name"] = sp.Name + } + if sp.Type != nil { + objectMap["type"] = sp.Type + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for StreamingPolicy struct. +func (sp *StreamingPolicy) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var streamingPolicyProperties StreamingPolicyProperties + err = json.Unmarshal(*v, &streamingPolicyProperties) + if err != nil { + return err + } + sp.StreamingPolicyProperties = &streamingPolicyProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + sp.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + sp.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + sp.Type = &typeVar + } + } + } + + return nil +} + +// StreamingPolicyCollection a collection of StreamingPolicy items. +type StreamingPolicyCollection struct { + autorest.Response `json:"-"` + // Value - A collection of StreamingPolicy items. + Value *[]StreamingPolicy `json:"value,omitempty"` + // OdataNextLink - A link to the next page of the collection (when the collection contains too many results to return in one response). + OdataNextLink *string `json:"@odata.nextLink,omitempty"` +} + +// StreamingPolicyCollectionIterator provides access to a complete listing of StreamingPolicy values. 
+type StreamingPolicyCollectionIterator struct { + i int + page StreamingPolicyCollectionPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *StreamingPolicyCollectionIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StreamingPolicyCollectionIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *StreamingPolicyCollectionIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter StreamingPolicyCollectionIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter StreamingPolicyCollectionIterator) Response() StreamingPolicyCollection { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter StreamingPolicyCollectionIterator) Value() StreamingPolicy { + if !iter.page.NotDone() { + return StreamingPolicy{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the StreamingPolicyCollectionIterator type. 
+func NewStreamingPolicyCollectionIterator(page StreamingPolicyCollectionPage) StreamingPolicyCollectionIterator { + return StreamingPolicyCollectionIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (spc StreamingPolicyCollection) IsEmpty() bool { + return spc.Value == nil || len(*spc.Value) == 0 +} + +// streamingPolicyCollectionPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (spc StreamingPolicyCollection) streamingPolicyCollectionPreparer(ctx context.Context) (*http.Request, error) { + if spc.OdataNextLink == nil || len(to.String(spc.OdataNextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(spc.OdataNextLink))) +} + +// StreamingPolicyCollectionPage contains a page of StreamingPolicy values. +type StreamingPolicyCollectionPage struct { + fn func(context.Context, StreamingPolicyCollection) (StreamingPolicyCollection, error) + spc StreamingPolicyCollection +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *StreamingPolicyCollectionPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StreamingPolicyCollectionPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.spc) + if err != nil { + return err + } + page.spc = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. 
func (page *StreamingPolicyCollectionPage) Next() error {
	return page.NextWithContext(context.Background())
}

// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page StreamingPolicyCollectionPage) NotDone() bool {
	return !page.spc.IsEmpty()
}

// Response returns the raw server response from the last page request.
func (page StreamingPolicyCollectionPage) Response() StreamingPolicyCollection {
	return page.spc
}

// Values returns the slice of values for the current page or nil if there are no values.
func (page StreamingPolicyCollectionPage) Values() []StreamingPolicy {
	if page.spc.IsEmpty() {
		return nil
	}
	return *page.spc.Value
}

// Creates a new instance of the StreamingPolicyCollectionPage type.
func NewStreamingPolicyCollectionPage(getNextPage func(context.Context, StreamingPolicyCollection) (StreamingPolicyCollection, error)) StreamingPolicyCollectionPage {
	return StreamingPolicyCollectionPage{fn: getNextPage}
}

// StreamingPolicyContentKey class to specify properties of content key
type StreamingPolicyContentKey struct {
	// Label - Label can be used to specify Content Key when creating a Streaming Locator
	Label *string `json:"label,omitempty"`
	// PolicyName - Policy used by Content Key
	PolicyName *string `json:"policyName,omitempty"`
	// Tracks - Tracks which use this content key
	Tracks *[]TrackSelection `json:"tracks,omitempty"`
}

// StreamingPolicyContentKeys class to specify properties of all content keys in Streaming Policy
type StreamingPolicyContentKeys struct {
	// DefaultKey - Default content key for an encryption scheme
	DefaultKey *DefaultKey `json:"defaultKey,omitempty"`
	// KeyToTrackMappings - Representing tracks needs separate content key
	KeyToTrackMappings *[]StreamingPolicyContentKey `json:"keyToTrackMappings,omitempty"`
}

// StreamingPolicyFairPlayConfiguration class to specify configurations of FairPlay in Streaming Policy
type StreamingPolicyFairPlayConfiguration struct {
	// CustomLicenseAcquisitionURLTemplate - The template for a customer service to deliver keys to end users. Not needed when using Azure Media Services for issuing keys.
	CustomLicenseAcquisitionURLTemplate *string `json:"customLicenseAcquisitionUrlTemplate,omitempty"`
	// AllowPersistentLicense - All license to be persistent or not
	AllowPersistentLicense *bool `json:"allowPersistentLicense,omitempty"`
}

// StreamingPolicyPlayReadyConfiguration class to specify configurations of PlayReady in Streaming Policy
type StreamingPolicyPlayReadyConfiguration struct {
	// CustomLicenseAcquisitionURLTemplate - The template for a customer service to deliver keys to end users. Not needed when using Azure Media Services for issuing keys.
	CustomLicenseAcquisitionURLTemplate *string `json:"customLicenseAcquisitionUrlTemplate,omitempty"`
	// PlayReadyCustomAttributes - Custom attributes for PlayReady
	PlayReadyCustomAttributes *string `json:"playReadyCustomAttributes,omitempty"`
}

// StreamingPolicyProperties class to specify properties of Streaming Policy
type StreamingPolicyProperties struct {
	// Created - Creation time of Streaming Policy
	Created *date.Time `json:"created,omitempty"`
	// DefaultContentKeyPolicyName - Default ContentKey used by current Streaming Policy
	DefaultContentKeyPolicyName *string `json:"defaultContentKeyPolicyName,omitempty"`
	// EnvelopeEncryption - Configuration of EnvelopeEncryption
	EnvelopeEncryption *EnvelopeEncryption `json:"envelopeEncryption,omitempty"`
	// CommonEncryptionCenc - Configuration of CommonEncryptionCenc
	CommonEncryptionCenc *CommonEncryptionCenc `json:"commonEncryptionCenc,omitempty"`
	// CommonEncryptionCbcs - Configuration of CommonEncryptionCbcs
	CommonEncryptionCbcs *CommonEncryptionCbcs `json:"commonEncryptionCbcs,omitempty"`
	// NoEncryption - Configurations of NoEncryption
	NoEncryption *NoEncryption `json:"noEncryption,omitempty"`
}

// StreamingPolicyWidevineConfiguration class to specify configurations of Widevine in Streaming Policy
type StreamingPolicyWidevineConfiguration struct {
	// CustomLicenseAcquisitionURLTemplate - The template for a customer service to deliver keys to end users. Not needed when using Azure Media Services for issuing keys.
	CustomLicenseAcquisitionURLTemplate *string `json:"customLicenseAcquisitionUrlTemplate,omitempty"`
}

// SubscriptionMediaService a Media Services account.
type SubscriptionMediaService struct {
	autorest.Response `json:"-"`
	// ServiceProperties - The resource properties.
	*ServiceProperties `json:"properties,omitempty"`
	// Tags - Resource tags.
	Tags map[string]*string `json:"tags"`
	// Location - The Azure Region of the resource.
	Location *string `json:"location,omitempty"`
	// ID - Fully qualified resource ID for the resource.
	ID *string `json:"id,omitempty"`
	// Name - The name of the resource.
	Name *string `json:"name,omitempty"`
	// Type - The type of the resource.
	Type *string `json:"type,omitempty"`
}

// MarshalJSON is the custom marshaler for SubscriptionMediaService.
// Only non-nil fields are emitted, and the embedded autorest.Response is
// excluded from the wire format.
func (sms SubscriptionMediaService) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	if sms.ServiceProperties != nil {
		objectMap["properties"] = sms.ServiceProperties
	}
	if sms.Tags != nil {
		objectMap["tags"] = sms.Tags
	}
	if sms.Location != nil {
		objectMap["location"] = sms.Location
	}
	if sms.ID != nil {
		objectMap["id"] = sms.ID
	}
	if sms.Name != nil {
		objectMap["name"] = sms.Name
	}
	if sms.Type != nil {
		objectMap["type"] = sms.Type
	}
	return json.Marshal(objectMap)
}

// UnmarshalJSON is the custom unmarshaler for SubscriptionMediaService struct.
func (sms *SubscriptionMediaService) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	err := json.Unmarshal(body, &m)
	if err != nil {
		return err
	}
	// Decode field-by-field so the embedded *ServiceProperties is only
	// allocated when "properties" is actually present in the payload.
	for k, v := range m {
		switch k {
		case "properties":
			if v != nil {
				var serviceProperties ServiceProperties
				err = json.Unmarshal(*v, &serviceProperties)
				if err != nil {
					return err
				}
				sms.ServiceProperties = &serviceProperties
			}
		case "tags":
			if v != nil {
				var tags map[string]*string
				err = json.Unmarshal(*v, &tags)
				if err != nil {
					return err
				}
				sms.Tags = tags
			}
		case "location":
			if v != nil {
				var location string
				err = json.Unmarshal(*v, &location)
				if err != nil {
					return err
				}
				sms.Location = &location
			}
		case "id":
			if v != nil {
				var ID string
				err = json.Unmarshal(*v, &ID)
				if err != nil {
					return err
				}
				sms.ID = &ID
			}
		case "name":
			if v != nil {
				var name string
				err = json.Unmarshal(*v, &name)
				if err != nil {
					return err
				}
				sms.Name = &name
			}
		case "type":
			if v != nil {
				var typeVar string
				err = json.Unmarshal(*v, &typeVar)
				if err != nil {
					return err
				}
				sms.Type = &typeVar
			}
		}
	}

	return nil
}

// SubscriptionMediaServiceCollection a collection of SubscriptionMediaService items.
type SubscriptionMediaServiceCollection struct {
	autorest.Response `json:"-"`
	// Value - A collection of SubscriptionMediaService items.
	Value *[]SubscriptionMediaService `json:"value,omitempty"`
	// OdataNextLink - A link to the next page of the collection (when the collection contains too many results to return in one response).
	OdataNextLink *string `json:"@odata.nextLink,omitempty"`
}

// SubscriptionMediaServiceCollectionIterator provides access to a complete listing of
// SubscriptionMediaService values.
type SubscriptionMediaServiceCollectionIterator struct {
	// i indexes into the current page's Values() slice.
	i    int
	page SubscriptionMediaServiceCollectionPage
}

// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *SubscriptionMediaServiceCollectionIterator) NextWithContext(ctx context.Context) (err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/SubscriptionMediaServiceCollectionIterator.NextWithContext")
		defer func() {
			// -1 marks "no HTTP response received" for the trace span.
			sc := -1
			if iter.Response().Response.Response != nil {
				sc = iter.Response().Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	iter.i++
	if iter.i < len(iter.page.Values()) {
		return nil
	}
	err = iter.page.NextWithContext(ctx)
	if err != nil {
		// Roll back the advance on failure so the iterator is unchanged.
		iter.i--
		return err
	}
	iter.i = 0
	return nil
}

// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *SubscriptionMediaServiceCollectionIterator) Next() error {
	return iter.NextWithContext(context.Background())
}

// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter SubscriptionMediaServiceCollectionIterator) NotDone() bool {
	return iter.page.NotDone() && iter.i < len(iter.page.Values())
}

// Response returns the raw server response from the last page request.
func (iter SubscriptionMediaServiceCollectionIterator) Response() SubscriptionMediaServiceCollection {
	return iter.page.Response()
}

// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter SubscriptionMediaServiceCollectionIterator) Value() SubscriptionMediaService {
	if !iter.page.NotDone() {
		return SubscriptionMediaService{}
	}
	return iter.page.Values()[iter.i]
}

// Creates a new instance of the SubscriptionMediaServiceCollectionIterator type.
func NewSubscriptionMediaServiceCollectionIterator(page SubscriptionMediaServiceCollectionPage) SubscriptionMediaServiceCollectionIterator {
	return SubscriptionMediaServiceCollectionIterator{page: page}
}

// IsEmpty returns true if the ListResult contains no values.
func (smsc SubscriptionMediaServiceCollection) IsEmpty() bool {
	return smsc.Value == nil || len(*smsc.Value) == 0
}

// subscriptionMediaServiceCollectionPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (smsc SubscriptionMediaServiceCollection) subscriptionMediaServiceCollectionPreparer(ctx context.Context) (*http.Request, error) {
	if smsc.OdataNextLink == nil || len(to.String(smsc.OdataNextLink)) < 1 {
		// No @odata.nextLink: this is the last page.
		return nil, nil
	}
	return autorest.Prepare((&http.Request{}).WithContext(ctx),
		autorest.AsJSON(),
		autorest.AsGet(),
		autorest.WithBaseURL(to.String(smsc.OdataNextLink)))
}

// SubscriptionMediaServiceCollectionPage contains a page of SubscriptionMediaService values.
type SubscriptionMediaServiceCollectionPage struct {
	// fn fetches the page that follows the given one (the "advance" callback).
	fn   func(context.Context, SubscriptionMediaServiceCollection) (SubscriptionMediaServiceCollection, error)
	smsc SubscriptionMediaServiceCollection
}

// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *SubscriptionMediaServiceCollectionPage) NextWithContext(ctx context.Context) (err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/SubscriptionMediaServiceCollectionPage.NextWithContext")
		defer func() {
			sc := -1
			if page.Response().Response.Response != nil {
				sc = page.Response().Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	next, err := page.fn(ctx, page.smsc)
	if err != nil {
		return err
	}
	page.smsc = next
	return nil
}

// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *SubscriptionMediaServiceCollectionPage) Next() error {
	return page.NextWithContext(context.Background())
}

// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page SubscriptionMediaServiceCollectionPage) NotDone() bool {
	return !page.smsc.IsEmpty()
}

// Response returns the raw server response from the last page request.
func (page SubscriptionMediaServiceCollectionPage) Response() SubscriptionMediaServiceCollection {
	return page.smsc
}

// Values returns the slice of values for the current page or nil if there are no values.
func (page SubscriptionMediaServiceCollectionPage) Values() []SubscriptionMediaService {
	if page.smsc.IsEmpty() {
		return nil
	}
	return *page.smsc.Value
}

// Creates a new instance of the SubscriptionMediaServiceCollectionPage type.
func NewSubscriptionMediaServiceCollectionPage(getNextPage func(context.Context, SubscriptionMediaServiceCollection) (SubscriptionMediaServiceCollection, error)) SubscriptionMediaServiceCollectionPage {
	return SubscriptionMediaServiceCollectionPage{fn: getNextPage}
}

// SyncStorageKeysInput the input to the sync storage keys request.
type SyncStorageKeysInput struct {
	// ID - The ID of the storage account resource.
	ID *string `json:"id,omitempty"`
}

// TrackedResource the resource model definition for a ARM tracked resource.
type TrackedResource struct {
	// Tags - Resource tags.
	Tags map[string]*string `json:"tags"`
	// Location - The Azure Region of the resource.
	Location *string `json:"location,omitempty"`
	// ID - Fully qualified resource ID for the resource.
	ID *string `json:"id,omitempty"`
	// Name - The name of the resource.
	Name *string `json:"name,omitempty"`
	// Type - The type of the resource.
	Type *string `json:"type,omitempty"`
}

// MarshalJSON is the custom marshaler for TrackedResource.
// Only non-nil fields are emitted.
func (tr TrackedResource) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	if tr.Tags != nil {
		objectMap["tags"] = tr.Tags
	}
	if tr.Location != nil {
		objectMap["location"] = tr.Location
	}
	if tr.ID != nil {
		objectMap["id"] = tr.ID
	}
	if tr.Name != nil {
		objectMap["name"] = tr.Name
	}
	if tr.Type != nil {
		objectMap["type"] = tr.Type
	}
	return json.Marshal(objectMap)
}

// TrackPropertyCondition class to specify one track property condition
type TrackPropertyCondition struct {
	// Property - Track property type. Possible values include: 'TrackPropertyTypeUnknown', 'TrackPropertyTypeFourCC'
	Property TrackPropertyType `json:"property,omitempty"`
	// Operation - Track property condition operation. Possible values include: 'TrackPropertyCompareOperationUnknown', 'TrackPropertyCompareOperationEqual'
	Operation TrackPropertyCompareOperation `json:"operation,omitempty"`
	// Value - Track property value
	Value *string `json:"value,omitempty"`
}

// TrackSelection class to select a track
type TrackSelection struct {
	// TrackSelections - TrackSelections is a track property condition list which can specify track(s)
	TrackSelections *[]TrackPropertyCondition `json:"trackSelections,omitempty"`
}

// Transform a Transform encapsulates the rules or instructions for generating desired outputs from input
// media, such as by transcoding or by extracting insights. After the Transform is created, it can be
// applied to input media by creating Jobs.
type Transform struct {
	autorest.Response `json:"-"`
	// TransformProperties - The resource properties.
	*TransformProperties `json:"properties,omitempty"`
	// ID - Fully qualified resource ID for the resource.
	ID *string `json:"id,omitempty"`
	// Name - The name of the resource.
	Name *string `json:"name,omitempty"`
	// Type - The type of the resource.
	Type *string `json:"type,omitempty"`
}

// MarshalJSON is the custom marshaler for Transform.
// Only non-nil fields are emitted, and the embedded autorest.Response is
// excluded from the wire format.
func (t Transform) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	if t.TransformProperties != nil {
		objectMap["properties"] = t.TransformProperties
	}
	if t.ID != nil {
		objectMap["id"] = t.ID
	}
	if t.Name != nil {
		objectMap["name"] = t.Name
	}
	if t.Type != nil {
		objectMap["type"] = t.Type
	}
	return json.Marshal(objectMap)
}

// UnmarshalJSON is the custom unmarshaler for Transform struct.
func (t *Transform) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	err := json.Unmarshal(body, &m)
	if err != nil {
		return err
	}
	// Decode field-by-field so the embedded *TransformProperties is only
	// allocated when "properties" is actually present in the payload.
	for k, v := range m {
		switch k {
		case "properties":
			if v != nil {
				var transformProperties TransformProperties
				err = json.Unmarshal(*v, &transformProperties)
				if err != nil {
					return err
				}
				t.TransformProperties = &transformProperties
			}
		case "id":
			if v != nil {
				var ID string
				err = json.Unmarshal(*v, &ID)
				if err != nil {
					return err
				}
				t.ID = &ID
			}
		case "name":
			if v != nil {
				var name string
				err = json.Unmarshal(*v, &name)
				if err != nil {
					return err
				}
				t.Name = &name
			}
		case "type":
			if v != nil {
				var typeVar string
				err = json.Unmarshal(*v, &typeVar)
				if err != nil {
					return err
				}
				t.Type = &typeVar
			}
		}
	}

	return nil
}

// TransformCollection a collection of Transform items.
type TransformCollection struct {
	autorest.Response `json:"-"`
	// Value - A collection of Transform items.
	Value *[]Transform `json:"value,omitempty"`
	// OdataNextLink - A link to the next page of the collection (when the collection contains too many results to return in one response).
	OdataNextLink *string `json:"@odata.nextLink,omitempty"`
}

// TransformCollectionIterator provides access to a complete listing of Transform values.
type TransformCollectionIterator struct {
	// i indexes into the current page's Values() slice.
	i    int
	page TransformCollectionPage
}

// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *TransformCollectionIterator) NextWithContext(ctx context.Context) (err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/TransformCollectionIterator.NextWithContext")
		defer func() {
			// -1 marks "no HTTP response received" for the trace span.
			sc := -1
			if iter.Response().Response.Response != nil {
				sc = iter.Response().Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	iter.i++
	if iter.i < len(iter.page.Values()) {
		return nil
	}
	err = iter.page.NextWithContext(ctx)
	if err != nil {
		// Roll back the advance on failure so the iterator is unchanged.
		iter.i--
		return err
	}
	iter.i = 0
	return nil
}

// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *TransformCollectionIterator) Next() error {
	return iter.NextWithContext(context.Background())
}

// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter TransformCollectionIterator) NotDone() bool {
	return iter.page.NotDone() && iter.i < len(iter.page.Values())
}

// Response returns the raw server response from the last page request.
func (iter TransformCollectionIterator) Response() TransformCollection {
	return iter.page.Response()
}

// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter TransformCollectionIterator) Value() Transform {
	if !iter.page.NotDone() {
		return Transform{}
	}
	return iter.page.Values()[iter.i]
}

// Creates a new instance of the TransformCollectionIterator type.
func NewTransformCollectionIterator(page TransformCollectionPage) TransformCollectionIterator {
	return TransformCollectionIterator{page: page}
}

// IsEmpty returns true if the ListResult contains no values.
func (tc TransformCollection) IsEmpty() bool {
	return tc.Value == nil || len(*tc.Value) == 0
}

// transformCollectionPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (tc TransformCollection) transformCollectionPreparer(ctx context.Context) (*http.Request, error) {
	if tc.OdataNextLink == nil || len(to.String(tc.OdataNextLink)) < 1 {
		// No @odata.nextLink: this is the last page.
		return nil, nil
	}
	return autorest.Prepare((&http.Request{}).WithContext(ctx),
		autorest.AsJSON(),
		autorest.AsGet(),
		autorest.WithBaseURL(to.String(tc.OdataNextLink)))
}

// TransformCollectionPage contains a page of Transform values.
type TransformCollectionPage struct {
	// fn fetches the page that follows the given one (the "advance" callback).
	fn func(context.Context, TransformCollection) (TransformCollection, error)
	tc TransformCollection
}

// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *TransformCollectionPage) NextWithContext(ctx context.Context) (err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/TransformCollectionPage.NextWithContext")
		defer func() {
			sc := -1
			if page.Response().Response.Response != nil {
				sc = page.Response().Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	next, err := page.fn(ctx, page.tc)
	if err != nil {
		return err
	}
	page.tc = next
	return nil
}

// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *TransformCollectionPage) Next() error {
	return page.NextWithContext(context.Background())
}

// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page TransformCollectionPage) NotDone() bool {
	return !page.tc.IsEmpty()
}

// Response returns the raw server response from the last page request.
func (page TransformCollectionPage) Response() TransformCollection {
	return page.tc
}

// Values returns the slice of values for the current page or nil if there are no values.
func (page TransformCollectionPage) Values() []Transform {
	if page.tc.IsEmpty() {
		return nil
	}
	return *page.tc.Value
}

// Creates a new instance of the TransformCollectionPage type.
func NewTransformCollectionPage(getNextPage func(context.Context, TransformCollection) (TransformCollection, error)) TransformCollectionPage {
	return TransformCollectionPage{fn: getNextPage}
}

// TransformOutput describes the properties of a TransformOutput, which are the rules to be applied while
// generating the desired output.
type TransformOutput struct {
	// OnError - A Transform can define more than one outputs. This property defines what the service should do when one output fails - either continue to produce other outputs, or, stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with 'ContinueJob'. The default is 'StopProcessingJob'. Possible values include: 'StopProcessingJob', 'ContinueJob'
	OnError OnErrorType `json:"onError,omitempty"`
	// RelativePriority - Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing TransformOutputs. The default priority is Normal. Possible values include: 'Low', 'Normal', 'High'
	RelativePriority Priority `json:"relativePriority,omitempty"`
	// Preset - Preset that describes the operations that will be used to modify, transcode, or extract insights from the source file to generate the output.
	Preset BasicPreset `json:"preset,omitempty"`
}

// UnmarshalJSON is the custom unmarshaler for TransformOutput struct.
// A custom unmarshaler is required because Preset is a polymorphic
// (BasicPreset) interface that must be resolved via unmarshalBasicPreset.
func (toVar *TransformOutput) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	err := json.Unmarshal(body, &m)
	if err != nil {
		return err
	}
	for k, v := range m {
		switch k {
		case "onError":
			if v != nil {
				var onError OnErrorType
				err = json.Unmarshal(*v, &onError)
				if err != nil {
					return err
				}
				toVar.OnError = onError
			}
		case "relativePriority":
			if v != nil {
				var relativePriority Priority
				err = json.Unmarshal(*v, &relativePriority)
				if err != nil {
					return err
				}
				toVar.RelativePriority = relativePriority
			}
		case "preset":
			if v != nil {
				preset, err := unmarshalBasicPreset(*v)
				if err != nil {
					return err
				}
				toVar.Preset = preset
			}
		}
	}

	return nil
}

// TransformProperties a Transform.
type TransformProperties struct {
	// Created - The UTC date and time when the Transform was created, in 'YYYY-MM-DDThh:mm:ssZ' format.
	Created *date.Time `json:"created,omitempty"`
	// Description - An optional verbose description of the Transform.
	Description *string `json:"description,omitempty"`
	// LastModified - The UTC date and time when the Transform was last updated, in 'YYYY-MM-DDThh:mm:ssZ' format.
	LastModified *date.Time `json:"lastModified,omitempty"`
	// Outputs - An array of one or more TransformOutputs that the Transform should generate.
	Outputs *[]TransformOutput `json:"outputs,omitempty"`
}

// TransportStreamFormat describes the properties for generating an MPEG-2 Transport Stream (ISO/IEC
// 13818-1) output video file(s).
type TransportStreamFormat struct {
	// OutputFiles - The list of output files to produce. Each entry in the list is a set of audio and video layer labels to be muxed together .
	OutputFiles *[]OutputFile `json:"outputFiles,omitempty"`
	// FilenamePattern - The pattern of the file names for the generated output files. The following macros are supported in the file name: {Basename} - The base name of the input video {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. Any unsubstituted macros will be collapsed and removed from the filename.
	FilenamePattern *string `json:"filenamePattern,omitempty"`
	// OdataType - Possible values include: 'OdataTypeFormat', 'OdataTypeMicrosoftMediaImageFormat', 'OdataTypeMicrosoftMediaJpgFormat', 'OdataTypeMicrosoftMediaPngFormat', 'OdataTypeMicrosoftMediaMultiBitrateFormat', 'OdataTypeMicrosoftMediaMp4Format', 'OdataTypeMicrosoftMediaTransportStreamFormat'
	OdataType OdataTypeBasicFormat `json:"@odata.type,omitempty"`
}

// MarshalJSON is the custom marshaler for TransportStreamFormat.
// It forces the @odata.type discriminator before serializing so the service
// can resolve the concrete Format subtype.
func (tsf TransportStreamFormat) MarshalJSON() ([]byte, error) {
	tsf.OdataType = OdataTypeMicrosoftMediaTransportStreamFormat
	objectMap := make(map[string]interface{})
	if tsf.OutputFiles != nil {
		objectMap["outputFiles"] = tsf.OutputFiles
	}
	if tsf.FilenamePattern != nil {
		objectMap["filenamePattern"] = tsf.FilenamePattern
	}
	if tsf.OdataType != "" {
		objectMap["@odata.type"] = tsf.OdataType
	}
	return json.Marshal(objectMap)
}

// AsImageFormat is the BasicFormat implementation for TransportStreamFormat.
func (tsf TransportStreamFormat) AsImageFormat() (*ImageFormat, bool) {
	return nil, false
}

// AsBasicImageFormat is the BasicFormat implementation for TransportStreamFormat.
func (tsf TransportStreamFormat) AsBasicImageFormat() (BasicImageFormat, bool) {
	return nil, false
}

// AsJpgFormat is the BasicFormat implementation for TransportStreamFormat.
func (tsf TransportStreamFormat) AsJpgFormat() (*JpgFormat, bool) {
	return nil, false
}

// AsPngFormat is the BasicFormat implementation for TransportStreamFormat.
func (tsf TransportStreamFormat) AsPngFormat() (*PngFormat, bool) {
	return nil, false
}

// AsMultiBitrateFormat is the BasicFormat implementation for TransportStreamFormat.
func (tsf TransportStreamFormat) AsMultiBitrateFormat() (*MultiBitrateFormat, bool) {
	return nil, false
}

// AsBasicMultiBitrateFormat is the BasicFormat implementation for TransportStreamFormat.
func (tsf TransportStreamFormat) AsBasicMultiBitrateFormat() (BasicMultiBitrateFormat, bool) {
	return &tsf, true
}

// AsMp4Format is the BasicFormat implementation for TransportStreamFormat.
func (tsf TransportStreamFormat) AsMp4Format() (*Mp4Format, bool) {
	return nil, false
}

// AsTransportStreamFormat is the BasicFormat implementation for TransportStreamFormat.
func (tsf TransportStreamFormat) AsTransportStreamFormat() (*TransportStreamFormat, bool) {
	return &tsf, true
}

// AsFormat is the BasicFormat implementation for TransportStreamFormat.
func (tsf TransportStreamFormat) AsFormat() (*Format, bool) {
	return nil, false
}

// AsBasicFormat is the BasicFormat implementation for TransportStreamFormat.
func (tsf TransportStreamFormat) AsBasicFormat() (BasicFormat, bool) {
	return &tsf, true
}

// BasicVideo describes the basic properties for encoding the input video.
type BasicVideo interface {
	AsImage() (*Image, bool)
	AsBasicImage() (BasicImage, bool)
	AsH264Video() (*H264Video, bool)
	AsJpgImage() (*JpgImage, bool)
	AsPngImage() (*PngImage, bool)
	AsVideo() (*Video, bool)
}

// Video describes the basic properties for encoding the input video.
type Video struct {
	// KeyFrameInterval - The distance between two key frames, thereby defining a group of pictures (GOP). The value should be a non-zero integer in the range [1, 30] seconds, specified in ISO 8601 format. The default is 2 seconds (PT2S).
	KeyFrameInterval *string `json:"keyFrameInterval,omitempty"`
	// StretchMode - The resizing mode - how the input video will be resized to fit the desired output resolution(s). Default is AutoSize. Possible values include: 'StretchModeNone', 'StretchModeAutoSize', 'StretchModeAutoFit'
	StretchMode StretchMode `json:"stretchMode,omitempty"`
	// Label - An optional label for the codec. The label can be used to control muxing behavior.
	Label *string `json:"label,omitempty"`
	// OdataType - Possible values include: 'OdataTypeCodec', 'OdataTypeMicrosoftMediaAudio', 'OdataTypeMicrosoftMediaAacAudio', 'OdataTypeMicrosoftMediaCopyVideo', 'OdataTypeMicrosoftMediaVideo', 'OdataTypeMicrosoftMediaImage', 'OdataTypeMicrosoftMediaCopyAudio', 'OdataTypeMicrosoftMediaH264Video', 'OdataTypeMicrosoftMediaJpgImage', 'OdataTypeMicrosoftMediaPngImage'
	OdataType OdataTypeBasicCodec `json:"@odata.type,omitempty"`
}

// unmarshalBasicVideo resolves the concrete BasicVideo implementation from
// the @odata.type discriminator in the JSON body; unknown or missing
// discriminators fall back to the plain Video type.
func unmarshalBasicVideo(body []byte) (BasicVideo, error) {
	var m map[string]interface{}
	err := json.Unmarshal(body, &m)
	if err != nil {
		return nil, err
	}

	switch m["@odata.type"] {
	case string(OdataTypeMicrosoftMediaImage):
		var i Image
		err := json.Unmarshal(body, &i)
		return i, err
	case string(OdataTypeMicrosoftMediaH264Video):
		var hv H264Video
		err := json.Unmarshal(body, &hv)
		return hv, err
	case string(OdataTypeMicrosoftMediaJpgImage):
		var ji JpgImage
		err := json.Unmarshal(body, &ji)
		return ji, err
	case string(OdataTypeMicrosoftMediaPngImage):
		var pi PngImage
		err := json.Unmarshal(body, &pi)
		return pi, err
	default:
		var vVar Video
		err := json.Unmarshal(body, &vVar)
		return vVar, err
	}
}

// unmarshalBasicVideoArray decodes a JSON array, resolving each element's
// concrete BasicVideo type via unmarshalBasicVideo.
func unmarshalBasicVideoArray(body []byte) ([]BasicVideo, error) {
	var rawMessages []*json.RawMessage
	err := json.Unmarshal(body, &rawMessages)
	if err != nil {
		return nil, err
	}

	vVarArray := make([]BasicVideo, len(rawMessages))

	for index, rawMessage := range rawMessages {
		vVar, err := unmarshalBasicVideo(*rawMessage)
		if err != nil {
			return nil, err
		}
		vVarArray[index] = vVar
	}
	return vVarArray, nil
}

// MarshalJSON is the custom marshaler for Video.
// It forces the @odata.type discriminator before serializing so the service
// can resolve the concrete Codec subtype.
func (vVar Video) MarshalJSON() ([]byte, error) {
	vVar.OdataType = OdataTypeMicrosoftMediaVideo
	objectMap := make(map[string]interface{})
	if vVar.KeyFrameInterval != nil {
		objectMap["keyFrameInterval"] = vVar.KeyFrameInterval
	}
	if vVar.StretchMode != "" {
		objectMap["stretchMode"] = vVar.StretchMode
	}
	if vVar.Label != nil {
		objectMap["label"] = vVar.Label
	}
	if vVar.OdataType != "" {
		objectMap["@odata.type"] = vVar.OdataType
	}
	return json.Marshal(objectMap)
}

// AsAudio is the BasicCodec implementation for Video.
func (vVar Video) AsAudio() (*Audio, bool) {
	return nil, false
}

// AsBasicAudio is the BasicCodec implementation for Video.
func (vVar Video) AsBasicAudio() (BasicAudio, bool) {
	return nil, false
}

// AsAacAudio is the BasicCodec implementation for Video.
func (vVar Video) AsAacAudio() (*AacAudio, bool) {
	return nil, false
}

// AsCopyVideo is the BasicCodec implementation for Video.
func (vVar Video) AsCopyVideo() (*CopyVideo, bool) {
	return nil, false
}

// AsVideo is the BasicCodec implementation for Video.
func (vVar Video) AsVideo() (*Video, bool) {
	return &vVar, true
}

// AsBasicVideo is the BasicCodec implementation for Video.
func (vVar Video) AsBasicVideo() (BasicVideo, bool) {
	return &vVar, true
}

// AsImage is the BasicCodec implementation for Video.
func (vVar Video) AsImage() (*Image, bool) {
	return nil, false
}

// AsBasicImage is the BasicCodec implementation for Video.
+func (vVar Video) AsBasicImage() (BasicImage, bool) { + return nil, false +} + +// AsCopyAudio is the BasicCodec implementation for Video. +func (vVar Video) AsCopyAudio() (*CopyAudio, bool) { + return nil, false +} + +// AsH264Video is the BasicCodec implementation for Video. +func (vVar Video) AsH264Video() (*H264Video, bool) { + return nil, false +} + +// AsJpgImage is the BasicCodec implementation for Video. +func (vVar Video) AsJpgImage() (*JpgImage, bool) { + return nil, false +} + +// AsPngImage is the BasicCodec implementation for Video. +func (vVar Video) AsPngImage() (*PngImage, bool) { + return nil, false +} + +// AsCodec is the BasicCodec implementation for Video. +func (vVar Video) AsCodec() (*Codec, bool) { + return nil, false +} + +// AsBasicCodec is the BasicCodec implementation for Video. +func (vVar Video) AsBasicCodec() (BasicCodec, bool) { + return &vVar, true +} + +// VideoAnalyzerPreset a video analyzer preset that extracts insights (rich metadata) from both audio and +// video, and outputs a JSON format file. +type VideoAnalyzerPreset struct { + // InsightsToExtract - The type of insights to be extracted. If not set then based on the content the type will selected. If the content is audio only then only audio insights are extracted and if it is video only. Possible values include: 'AudioInsightsOnly', 'VideoInsightsOnly', 'AllInsights' + InsightsToExtract InsightsType `json:"insightsToExtract,omitempty"` + // AudioLanguage - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). The list of supported languages are, 'en-US', 'en-GB', 'es-ES', 'es-MX', 'fr-FR', 'it-IT', 'ja-JP', 'pt-BR', 'zh-CN', 'de-DE', 'ar-EG', 'ru-RU', 'hi-IN'. If not specified, automatic language detection would be employed. This feature currently supports English, Chinese, French, German, Italian, Japanese, Spanish, Russian, and Portuguese. 
The automatic detection works best with audio recordings with clearly discernable speech. If automatic detection fails to find the language, transcription would fallback to English. + AudioLanguage *string `json:"audioLanguage,omitempty"` + // OdataType - Possible values include: 'OdataTypePreset', 'OdataTypeMicrosoftMediaAudioAnalyzerPreset', 'OdataTypeMicrosoftMediaBuiltInStandardEncoderPreset', 'OdataTypeMicrosoftMediaStandardEncoderPreset', 'OdataTypeMicrosoftMediaVideoAnalyzerPreset' + OdataType OdataTypeBasicPreset `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for VideoAnalyzerPreset. +func (vap VideoAnalyzerPreset) MarshalJSON() ([]byte, error) { + vap.OdataType = OdataTypeMicrosoftMediaVideoAnalyzerPreset + objectMap := make(map[string]interface{}) + if vap.InsightsToExtract != "" { + objectMap["insightsToExtract"] = vap.InsightsToExtract + } + if vap.AudioLanguage != nil { + objectMap["audioLanguage"] = vap.AudioLanguage + } + if vap.OdataType != "" { + objectMap["@odata.type"] = vap.OdataType + } + return json.Marshal(objectMap) +} + +// AsAudioAnalyzerPreset is the BasicPreset implementation for VideoAnalyzerPreset. +func (vap VideoAnalyzerPreset) AsAudioAnalyzerPreset() (*AudioAnalyzerPreset, bool) { + return nil, false +} + +// AsBasicAudioAnalyzerPreset is the BasicPreset implementation for VideoAnalyzerPreset. +func (vap VideoAnalyzerPreset) AsBasicAudioAnalyzerPreset() (BasicAudioAnalyzerPreset, bool) { + return &vap, true +} + +// AsBuiltInStandardEncoderPreset is the BasicPreset implementation for VideoAnalyzerPreset. +func (vap VideoAnalyzerPreset) AsBuiltInStandardEncoderPreset() (*BuiltInStandardEncoderPreset, bool) { + return nil, false +} + +// AsStandardEncoderPreset is the BasicPreset implementation for VideoAnalyzerPreset. 
+func (vap VideoAnalyzerPreset) AsStandardEncoderPreset() (*StandardEncoderPreset, bool) { + return nil, false +} + +// AsVideoAnalyzerPreset is the BasicPreset implementation for VideoAnalyzerPreset. +func (vap VideoAnalyzerPreset) AsVideoAnalyzerPreset() (*VideoAnalyzerPreset, bool) { + return &vap, true +} + +// AsPreset is the BasicPreset implementation for VideoAnalyzerPreset. +func (vap VideoAnalyzerPreset) AsPreset() (*Preset, bool) { + return nil, false +} + +// AsBasicPreset is the BasicPreset implementation for VideoAnalyzerPreset. +func (vap VideoAnalyzerPreset) AsBasicPreset() (BasicPreset, bool) { + return &vap, true +} + +// BasicVideoLayer describes the settings to be used when encoding the input video into a desired output bitrate layer. +type BasicVideoLayer interface { + AsH264Layer() (*H264Layer, bool) + AsVideoLayer() (*VideoLayer, bool) +} + +// VideoLayer describes the settings to be used when encoding the input video into a desired output bitrate +// layer. +type VideoLayer struct { + // Bitrate - The average bitrate in bits per second at which to encode the input video when generating this layer. This is a required field. + Bitrate *int32 `json:"bitrate,omitempty"` + // MaxBitrate - The maximum bitrate (in bits per second), at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate. + MaxBitrate *int32 `json:"maxBitrate,omitempty"` + // BFrames - The number of B-frames to be used when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level. + BFrames *int32 `json:"bFrames,omitempty"` + // FrameRate - The frame rate (in frames per second) at which to encode this layer. The value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame rates based on the profile and level. 
If it is not specified, the encoder will use the same frame rate as the input video. + FrameRate *string `json:"frameRate,omitempty"` + // Slices - The number of slices to be used when encoding this layer. If not specified, default is zero, which means that encoder will use a single slice for each frame. + Slices *int32 `json:"slices,omitempty"` + // AdaptiveBFrame - Whether or not adaptive B-frames are to be used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. + AdaptiveBFrame *bool `json:"adaptiveBFrame,omitempty"` + // Width - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in width as the input. + Width *string `json:"width,omitempty"` + // Height - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 50% means the output video has half as many pixels in height as the input. + Height *string `json:"height,omitempty"` + // Label - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file. 
+ Label *string `json:"label,omitempty"` + // OdataType - Possible values include: 'OdataTypeLayer', 'OdataTypeMicrosoftMediaVideoLayer', 'OdataTypeMicrosoftMediaH264Layer', 'OdataTypeMicrosoftMediaJpgLayer', 'OdataTypeMicrosoftMediaPngLayer' + OdataType OdataTypeBasicLayer `json:"@odata.type,omitempty"` +} + +func unmarshalBasicVideoLayer(body []byte) (BasicVideoLayer, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["@odata.type"] { + case string(OdataTypeMicrosoftMediaH264Layer): + var hl H264Layer + err := json.Unmarshal(body, &hl) + return hl, err + default: + var vl VideoLayer + err := json.Unmarshal(body, &vl) + return vl, err + } +} +func unmarshalBasicVideoLayerArray(body []byte) ([]BasicVideoLayer, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + vlArray := make([]BasicVideoLayer, len(rawMessages)) + + for index, rawMessage := range rawMessages { + vl, err := unmarshalBasicVideoLayer(*rawMessage) + if err != nil { + return nil, err + } + vlArray[index] = vl + } + return vlArray, nil +} + +// MarshalJSON is the custom marshaler for VideoLayer. 
+func (vl VideoLayer) MarshalJSON() ([]byte, error) { + vl.OdataType = OdataTypeMicrosoftMediaVideoLayer + objectMap := make(map[string]interface{}) + if vl.Bitrate != nil { + objectMap["bitrate"] = vl.Bitrate + } + if vl.MaxBitrate != nil { + objectMap["maxBitrate"] = vl.MaxBitrate + } + if vl.BFrames != nil { + objectMap["bFrames"] = vl.BFrames + } + if vl.FrameRate != nil { + objectMap["frameRate"] = vl.FrameRate + } + if vl.Slices != nil { + objectMap["slices"] = vl.Slices + } + if vl.AdaptiveBFrame != nil { + objectMap["adaptiveBFrame"] = vl.AdaptiveBFrame + } + if vl.Width != nil { + objectMap["width"] = vl.Width + } + if vl.Height != nil { + objectMap["height"] = vl.Height + } + if vl.Label != nil { + objectMap["label"] = vl.Label + } + if vl.OdataType != "" { + objectMap["@odata.type"] = vl.OdataType + } + return json.Marshal(objectMap) +} + +// AsVideoLayer is the BasicLayer implementation for VideoLayer. +func (vl VideoLayer) AsVideoLayer() (*VideoLayer, bool) { + return &vl, true +} + +// AsBasicVideoLayer is the BasicLayer implementation for VideoLayer. +func (vl VideoLayer) AsBasicVideoLayer() (BasicVideoLayer, bool) { + return &vl, true +} + +// AsH264Layer is the BasicLayer implementation for VideoLayer. +func (vl VideoLayer) AsH264Layer() (*H264Layer, bool) { + return nil, false +} + +// AsJpgLayer is the BasicLayer implementation for VideoLayer. +func (vl VideoLayer) AsJpgLayer() (*JpgLayer, bool) { + return nil, false +} + +// AsPngLayer is the BasicLayer implementation for VideoLayer. +func (vl VideoLayer) AsPngLayer() (*PngLayer, bool) { + return nil, false +} + +// AsLayer is the BasicLayer implementation for VideoLayer. +func (vl VideoLayer) AsLayer() (*Layer, bool) { + return nil, false +} + +// AsBasicLayer is the BasicLayer implementation for VideoLayer. +func (vl VideoLayer) AsBasicLayer() (BasicLayer, bool) { + return &vl, true +} + +// VideoOverlay describes the properties of a video overlay. 
+type VideoOverlay struct { + // Position - The location in the input video where the overlay is applied. + Position *Rectangle `json:"position,omitempty"` + // Opacity - The opacity of the overlay. This is a value in the range [0 - 1.0]. Default is 1.0 which mean the overlay is opaque. + Opacity *float64 `json:"opacity,omitempty"` + // CropRectangle - An optional rectangular window used to crop the overlay image or video. + CropRectangle *Rectangle `json:"cropRectangle,omitempty"` + // InputLabel - The label of the job input which is to be used as an overlay. The Input must specify exactly one file. You can specify an image file in JPG or PNG formats, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file. See https://aka.ms/mesformats for the complete list of supported audio and video file formats. + InputLabel *string `json:"inputLabel,omitempty"` + // Start - The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds in to the input video. If not specified the overlay starts from the beginning of the input video. + Start *string `json:"start,omitempty"` + // End - The position in the input video at which the overlay ends. The value should be in ISO 8601 duration format. For example, PT30S to end the overlay at 30 seconds in to the input video. If not specified the overlay will be applied until the end of the input video if inputLoop is true. Else, if inputLoop is false, then overlay will last as long as the duration of the overlay media. + End *string `json:"end,omitempty"` + // FadeInDuration - The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S). + FadeInDuration *string `json:"fadeInDuration,omitempty"` + // FadeOutDuration - The duration over which the overlay fades out of the input video. 
The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S). + FadeOutDuration *string `json:"fadeOutDuration,omitempty"` + // AudioGainLevel - The gain level of audio in the overlay. The value should be in the range [0, 1.0]. The default is 1.0. + AudioGainLevel *float64 `json:"audioGainLevel,omitempty"` + // OdataType - Possible values include: 'OdataTypeOverlay', 'OdataTypeMicrosoftMediaAudioOverlay', 'OdataTypeMicrosoftMediaVideoOverlay' + OdataType OdataTypeBasicOverlay `json:"@odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for VideoOverlay. +func (vo VideoOverlay) MarshalJSON() ([]byte, error) { + vo.OdataType = OdataTypeMicrosoftMediaVideoOverlay + objectMap := make(map[string]interface{}) + if vo.Position != nil { + objectMap["position"] = vo.Position + } + if vo.Opacity != nil { + objectMap["opacity"] = vo.Opacity + } + if vo.CropRectangle != nil { + objectMap["cropRectangle"] = vo.CropRectangle + } + if vo.InputLabel != nil { + objectMap["inputLabel"] = vo.InputLabel + } + if vo.Start != nil { + objectMap["start"] = vo.Start + } + if vo.End != nil { + objectMap["end"] = vo.End + } + if vo.FadeInDuration != nil { + objectMap["fadeInDuration"] = vo.FadeInDuration + } + if vo.FadeOutDuration != nil { + objectMap["fadeOutDuration"] = vo.FadeOutDuration + } + if vo.AudioGainLevel != nil { + objectMap["audioGainLevel"] = vo.AudioGainLevel + } + if vo.OdataType != "" { + objectMap["@odata.type"] = vo.OdataType + } + return json.Marshal(objectMap) +} + +// AsAudioOverlay is the BasicOverlay implementation for VideoOverlay. +func (vo VideoOverlay) AsAudioOverlay() (*AudioOverlay, bool) { + return nil, false +} + +// AsVideoOverlay is the BasicOverlay implementation for VideoOverlay. +func (vo VideoOverlay) AsVideoOverlay() (*VideoOverlay, bool) { + return &vo, true +} + +// AsOverlay is the BasicOverlay implementation for VideoOverlay. 
+func (vo VideoOverlay) AsOverlay() (*Overlay, bool) { + return nil, false +} + +// AsBasicOverlay is the BasicOverlay implementation for VideoOverlay. +func (vo VideoOverlay) AsBasicOverlay() (BasicOverlay, bool) { + return &vo, true +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/operations.go new file mode 100644 index 000000000000..2307c01e5c19 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/operations.go @@ -0,0 +1,147 @@ +package media + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// OperationsClient is the client for the Operations methods of the Media service. +type OperationsClient struct { + BaseClient +} + +// NewOperationsClient creates an instance of the OperationsClient client. 
+func NewOperationsClient(subscriptionID string) OperationsClient { + return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client. +func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { + return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List lists all the Media Services operations. +func (client OperationsClient) List(ctx context.Context) (result OperationCollectionPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List") + defer func() { + sc := -1 + if result.oc.Response.Response != nil { + sc = result.oc.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "media.OperationsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.oc.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.OperationsClient", "List", resp, "Failure sending request") + return + } + + result.oc, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.OperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) { + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/providers/Microsoft.Media/operations"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client OperationsClient) ListResponder(resp *http.Response) (result OperationCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client OperationsClient) listNextResults(ctx context.Context, lastResults OperationCollection) (result OperationCollection, err error) { + req, err := lastResults.operationCollectionPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "media.OperationsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "media.OperationsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.OperationsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client OperationsClient) ListComplete(ctx context.Context) (result OperationCollectionIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/streamingendpoints.go b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/streamingendpoints.go new file mode 100644 index 000000000000..b7d824ae7ba1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/streamingendpoints.go @@ -0,0 +1,778 @@ +package media + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// StreamingEndpointsClient is the client for the StreamingEndpoints methods of the Media service. +type StreamingEndpointsClient struct { + BaseClient +} + +// NewStreamingEndpointsClient creates an instance of the StreamingEndpointsClient client. +func NewStreamingEndpointsClient(subscriptionID string) StreamingEndpointsClient { + return NewStreamingEndpointsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewStreamingEndpointsClientWithBaseURI creates an instance of the StreamingEndpointsClient client. +func NewStreamingEndpointsClientWithBaseURI(baseURI string, subscriptionID string) StreamingEndpointsClient { + return StreamingEndpointsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create creates a StreamingEndpoint. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// streamingEndpointName - the name of the StreamingEndpoint. +// parameters - streamingEndpoint properties needed for creation. 
+// autoStart - the flag indicates if the resource should be automatically started on creation. +func (client StreamingEndpointsClient) Create(ctx context.Context, resourceGroupName string, accountName string, streamingEndpointName string, parameters StreamingEndpoint, autoStart *bool) (result StreamingEndpointsCreateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StreamingEndpointsClient.Create") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: streamingEndpointName, + Constraints: []validation.Constraint{{Target: "streamingEndpointName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "streamingEndpointName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "streamingEndpointName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.StreamingEndpointProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.StreamingEndpointProperties.ScaleUnits", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { + return result, validation.NewError("media.StreamingEndpointsClient", "Create", err.Error()) + } + + req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, streamingEndpointName, parameters, autoStart) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingEndpointsClient", "Create", nil, "Failure preparing request") + return + } + + result, err = client.CreateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingEndpointsClient", "Create", result.Response(), "Failure sending request") + return + } + + return +} + +// CreatePreparer prepares the Create request. 
+func (client StreamingEndpointsClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, streamingEndpointName string, parameters StreamingEndpoint, autoStart *bool) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "streamingEndpointName": autorest.Encode("path", streamingEndpointName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if autoStart != nil { + queryParameters["autoStart"] = autorest.Encode("query", *autoStart) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/streamingEndpoints/{streamingEndpointName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client StreamingEndpointsClient) CreateSender(req *http.Request) (future StreamingEndpointsCreateFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. 
+func (client StreamingEndpointsClient) CreateResponder(resp *http.Response) (result StreamingEndpoint, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a StreamingEndpoint. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// streamingEndpointName - the name of the StreamingEndpoint. +func (client StreamingEndpointsClient) Delete(ctx context.Context, resourceGroupName string, accountName string, streamingEndpointName string) (result StreamingEndpointsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StreamingEndpointsClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: streamingEndpointName, + Constraints: []validation.Constraint{{Target: "streamingEndpointName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "streamingEndpointName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "streamingEndpointName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("media.StreamingEndpointsClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, streamingEndpointName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingEndpointsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingEndpointsClient", 
"Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client StreamingEndpointsClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, streamingEndpointName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "streamingEndpointName": autorest.Encode("path", streamingEndpointName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/streamingEndpoints/{streamingEndpointName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client StreamingEndpointsClient) DeleteSender(req *http.Request) (future StreamingEndpointsDeleteFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client StreamingEndpointsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a StreamingEndpoint. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// streamingEndpointName - the name of the StreamingEndpoint. +func (client StreamingEndpointsClient) Get(ctx context.Context, resourceGroupName string, accountName string, streamingEndpointName string) (result StreamingEndpoint, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StreamingEndpointsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: streamingEndpointName, + Constraints: []validation.Constraint{{Target: "streamingEndpointName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "streamingEndpointName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "streamingEndpointName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("media.StreamingEndpointsClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, accountName, streamingEndpointName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingEndpointsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.StreamingEndpointsClient", "Get", resp, "Failure 
sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingEndpointsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client StreamingEndpointsClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, streamingEndpointName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "streamingEndpointName": autorest.Encode("path", streamingEndpointName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/streamingEndpoints/{streamingEndpointName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client StreamingEndpointsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client StreamingEndpointsClient) GetResponder(resp *http.Response) (result StreamingEndpoint, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists the StreamingEndpoints in the account. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +func (client StreamingEndpointsClient) List(ctx context.Context, resourceGroupName string, accountName string) (result StreamingEndpointListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StreamingEndpointsClient.List") + defer func() { + sc := -1 + if result.selr.Response.Response != nil { + sc = result.selr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingEndpointsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.selr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.StreamingEndpointsClient", "List", resp, "Failure sending request") + return + } + + result.selr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingEndpointsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client StreamingEndpointsClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/streamingEndpoints", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client StreamingEndpointsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client StreamingEndpointsClient) ListResponder(resp *http.Response) (result StreamingEndpointListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client StreamingEndpointsClient) listNextResults(ctx context.Context, lastResults StreamingEndpointListResult) (result StreamingEndpointListResult, err error) { + req, err := lastResults.streamingEndpointListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "media.StreamingEndpointsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "media.StreamingEndpointsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingEndpointsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client StreamingEndpointsClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string) (result StreamingEndpointListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StreamingEndpointsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, accountName) + return +} + +// Scale scales an existing StreamingEndpoint. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// streamingEndpointName - the name of the StreamingEndpoint. 
+// parameters - streamingEndpoint scale parameters +func (client StreamingEndpointsClient) Scale(ctx context.Context, resourceGroupName string, accountName string, streamingEndpointName string, parameters StreamingEntityScaleUnit) (result StreamingEndpointsScaleFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StreamingEndpointsClient.Scale") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: streamingEndpointName, + Constraints: []validation.Constraint{{Target: "streamingEndpointName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "streamingEndpointName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "streamingEndpointName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("media.StreamingEndpointsClient", "Scale", err.Error()) + } + + req, err := client.ScalePreparer(ctx, resourceGroupName, accountName, streamingEndpointName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingEndpointsClient", "Scale", nil, "Failure preparing request") + return + } + + result, err = client.ScaleSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingEndpointsClient", "Scale", result.Response(), "Failure sending request") + return + } + + return +} + +// ScalePreparer prepares the Scale request. 
+func (client StreamingEndpointsClient) ScalePreparer(ctx context.Context, resourceGroupName string, accountName string, streamingEndpointName string, parameters StreamingEntityScaleUnit) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "streamingEndpointName": autorest.Encode("path", streamingEndpointName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/streamingEndpoints/{streamingEndpointName}/scale", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ScaleSender sends the Scale request. The method will close the +// http.Response Body if it receives an error. +func (client StreamingEndpointsClient) ScaleSender(req *http.Request) (future StreamingEndpointsScaleFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// ScaleResponder handles the response to the Scale request. The method always +// closes the http.Response Body. 
+func (client StreamingEndpointsClient) ScaleResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Start starts an existing StreamingEndpoint. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// streamingEndpointName - the name of the StreamingEndpoint. +func (client StreamingEndpointsClient) Start(ctx context.Context, resourceGroupName string, accountName string, streamingEndpointName string) (result StreamingEndpointsStartFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StreamingEndpointsClient.Start") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: streamingEndpointName, + Constraints: []validation.Constraint{{Target: "streamingEndpointName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "streamingEndpointName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "streamingEndpointName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("media.StreamingEndpointsClient", "Start", err.Error()) + } + + req, err := client.StartPreparer(ctx, resourceGroupName, accountName, streamingEndpointName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingEndpointsClient", "Start", nil, "Failure preparing request") + return + } + + result, err = client.StartSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingEndpointsClient", "Start", result.Response(), "Failure sending request") + return + } + + 
return +} + +// StartPreparer prepares the Start request. +func (client StreamingEndpointsClient) StartPreparer(ctx context.Context, resourceGroupName string, accountName string, streamingEndpointName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "streamingEndpointName": autorest.Encode("path", streamingEndpointName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/streamingEndpoints/{streamingEndpointName}/start", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// StartSender sends the Start request. The method will close the +// http.Response Body if it receives an error. +func (client StreamingEndpointsClient) StartSender(req *http.Request) (future StreamingEndpointsStartFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// StartResponder handles the response to the Start request. The method always +// closes the http.Response Body. 
+func (client StreamingEndpointsClient) StartResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Stop stops an existing StreamingEndpoint. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// streamingEndpointName - the name of the StreamingEndpoint. +func (client StreamingEndpointsClient) Stop(ctx context.Context, resourceGroupName string, accountName string, streamingEndpointName string) (result StreamingEndpointsStopFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StreamingEndpointsClient.Stop") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: streamingEndpointName, + Constraints: []validation.Constraint{{Target: "streamingEndpointName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "streamingEndpointName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "streamingEndpointName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("media.StreamingEndpointsClient", "Stop", err.Error()) + } + + req, err := client.StopPreparer(ctx, resourceGroupName, accountName, streamingEndpointName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingEndpointsClient", "Stop", nil, "Failure preparing request") + return + } + + result, err = client.StopSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingEndpointsClient", "Stop", result.Response(), "Failure sending request") + return + } + + return +} + +// 
StopPreparer prepares the Stop request. +func (client StreamingEndpointsClient) StopPreparer(ctx context.Context, resourceGroupName string, accountName string, streamingEndpointName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "streamingEndpointName": autorest.Encode("path", streamingEndpointName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/streamingEndpoints/{streamingEndpointName}/stop", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// StopSender sends the Stop request. The method will close the +// http.Response Body if it receives an error. +func (client StreamingEndpointsClient) StopSender(req *http.Request) (future StreamingEndpointsStopFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// StopResponder handles the response to the Stop request. The method always +// closes the http.Response Body. 
+func (client StreamingEndpointsClient) StopResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Update updates a existing StreamingEndpoint. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// streamingEndpointName - the name of the StreamingEndpoint. +// parameters - streamingEndpoint properties needed for creation. +func (client StreamingEndpointsClient) Update(ctx context.Context, resourceGroupName string, accountName string, streamingEndpointName string, parameters StreamingEndpoint) (result StreamingEndpointsUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StreamingEndpointsClient.Update") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: streamingEndpointName, + Constraints: []validation.Constraint{{Target: "streamingEndpointName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "streamingEndpointName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "streamingEndpointName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("media.StreamingEndpointsClient", "Update", err.Error()) + } + + req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, streamingEndpointName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingEndpointsClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = 
autorest.NewErrorWithError(err, "media.StreamingEndpointsClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client StreamingEndpointsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, streamingEndpointName string, parameters StreamingEndpoint) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "streamingEndpointName": autorest.Encode("path", streamingEndpointName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/streamingEndpoints/{streamingEndpointName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client StreamingEndpointsClient) UpdateSender(req *http.Request) (future StreamingEndpointsUpdateFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client StreamingEndpointsClient) UpdateResponder(resp *http.Response) (result StreamingEndpoint, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/streaminglocators.go b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/streaminglocators.go new file mode 100644 index 000000000000..6c6496b57e4a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/streaminglocators.go @@ -0,0 +1,576 @@ +package media + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// StreamingLocatorsClient is the client for the StreamingLocators methods of the Media service. 
+type StreamingLocatorsClient struct { + BaseClient +} + +// NewStreamingLocatorsClient creates an instance of the StreamingLocatorsClient client. +func NewStreamingLocatorsClient(subscriptionID string) StreamingLocatorsClient { + return NewStreamingLocatorsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewStreamingLocatorsClientWithBaseURI creates an instance of the StreamingLocatorsClient client. +func NewStreamingLocatorsClientWithBaseURI(baseURI string, subscriptionID string) StreamingLocatorsClient { + return StreamingLocatorsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create create a Streaming Locator in the Media Services account +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// streamingLocatorName - the Streaming Locator name. +// parameters - the request parameters +func (client StreamingLocatorsClient) Create(ctx context.Context, resourceGroupName string, accountName string, streamingLocatorName string, parameters StreamingLocator) (result StreamingLocator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StreamingLocatorsClient.Create") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.StreamingLocatorProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.StreamingLocatorProperties.AssetName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.StreamingLocatorProperties.StreamingPolicyName", Name: validation.Null, Rule: true, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("media.StreamingLocatorsClient", "Create", err.Error()) + } + + req, err := 
client.CreatePreparer(ctx, resourceGroupName, accountName, streamingLocatorName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingLocatorsClient", "Create", nil, "Failure preparing request") + return + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.StreamingLocatorsClient", "Create", resp, "Failure sending request") + return + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingLocatorsClient", "Create", resp, "Failure responding to request") + } + + return +} + +// CreatePreparer prepares the Create request. +func (client StreamingLocatorsClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, streamingLocatorName string, parameters StreamingLocator) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "streamingLocatorName": autorest.Encode("path", streamingLocatorName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/streamingLocators/{streamingLocatorName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateSender sends the Create request. 
The method will close the +// http.Response Body if it receives an error. +func (client StreamingLocatorsClient) CreateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client StreamingLocatorsClient) CreateResponder(resp *http.Response) (result StreamingLocator, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a Streaming Locator in the Media Services account +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// streamingLocatorName - the Streaming Locator name. 
+func (client StreamingLocatorsClient) Delete(ctx context.Context, resourceGroupName string, accountName string, streamingLocatorName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StreamingLocatorsClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, streamingLocatorName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingLocatorsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "media.StreamingLocatorsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingLocatorsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client StreamingLocatorsClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, streamingLocatorName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "streamingLocatorName": autorest.Encode("path", streamingLocatorName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/streamingLocators/{streamingLocatorName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client StreamingLocatorsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client StreamingLocatorsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get get the details of a Streaming Locator in the Media Services account +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. 
+// accountName - the Media Services account name. +// streamingLocatorName - the Streaming Locator name. +func (client StreamingLocatorsClient) Get(ctx context.Context, resourceGroupName string, accountName string, streamingLocatorName string) (result StreamingLocator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StreamingLocatorsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, accountName, streamingLocatorName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingLocatorsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.StreamingLocatorsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingLocatorsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client StreamingLocatorsClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, streamingLocatorName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "streamingLocatorName": autorest.Encode("path", streamingLocatorName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/streamingLocators/{streamingLocatorName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client StreamingLocatorsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client StreamingLocatorsClient) GetResponder(resp *http.Response) (result StreamingLocator, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists the Streaming Locators in the account +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. 
+// accountName - the Media Services account name. +// filter - restricts the set of items returned. +// top - specifies a non-negative integer n that limits the number of items returned from a collection. The +// service returns the number of available items up to but not greater than the specified value n. +// orderby - specifies the key by which the result collection should be ordered. +func (client StreamingLocatorsClient) List(ctx context.Context, resourceGroupName string, accountName string, filter string, top *int32, orderby string) (result StreamingLocatorCollectionPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StreamingLocatorsClient.List") + defer func() { + sc := -1 + if result.slc.Response.Response != nil { + sc = result.slc.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, accountName, filter, top, orderby) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingLocatorsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.slc.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.StreamingLocatorsClient", "List", resp, "Failure sending request") + return + } + + result.slc, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingLocatorsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client StreamingLocatorsClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, filter string, top *int32, orderby string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(orderby) > 0 { + queryParameters["$orderby"] = autorest.Encode("query", orderby) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/streamingLocators", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client StreamingLocatorsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client StreamingLocatorsClient) ListResponder(resp *http.Response) (result StreamingLocatorCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client StreamingLocatorsClient) listNextResults(ctx context.Context, lastResults StreamingLocatorCollection) (result StreamingLocatorCollection, err error) { + req, err := lastResults.streamingLocatorCollectionPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "media.StreamingLocatorsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "media.StreamingLocatorsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingLocatorsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client StreamingLocatorsClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, filter string, top *int32, orderby string) (result StreamingLocatorCollectionIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StreamingLocatorsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, accountName, filter, top, orderby) + return +} + +// ListContentKeys list Content Keys used by this Streaming Locator +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// streamingLocatorName - the Streaming Locator name. +func (client StreamingLocatorsClient) ListContentKeys(ctx context.Context, resourceGroupName string, accountName string, streamingLocatorName string) (result ListContentKeysResponse, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StreamingLocatorsClient.ListContentKeys") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListContentKeysPreparer(ctx, resourceGroupName, accountName, streamingLocatorName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingLocatorsClient", "ListContentKeys", nil, "Failure preparing request") + return + } + + resp, err := client.ListContentKeysSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.StreamingLocatorsClient", "ListContentKeys", resp, "Failure sending request") + return + } + + result, err = client.ListContentKeysResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, 
"media.StreamingLocatorsClient", "ListContentKeys", resp, "Failure responding to request") + } + + return +} + +// ListContentKeysPreparer prepares the ListContentKeys request. +func (client StreamingLocatorsClient) ListContentKeysPreparer(ctx context.Context, resourceGroupName string, accountName string, streamingLocatorName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "streamingLocatorName": autorest.Encode("path", streamingLocatorName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/streamingLocators/{streamingLocatorName}/listContentKeys", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListContentKeysSender sends the ListContentKeys request. The method will close the +// http.Response Body if it receives an error. +func (client StreamingLocatorsClient) ListContentKeysSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListContentKeysResponder handles the response to the ListContentKeys request. The method always +// closes the http.Response Body. 
+func (client StreamingLocatorsClient) ListContentKeysResponder(resp *http.Response) (result ListContentKeysResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListPaths list Paths supported by this Streaming Locator +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// streamingLocatorName - the Streaming Locator name. +func (client StreamingLocatorsClient) ListPaths(ctx context.Context, resourceGroupName string, accountName string, streamingLocatorName string) (result ListPathsResponse, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StreamingLocatorsClient.ListPaths") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListPathsPreparer(ctx, resourceGroupName, accountName, streamingLocatorName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingLocatorsClient", "ListPaths", nil, "Failure preparing request") + return + } + + resp, err := client.ListPathsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.StreamingLocatorsClient", "ListPaths", resp, "Failure sending request") + return + } + + result, err = client.ListPathsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingLocatorsClient", "ListPaths", resp, "Failure responding to request") + } + + return +} + +// ListPathsPreparer prepares the ListPaths request. 
+func (client StreamingLocatorsClient) ListPathsPreparer(ctx context.Context, resourceGroupName string, accountName string, streamingLocatorName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "streamingLocatorName": autorest.Encode("path", streamingLocatorName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/streamingLocators/{streamingLocatorName}/listPaths", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListPathsSender sends the ListPaths request. The method will close the +// http.Response Body if it receives an error. +func (client StreamingLocatorsClient) ListPathsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListPathsResponder handles the response to the ListPaths request. The method always +// closes the http.Response Body. 
+func (client StreamingLocatorsClient) ListPathsResponder(resp *http.Response) (result ListPathsResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/streamingpolicies.go b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/streamingpolicies.go new file mode 100644 index 000000000000..649baab1ab24 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/streamingpolicies.go @@ -0,0 +1,452 @@ +package media + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// StreamingPoliciesClient is the client for the StreamingPolicies methods of the Media service. 
+type StreamingPoliciesClient struct { + BaseClient +} + +// NewStreamingPoliciesClient creates an instance of the StreamingPoliciesClient client. +func NewStreamingPoliciesClient(subscriptionID string) StreamingPoliciesClient { + return NewStreamingPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewStreamingPoliciesClientWithBaseURI creates an instance of the StreamingPoliciesClient client. +func NewStreamingPoliciesClientWithBaseURI(baseURI string, subscriptionID string) StreamingPoliciesClient { + return StreamingPoliciesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create create a Streaming Policy in the Media Services account +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// streamingPolicyName - the Streaming Policy name. +// parameters - the request parameters +func (client StreamingPoliciesClient) Create(ctx context.Context, resourceGroupName string, accountName string, streamingPolicyName string, parameters StreamingPolicy) (result StreamingPolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StreamingPoliciesClient.Create") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.StreamingPolicyProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.StreamingPolicyProperties.EnvelopeEncryption", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.StreamingPolicyProperties.EnvelopeEncryption.EnabledProtocols", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: 
"parameters.StreamingPolicyProperties.EnvelopeEncryption.EnabledProtocols.Download", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.StreamingPolicyProperties.EnvelopeEncryption.EnabledProtocols.Dash", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.StreamingPolicyProperties.EnvelopeEncryption.EnabledProtocols.Hls", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.StreamingPolicyProperties.EnvelopeEncryption.EnabledProtocols.SmoothStreaming", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + {Target: "parameters.StreamingPolicyProperties.CommonEncryptionCenc", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.StreamingPolicyProperties.CommonEncryptionCenc.EnabledProtocols", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.StreamingPolicyProperties.CommonEncryptionCenc.EnabledProtocols.Download", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.StreamingPolicyProperties.CommonEncryptionCenc.EnabledProtocols.Dash", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.StreamingPolicyProperties.CommonEncryptionCenc.EnabledProtocols.Hls", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.StreamingPolicyProperties.CommonEncryptionCenc.EnabledProtocols.SmoothStreaming", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + {Target: "parameters.StreamingPolicyProperties.CommonEncryptionCbcs", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.StreamingPolicyProperties.CommonEncryptionCbcs.EnabledProtocols", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.StreamingPolicyProperties.CommonEncryptionCbcs.EnabledProtocols.Download", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.StreamingPolicyProperties.CommonEncryptionCbcs.EnabledProtocols.Dash", 
Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.StreamingPolicyProperties.CommonEncryptionCbcs.EnabledProtocols.Hls", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.StreamingPolicyProperties.CommonEncryptionCbcs.EnabledProtocols.SmoothStreaming", Name: validation.Null, Rule: true, Chain: nil}, + }}, + {Target: "parameters.StreamingPolicyProperties.CommonEncryptionCbcs.Drm", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.StreamingPolicyProperties.CommonEncryptionCbcs.Drm.FairPlay", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.StreamingPolicyProperties.CommonEncryptionCbcs.Drm.FairPlay.AllowPersistentLicense", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + }}, + {Target: "parameters.StreamingPolicyProperties.NoEncryption", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.StreamingPolicyProperties.NoEncryption.EnabledProtocols", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.StreamingPolicyProperties.NoEncryption.EnabledProtocols.Download", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.StreamingPolicyProperties.NoEncryption.EnabledProtocols.Dash", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.StreamingPolicyProperties.NoEncryption.EnabledProtocols.Hls", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.StreamingPolicyProperties.NoEncryption.EnabledProtocols.SmoothStreaming", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}}}}); err != nil { + return result, validation.NewError("media.StreamingPoliciesClient", "Create", err.Error()) + } + + req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, streamingPolicyName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingPoliciesClient", "Create", nil, "Failure 
preparing request") + return + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.StreamingPoliciesClient", "Create", resp, "Failure sending request") + return + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingPoliciesClient", "Create", resp, "Failure responding to request") + } + + return +} + +// CreatePreparer prepares the Create request. +func (client StreamingPoliciesClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, streamingPolicyName string, parameters StreamingPolicy) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "streamingPolicyName": autorest.Encode("path", streamingPolicyName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/streamingPolicies/{streamingPolicyName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. 
+func (client StreamingPoliciesClient) CreateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client StreamingPoliciesClient) CreateResponder(resp *http.Response) (result StreamingPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a Streaming Policy in the Media Services account +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// streamingPolicyName - the Streaming Policy name. +func (client StreamingPoliciesClient) Delete(ctx context.Context, resourceGroupName string, accountName string, streamingPolicyName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StreamingPoliciesClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, streamingPolicyName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingPoliciesClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "media.StreamingPoliciesClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingPoliciesClient", "Delete", resp, "Failure 
responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client StreamingPoliciesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, streamingPolicyName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "streamingPolicyName": autorest.Encode("path", streamingPolicyName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/streamingPolicies/{streamingPolicyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client StreamingPoliciesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client StreamingPoliciesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get get the details of a Streaming Policy in the Media Services account +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// streamingPolicyName - the Streaming Policy name. +func (client StreamingPoliciesClient) Get(ctx context.Context, resourceGroupName string, accountName string, streamingPolicyName string) (result StreamingPolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StreamingPoliciesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, accountName, streamingPolicyName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingPoliciesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.StreamingPoliciesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingPoliciesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client StreamingPoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, streamingPolicyName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "streamingPolicyName": autorest.Encode("path", streamingPolicyName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/streamingPolicies/{streamingPolicyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client StreamingPoliciesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client StreamingPoliciesClient) GetResponder(resp *http.Response) (result StreamingPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists the Streaming Policies in the account +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. 
+// accountName - the Media Services account name. +// filter - restricts the set of items returned. +// top - specifies a non-negative integer n that limits the number of items returned from a collection. The +// service returns the number of available items up to but not greater than the specified value n. +// orderby - specifies the key by which the result collection should be ordered. +func (client StreamingPoliciesClient) List(ctx context.Context, resourceGroupName string, accountName string, filter string, top *int32, orderby string) (result StreamingPolicyCollectionPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StreamingPoliciesClient.List") + defer func() { + sc := -1 + if result.spc.Response.Response != nil { + sc = result.spc.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, accountName, filter, top, orderby) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingPoliciesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.spc.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.StreamingPoliciesClient", "List", resp, "Failure sending request") + return + } + + result.spc, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingPoliciesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client StreamingPoliciesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, filter string, top *int32, orderby string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(orderby) > 0 { + queryParameters["$orderby"] = autorest.Encode("query", orderby) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/streamingPolicies", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client StreamingPoliciesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client StreamingPoliciesClient) ListResponder(resp *http.Response) (result StreamingPolicyCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client StreamingPoliciesClient) listNextResults(ctx context.Context, lastResults StreamingPolicyCollection) (result StreamingPolicyCollection, err error) { + req, err := lastResults.streamingPolicyCollectionPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "media.StreamingPoliciesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "media.StreamingPoliciesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.StreamingPoliciesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client StreamingPoliciesClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, filter string, top *int32, orderby string) (result StreamingPolicyCollectionIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/StreamingPoliciesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, accountName, filter, top, orderby) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/transforms.go b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/transforms.go new file mode 100644 index 000000000000..07bd8ce90280 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/transforms.go @@ -0,0 +1,493 @@ +package media + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// TransformsClient is the client for the Transforms methods of the Media service. +type TransformsClient struct { + BaseClient +} + +// NewTransformsClient creates an instance of the TransformsClient client. +func NewTransformsClient(subscriptionID string) TransformsClient { + return NewTransformsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewTransformsClientWithBaseURI creates an instance of the TransformsClient client. +func NewTransformsClientWithBaseURI(baseURI string, subscriptionID string) TransformsClient { + return TransformsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a new Transform. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// transformName - the Transform name. 
+// parameters - the request parameters +func (client TransformsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, transformName string, parameters Transform) (result Transform, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TransformsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.TransformProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.TransformProperties.Outputs", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { + return result, validation.NewError("media.TransformsClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, transformName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "media.TransformsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.TransformsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.TransformsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client TransformsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, transformName string, parameters Transform) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "transformName": autorest.Encode("path", transformName), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms/{transformName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client TransformsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client TransformsClient) CreateOrUpdateResponder(resp *http.Response) (result Transform, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a Transform. 
+// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// transformName - the Transform name. +func (client TransformsClient) Delete(ctx context.Context, resourceGroupName string, accountName string, transformName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TransformsClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, transformName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.TransformsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "media.TransformsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.TransformsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client TransformsClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, transformName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "transformName": autorest.Encode("path", transformName), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms/{transformName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client TransformsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client TransformsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a Transform. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// transformName - the Transform name. 
+func (client TransformsClient) Get(ctx context.Context, resourceGroupName string, accountName string, transformName string) (result Transform, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TransformsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, accountName, transformName) + if err != nil { + err = autorest.NewErrorWithError(err, "media.TransformsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.TransformsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.TransformsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client TransformsClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, transformName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "transformName": autorest.Encode("path", transformName), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms/{transformName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client TransformsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client TransformsClient) GetResponder(resp *http.Response) (result Transform, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists the Transforms in the account. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. 
+// filter - restricts the set of items returned. +// orderby - specifies the key by which the result collection should be ordered. +func (client TransformsClient) List(ctx context.Context, resourceGroupName string, accountName string, filter string, orderby string) (result TransformCollectionPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TransformsClient.List") + defer func() { + sc := -1 + if result.tc.Response.Response != nil { + sc = result.tc.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, accountName, filter, orderby) + if err != nil { + err = autorest.NewErrorWithError(err, "media.TransformsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.tc.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.TransformsClient", "List", resp, "Failure sending request") + return + } + + result.tc, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.TransformsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client TransformsClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, filter string, orderby string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if len(orderby) > 0 { + queryParameters["$orderby"] = autorest.Encode("query", orderby) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client TransformsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client TransformsClient) ListResponder(resp *http.Response) (result TransformCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client TransformsClient) listNextResults(ctx context.Context, lastResults TransformCollection) (result TransformCollection, err error) { + req, err := lastResults.transformCollectionPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "media.TransformsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "media.TransformsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.TransformsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client TransformsClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, filter string, orderby string) (result TransformCollectionIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TransformsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, accountName, filter, orderby) + return +} + +// Update updates a Transform. +// Parameters: +// resourceGroupName - the name of the resource group within the Azure subscription. +// accountName - the Media Services account name. +// transformName - the Transform name. 
+// parameters - the request parameters +func (client TransformsClient) Update(ctx context.Context, resourceGroupName string, accountName string, transformName string, parameters Transform) (result Transform, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TransformsClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, transformName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "media.TransformsClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "media.TransformsClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "media.TransformsClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client TransformsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, transformName string, parameters Transform) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "transformName": autorest.Encode("path", transformName), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms/{transformName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client TransformsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client TransformsClient) UpdateResponder(resp *http.Response) (result Transform, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/version.go new file mode 100644 index 000000000000..9e4f1d9a5da1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media/version.go @@ -0,0 +1,30 @@ +package media + +import "github.com/Azure/azure-sdk-for-go/version" + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return "Azure-SDK-For-Go/" + version.Number + " media/2018-07-01" +} + +// Version returns the semantic version (see http://semver.org) of the client. 
+func Version() string { + return version.Number +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network/expressrouteportslocations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network/expressrouteportslocations.go index 92966baf7343..bd1fa549e14f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network/expressrouteportslocations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network/expressrouteportslocations.go @@ -117,7 +117,7 @@ func (client ExpressRoutePortsLocationsClient) GetResponder(resp *http.Response) } // List retrieves all ExpressRoutePort peering locations. Does not return available bandwidths for each location. -// Available bandwidths can only be obtained when retriving a specific peering location. +// Available bandwidths can only be obtained when retrieving a specific peering location. func (client ExpressRoutePortsLocationsClient) List(ctx context.Context) (result ExpressRoutePortsLocationListResultPage, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRoutePortsLocationsClient.List") diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network/models.go index b7a226390532..72feddc8c2d3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network/models.go @@ -4364,11 +4364,11 @@ type ApplicationGatewayWebApplicationFirewallConfiguration struct { DisabledRuleGroups *[]ApplicationGatewayFirewallDisabledRuleGroup `json:"disabledRuleGroups,omitempty"` // RequestBodyCheck - Whether allow WAF to check request Body. 
RequestBodyCheck *bool `json:"requestBodyCheck,omitempty"` - // MaxRequestBodySize - Maxium request body size for WAF. + // MaxRequestBodySize - Maximum request body size for WAF. MaxRequestBodySize *int32 `json:"maxRequestBodySize,omitempty"` - // MaxRequestBodySizeInKb - Maxium request body size in Kb for WAF. + // MaxRequestBodySizeInKb - Maximum request body size in Kb for WAF. MaxRequestBodySizeInKb *int32 `json:"maxRequestBodySizeInKb,omitempty"` - // FileUploadLimitInMb - Maxium file upload size in Mb for WAF. + // FileUploadLimitInMb - Maximum file upload size in Mb for WAF. FileUploadLimitInMb *int32 `json:"fileUploadLimitInMb,omitempty"` // Exclusions - The exclusion list. Exclusions *[]ApplicationGatewayFirewallExclusion `json:"exclusions,omitempty"` @@ -6627,7 +6627,7 @@ type ConfigurationDiagnosticProfile struct { Source *string `json:"source,omitempty"` // Destination - Traffic destination. Accepted values are: '*', IP Address/CIDR, Service Tag. Destination *string `json:"destination,omitempty"` - // DestinationPort - Traffice destination port. Accepted values are '*', port (for example, 3389) and port range (for example, 80-100). + // DestinationPort - Traffic destination port. Accepted values are '*', port (for example, 3389) and port range (for example, 80-100). DestinationPort *string `json:"destinationPort,omitempty"` } @@ -6738,7 +6738,7 @@ type ConnectionMonitorParameters struct { MonitoringIntervalInSeconds *int32 `json:"monitoringIntervalInSeconds,omitempty"` } -// ConnectionMonitorQueryResult list of connection states snaphots. +// ConnectionMonitorQueryResult list of connection states snapshots. type ConnectionMonitorQueryResult struct { autorest.Response `json:"-"` // SourceStatus - Status of connection monitor source. 
Possible values include: 'Uknown', 'Active', 'Inactive' @@ -7235,7 +7235,7 @@ func (cni *ContainerNetworkInterface) UnmarshalJSON(body []byte) error { return nil } -// ContainerNetworkInterfaceConfiguration container network interface configruation child resource. +// ContainerNetworkInterfaceConfiguration container network interface configuration child resource. type ContainerNetworkInterfaceConfiguration struct { // ContainerNetworkInterfaceConfigurationPropertiesFormat - Container network interface configuration properties. *ContainerNetworkInterfaceConfigurationPropertiesFormat `json:"properties,omitempty"` @@ -7433,7 +7433,7 @@ type ContainerNetworkInterfaceIPConfigurationPropertiesFormat struct { type ContainerNetworkInterfacePropertiesFormat struct { // ContainerNetworkInterfaceConfiguration - Container network interface configuration from which this container network interface is created. ContainerNetworkInterfaceConfiguration *ContainerNetworkInterfaceConfiguration `json:"containerNetworkInterfaceConfiguration,omitempty"` - // Container - Reference to the conatinaer to which this container network interface is attached. + // Container - Reference to the container to which this container network interface is attached. Container *Container `json:"container,omitempty"` // IPConfigurations - Reference to the ip configuration on this container nic. IPConfigurations *[]ContainerNetworkInterfaceIPConfiguration `json:"ipConfigurations,omitempty"` @@ -7946,17 +7946,17 @@ type EffectiveNetworkSecurityRule struct { SourcePortRange *string `json:"sourcePortRange,omitempty"` // DestinationPortRange - The destination port or range. DestinationPortRange *string `json:"destinationPortRange,omitempty"` - // SourcePortRanges - The source port ranges. Expected values include a single integer between 0 and 65535, a range using '-' as seperator (e.g. 100-400), or an asterix (*) + // SourcePortRanges - The source port ranges. 
Expected values include a single integer between 0 and 65535, a range using '-' as separator (e.g. 100-400), or an asterisk (*) SourcePortRanges *[]string `json:"sourcePortRanges,omitempty"` - // DestinationPortRanges - The destination port ranges. Expected values include a single integer between 0 and 65535, a range using '-' as seperator (e.g. 100-400), or an asterix (*) + // DestinationPortRanges - The destination port ranges. Expected values include a single integer between 0 and 65535, a range using '-' as separator (e.g. 100-400), or an asterisk (*) DestinationPortRanges *[]string `json:"destinationPortRanges,omitempty"` // SourceAddressPrefix - The source address prefix. SourceAddressPrefix *string `json:"sourceAddressPrefix,omitempty"` // DestinationAddressPrefix - The destination address prefix. DestinationAddressPrefix *string `json:"destinationAddressPrefix,omitempty"` - // SourceAddressPrefixes - The source address prefixes. Expected values include CIDR IP ranges, Default Tags (VirtualNetwork, AureLoadBalancer, Internet), System Tags, and the asterix (*). + // SourceAddressPrefixes - The source address prefixes. Expected values include CIDR IP ranges, Default Tags (VirtualNetwork, AzureLoadBalancer, Internet), System Tags, and the asterisk (*). SourceAddressPrefixes *[]string `json:"sourceAddressPrefixes,omitempty"` - // DestinationAddressPrefixes - The destination address prefixes. Expected values include CIDR IP ranges, Default Tags (VirtualNetwork, AureLoadBalancer, Internet), System Tags, and the asterix (*). + // DestinationAddressPrefixes - The destination address prefixes. Expected values include CIDR IP ranges, Default Tags (VirtualNetwork, AzureLoadBalancer, Internet), System Tags, and the asterisk (*). DestinationAddressPrefixes *[]string `json:"destinationAddressPrefixes,omitempty"` // ExpandedSourceAddressPrefix - The expanded source address prefix. 
ExpandedSourceAddressPrefix *[]string `json:"expandedSourceAddressPrefix,omitempty"` @@ -11069,7 +11069,7 @@ type ExpressRoutePortPropertiesFormat struct { Mtu *string `json:"mtu,omitempty"` // Encapsulation - Encapsulation method on physical ports. Possible values include: 'Dot1Q', 'QinQ' Encapsulation ExpressRoutePortsEncapsulation `json:"encapsulation,omitempty"` - // EtherType - Ethertype of the physical port. + // EtherType - Ether type of the physical port. EtherType *string `json:"etherType,omitempty"` // AllocationDate - Date of the physical port allocation to be used in Letter of Authorization. AllocationDate *string `json:"allocationDate,omitempty"` @@ -14159,9 +14159,9 @@ func (icp *IPConfigurationProfile) UnmarshalJSON(body []byte) error { return nil } -// IPConfigurationProfilePropertiesFormat IP configruation profile properties. +// IPConfigurationProfilePropertiesFormat IP configuration profile properties. type IPConfigurationProfilePropertiesFormat struct { - // Subnet - The reference of the subnet resource to create a contatainer network interface ip configruation. + // Subnet - The reference of the subnet resource to create a container network interface ip configuration. Subnet *Subnet `json:"subnet,omitempty"` // ProvisioningState - The provisioning state of the resource. ProvisioningState *string `json:"provisioningState,omitempty"` @@ -17566,7 +17566,7 @@ type P2SVpnGatewayProperties struct { P2SVpnServerConfiguration *SubResource `json:"p2SVpnServerConfiguration,omitempty"` // VpnClientAddressPool - The reference of the address space resource which represents Address space for P2S VpnClient. VpnClientAddressPool *AddressSpace `json:"vpnClientAddressPool,omitempty"` - // VpnClientConnectionHealth - All P2S vpnclients' connection health status. + // VpnClientConnectionHealth - All P2S VPN clients' connection health status. 
VpnClientConnectionHealth *VpnClientConnectionHealth `json:"vpnClientConnectionHealth,omitempty"` } @@ -17951,7 +17951,7 @@ func (pvsc *P2SVpnServerConfiguration) UnmarshalJSON(body []byte) error { // P2SVpnServerConfigurationProperties parameters for P2SVpnServerConfiguration type P2SVpnServerConfigurationProperties struct { - // Name - The name of the P2SVpnServerConfiguration that is unique within a VirtualWan in a resource group. This name can be used to access the resource along with Paren VirtualWan resource name. + // Name - The name of the P2SVpnServerConfiguration that is unique within a VirtualWan in a resource group. This name can be used to access the resource along with Parent VirtualWan resource name. Name *string `json:"name,omitempty"` // VpnProtocols - vpnProtocols for the P2SVpnServerConfiguration. VpnProtocols *[]VpnGatewayTunnelingProtocol `json:"vpnProtocols,omitempty"` @@ -17967,7 +17967,7 @@ type P2SVpnServerConfigurationProperties struct { VpnClientIpsecPolicies *[]IpsecPolicy `json:"vpnClientIpsecPolicies,omitempty"` // RadiusServerAddress - The radius server address property of the P2SVpnServerConfiguration resource for point to site client connection. RadiusServerAddress *string `json:"radiusServerAddress,omitempty"` - // RadiusServerSecret - The radius secret property of the P2SVpnServerConfiguration resource for for point to site client connection. + // RadiusServerSecret - The radius secret property of the P2SVpnServerConfiguration resource for point to site client connection. RadiusServerSecret *string `json:"radiusServerSecret,omitempty"` // ProvisioningState - The provisioning state of the P2SVpnServerConfiguration resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. ProvisioningState *string `json:"provisioningState,omitempty"` @@ -22040,17 +22040,17 @@ type SecurityRulePropertiesFormat struct { Description *string `json:"description,omitempty"` // Protocol - Network protocol this rule applies to. 
Possible values are 'Tcp', 'Udp', and '*'. Possible values include: 'SecurityRuleProtocolTCP', 'SecurityRuleProtocolUDP', 'SecurityRuleProtocolAsterisk' Protocol SecurityRuleProtocol `json:"protocol,omitempty"` - // SourcePortRange - The source port or range. Integer or range between 0 and 65535. Asterix '*' can also be used to match all ports. + // SourcePortRange - The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports. SourcePortRange *string `json:"sourcePortRange,omitempty"` - // DestinationPortRange - The destination port or range. Integer or range between 0 and 65535. Asterix '*' can also be used to match all ports. + // DestinationPortRange - The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports. DestinationPortRange *string `json:"destinationPortRange,omitempty"` - // SourceAddressPrefix - The CIDR or source IP range. Asterix '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from. + // SourceAddressPrefix - The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from. SourceAddressPrefix *string `json:"sourceAddressPrefix,omitempty"` // SourceAddressPrefixes - The CIDR or source IP ranges. SourceAddressPrefixes *[]string `json:"sourceAddressPrefixes,omitempty"` // SourceApplicationSecurityGroups - The application security group specified as source. SourceApplicationSecurityGroups *[]ApplicationSecurityGroup `json:"sourceApplicationSecurityGroups,omitempty"` - // DestinationAddressPrefix - The destination address prefix. CIDR or destination IP range. 
Asterix '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. + // DestinationAddressPrefix - The destination address prefix. CIDR or destination IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. DestinationAddressPrefix *string `json:"destinationAddressPrefix,omitempty"` // DestinationAddressPrefixes - The destination address prefixes. CIDR or destination IP ranges. DestinationAddressPrefixes *[]string `json:"destinationAddressPrefixes,omitempty"` @@ -22064,7 +22064,7 @@ type SecurityRulePropertiesFormat struct { Access SecurityRuleAccess `json:"access,omitempty"` // Priority - The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule. Priority *int32 `json:"priority,omitempty"` - // Direction - The direction of the rule. The direction specifies if rule will be evaluated on incoming or outcoming traffic. Possible values are: 'Inbound' and 'Outbound'. Possible values include: 'SecurityRuleDirectionInbound', 'SecurityRuleDirectionOutbound' + // Direction - The direction of the rule. The direction specifies if rule will be evaluated on incoming or outgoing traffic. Possible values are: 'Inbound' and 'Outbound'. Possible values include: 'SecurityRuleDirectionInbound', 'SecurityRuleDirectionOutbound' Direction SecurityRuleDirection `json:"direction,omitempty"` // ProvisioningState - The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. 
ProvisioningState *string `json:"provisioningState,omitempty"` @@ -24382,7 +24382,7 @@ type VirtualNetworkGatewayConnectionListEntityPropertiesFormat struct { VirtualNetworkGateway2 *VirtualNetworkConnectionGatewayReference `json:"virtualNetworkGateway2,omitempty"` // LocalNetworkGateway2 - The reference to local network gateway resource. LocalNetworkGateway2 *VirtualNetworkConnectionGatewayReference `json:"localNetworkGateway2,omitempty"` - // ConnectionType - Gateway connection type. Possible values are: 'Ipsec','Vnet2Vnet','ExpressRoute', and 'VPNClient. Possible values include: 'IPsec', 'Vnet2Vnet', 'ExpressRoute', 'VPNClient' + // ConnectionType - Gateway connection type. Possible values are: 'IPsec','Vnet2Vnet','ExpressRoute', and 'VPNClient. Possible values include: 'IPsec', 'Vnet2Vnet', 'ExpressRoute', 'VPNClient' ConnectionType VirtualNetworkGatewayConnectionType `json:"connectionType,omitempty"` // ConnectionProtocol - Connection protocol used for this connection. Possible values include: 'IKEv2', 'IKEv1' ConnectionProtocol VirtualNetworkGatewayConnectionProtocol `json:"connectionProtocol,omitempty"` @@ -24572,7 +24572,7 @@ type VirtualNetworkGatewayConnectionPropertiesFormat struct { VirtualNetworkGateway2 *VirtualNetworkGateway `json:"virtualNetworkGateway2,omitempty"` // LocalNetworkGateway2 - The reference to local network gateway resource. LocalNetworkGateway2 *LocalNetworkGateway `json:"localNetworkGateway2,omitempty"` - // ConnectionType - Gateway connection type. Possible values are: 'Ipsec','Vnet2Vnet','ExpressRoute', and 'VPNClient. Possible values include: 'IPsec', 'Vnet2Vnet', 'ExpressRoute', 'VPNClient' + // ConnectionType - Gateway connection type. Possible values are: 'IPsec','Vnet2Vnet','ExpressRoute', and 'VPNClient. 
Possible values include: 'IPsec', 'Vnet2Vnet', 'ExpressRoute', 'VPNClient' ConnectionType VirtualNetworkGatewayConnectionType `json:"connectionType,omitempty"` // ConnectionProtocol - Connection protocol used for this connection. Possible values include: 'IKEv2', 'IKEv1' ConnectionProtocol VirtualNetworkGatewayConnectionProtocol `json:"connectionProtocol,omitempty"` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network/profiles.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network/profiles.go index bdf55bed8b18..191cd04643bb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network/profiles.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network/profiles.go @@ -199,7 +199,7 @@ func (client ProfilesClient) DeleteResponder(resp *http.Response) (result autore // Get gets the specified network profile in a specified resource group. // Parameters: // resourceGroupName - the name of the resource group. -// networkProfileName - the name of the PublicIPPrefx. +// networkProfileName - the name of the Public IP Prefix. // expand - expands referenced resources. 
func (client ProfilesClient) Get(ctx context.Context, resourceGroupName string, networkProfileName string, expand string) (result Profile, err error) { if tracing.IsEnabled() { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network/publicipprefixes.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network/publicipprefixes.go index 2e930c4ad9d7..c819b8133798 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network/publicipprefixes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network/publicipprefixes.go @@ -199,7 +199,7 @@ func (client PublicIPPrefixesClient) DeleteResponder(resp *http.Response) (resul // Get gets the specified public IP prefix in a specified resource group. // Parameters: // resourceGroupName - the name of the resource group. -// publicIPPrefixName - the name of the PublicIPPrefx. +// publicIPPrefixName - the name of the Public IP Prefix. // expand - expands referenced resources. func (client PublicIPPrefixesClient) Get(ctx context.Context, resourceGroupName string, publicIPPrefixName string, expand string) (result PublicIPPrefix, err error) { if tracing.IsEnabled() { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network/vpnsites.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network/vpnsites.go index f905a49277a1..65f372b50735 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network/vpnsites.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network/vpnsites.go @@ -196,7 +196,7 @@ func (client VpnSitesClient) DeleteResponder(resp *http.Response) (result autore return } -// Get retrieves the details of a VPNsite. +// Get retrieves the details of a VPN site. // Parameters: // resourceGroupName - the resource group name of the VpnSite. 
// vpnSiteName - the name of the VpnSite being retrieved. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/client.go similarity index 98% rename from vendor/github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid/client.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/client.go index 677813b81fba..a2db4dbdb3c7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/client.go @@ -1,4 +1,4 @@ -// Package eventgrid implements the Azure ARM Eventgrid service API version 2018-01-01. +// Package eventgrid implements the Azure ARM Eventgrid service API version 2018-09-15-preview. // // Azure EventGrid Management Client package eventgrid diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/domains.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/domains.go new file mode 100644 index 000000000000..ea412b84e25b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/domains.go @@ -0,0 +1,665 @@ +package eventgrid + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// DomainsClient is the azure EventGrid Management Client +type DomainsClient struct { + BaseClient +} + +// NewDomainsClient creates an instance of the DomainsClient client. +func NewDomainsClient(subscriptionID string) DomainsClient { + return NewDomainsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewDomainsClientWithBaseURI creates an instance of the DomainsClient client. +func NewDomainsClientWithBaseURI(baseURI string, subscriptionID string) DomainsClient { + return DomainsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate asynchronously creates a new domain with the specified parameters. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. 
+// domainName - name of the domain +// domainInfo - domain information +func (client DomainsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, domainName string, domainInfo Domain) (result DomainsCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DomainsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, domainName, domainInfo) + if err != nil { + err = autorest.NewErrorWithError(err, "eventgrid.DomainsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "eventgrid.DomainsClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client DomainsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, domainName string, domainInfo Domain) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "domainName": autorest.Encode("path", domainName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-09-15-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}", pathParameters), + autorest.WithJSON(domainInfo), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client DomainsClient) CreateOrUpdateSender(req *http.Request) (future DomainsCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client DomainsClient) CreateOrUpdateResponder(resp *http.Response) (result Domain, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete existing domain +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. +// domainName - name of the domain +func (client DomainsClient) Delete(ctx context.Context, resourceGroupName string, domainName string) (result DomainsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DomainsClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, domainName) + if err != nil { + err = autorest.NewErrorWithError(err, "eventgrid.DomainsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "eventgrid.DomainsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client DomainsClient) DeletePreparer(ctx context.Context, resourceGroupName string, domainName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "domainName": autorest.Encode("path", domainName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-09-15-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client DomainsClient) DeleteSender(req *http.Request) (future DomainsDeleteFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client DomainsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get get properties of a domain +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. 
+// domainName - name of the domain +func (client DomainsClient) Get(ctx context.Context, resourceGroupName string, domainName string) (result Domain, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DomainsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, domainName) + if err != nil { + err = autorest.NewErrorWithError(err, "eventgrid.DomainsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "eventgrid.DomainsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "eventgrid.DomainsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client DomainsClient) GetPreparer(ctx context.Context, resourceGroupName string, domainName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "domainName": autorest.Encode("path", domainName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-09-15-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. 
The method will close the +// http.Response Body if it receives an error. +func (client DomainsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client DomainsClient) GetResponder(resp *http.Response) (result Domain, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroup list all the domains under a resource group +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. +func (client DomainsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result DomainsListResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DomainsClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "eventgrid.DomainsClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "eventgrid.DomainsClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "eventgrid.DomainsClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// 
ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client DomainsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-09-15-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client DomainsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. 
+func (client DomainsClient) ListByResourceGroupResponder(resp *http.Response) (result DomainsListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListBySubscription list all the domains under an Azure subscription +func (client DomainsClient) ListBySubscription(ctx context.Context) (result DomainsListResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DomainsClient.ListBySubscription") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListBySubscriptionPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "eventgrid.DomainsClient", "ListBySubscription", nil, "Failure preparing request") + return + } + + resp, err := client.ListBySubscriptionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "eventgrid.DomainsClient", "ListBySubscription", resp, "Failure sending request") + return + } + + result, err = client.ListBySubscriptionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "eventgrid.DomainsClient", "ListBySubscription", resp, "Failure responding to request") + } + + return +} + +// ListBySubscriptionPreparer prepares the ListBySubscription request. 
+func (client DomainsClient) ListBySubscriptionPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-09-15-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.EventGrid/domains", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListBySubscriptionSender sends the ListBySubscription request. The method will close the +// http.Response Body if it receives an error. +func (client DomainsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always +// closes the http.Response Body. +func (client DomainsClient) ListBySubscriptionResponder(resp *http.Response) (result DomainsListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListSharedAccessKeys list the two keys used to publish to a domain +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. 
// domainName - name of the domain
func (client DomainsClient) ListSharedAccessKeys(ctx context.Context, resourceGroupName string, domainName string) (result DomainSharedAccessKeys, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/DomainsClient.ListSharedAccessKeys")
		defer func() {
			// sc records the HTTP status for the tracing span; -1 means no response was received.
			sc := -1
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	req, err := client.ListSharedAccessKeysPreparer(ctx, resourceGroupName, domainName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "eventgrid.DomainsClient", "ListSharedAccessKeys", nil, "Failure preparing request")
		return
	}

	resp, err := client.ListSharedAccessKeysSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "eventgrid.DomainsClient", "ListSharedAccessKeys", resp, "Failure sending request")
		return
	}

	result, err = client.ListSharedAccessKeysResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "eventgrid.DomainsClient", "ListSharedAccessKeys", resp, "Failure responding to request")
	}

	return
}

// ListSharedAccessKeysPreparer prepares the ListSharedAccessKeys request.
func (client DomainsClient) ListSharedAccessKeysPreparer(ctx context.Context, resourceGroupName string, domainName string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"domainName":        autorest.Encode("path", domainName),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2018-09-15-preview"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	// Note: the listKeys endpoint is invoked with POST, not GET.
	preparer := autorest.CreatePreparer(
		autorest.AsPost(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}/listKeys", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// ListSharedAccessKeysSender sends the ListSharedAccessKeys request. The method will close the
// http.Response Body if it receives an error.
func (client DomainsClient) ListSharedAccessKeysSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
}

// ListSharedAccessKeysResponder handles the response to the ListSharedAccessKeys request. The method always
// closes the http.Response Body. Only HTTP 200 is treated as success.
func (client DomainsClient) ListSharedAccessKeysResponder(resp *http.Response) (result DomainSharedAccessKeys, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// RegenerateKey regenerate a shared access key for a domain
// Parameters:
// resourceGroupName - the name of the resource group within the user's subscription.
// domainName - name of the domain
// regenerateKeyRequest - request body to regenerate key
func (client DomainsClient) RegenerateKey(ctx context.Context, resourceGroupName string, domainName string, regenerateKeyRequest DomainRegenerateKeyRequest) (result DomainSharedAccessKeys, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/DomainsClient.RegenerateKey")
		defer func() {
			sc := -1
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	// regenerateKeyRequest.KeyName is required by the service; fail fast
	// client-side before issuing the request.
	if err := validation.Validate([]validation.Validation{
		{TargetValue: regenerateKeyRequest,
			Constraints: []validation.Constraint{{Target: "regenerateKeyRequest.KeyName", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
		return result, validation.NewError("eventgrid.DomainsClient", "RegenerateKey", err.Error())
	}

	req, err := client.RegenerateKeyPreparer(ctx, resourceGroupName, domainName, regenerateKeyRequest)
	if err != nil {
		err = autorest.NewErrorWithError(err, "eventgrid.DomainsClient", "RegenerateKey", nil, "Failure preparing request")
		return
	}

	resp, err := client.RegenerateKeySender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "eventgrid.DomainsClient", "RegenerateKey", resp, "Failure sending request")
		return
	}

	result, err = client.RegenerateKeyResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "eventgrid.DomainsClient", "RegenerateKey", resp, "Failure responding to request")
	}

	return
}

// RegenerateKeyPreparer prepares the RegenerateKey request.
func (client DomainsClient) RegenerateKeyPreparer(ctx context.Context, resourceGroupName string, domainName string, regenerateKeyRequest DomainRegenerateKeyRequest) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"domainName":        autorest.Encode("path", domainName),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2018-09-15-preview"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	// POST with a JSON body naming the key to regenerate.
	preparer := autorest.CreatePreparer(
		autorest.AsContentType("application/json; charset=utf-8"),
		autorest.AsPost(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}/regenerateKey", pathParameters),
		autorest.WithJSON(regenerateKeyRequest),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// RegenerateKeySender sends the RegenerateKey request. The method will close the
// http.Response Body if it receives an error.
func (client DomainsClient) RegenerateKeySender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
}

// RegenerateKeyResponder handles the response to the RegenerateKey request. The method always
// closes the http.Response Body. Only HTTP 200 is treated as success.
func (client DomainsClient) RegenerateKeyResponder(resp *http.Response) (result DomainSharedAccessKeys, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// Update asynchronously updates a domain with the specified parameters.
// Parameters:
// resourceGroupName - the name of the resource group within the user's subscription.
// domainName - name of the domain
// domainUpdateParameters - domain update information
func (client DomainsClient) Update(ctx context.Context, resourceGroupName string, domainName string, domainUpdateParameters DomainUpdateParameters) (result DomainsUpdateFuture, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/DomainsClient.Update")
		defer func() {
			sc := -1
			if result.Response() != nil {
				sc = result.Response().StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	req, err := client.UpdatePreparer(ctx, resourceGroupName, domainName, domainUpdateParameters)
	if err != nil {
		err = autorest.NewErrorWithError(err, "eventgrid.DomainsClient", "Update", nil, "Failure preparing request")
		return
	}

	// Long-running operation: the sender returns a future rather than a
	// finished response; UpdateResponder is applied when the future completes.
	result, err = client.UpdateSender(req)
	if err != nil {
		err = autorest.NewErrorWithError(err, "eventgrid.DomainsClient", "Update", result.Response(), "Failure sending request")
		return
	}

	return
}

// UpdatePreparer prepares the Update request.
func (client DomainsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, domainName string, domainUpdateParameters DomainUpdateParameters) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"domainName":        autorest.Encode("path", domainName),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2018-09-15-preview"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	// PATCH with a JSON body carrying the updated domain properties.
	preparer := autorest.CreatePreparer(
		autorest.AsContentType("application/json; charset=utf-8"),
		autorest.AsPatch(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}", pathParameters),
		autorest.WithJSON(domainUpdateParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error. The initial response is wrapped
// in a future that tracks the long-running operation's completion.
func (client DomainsClient) UpdateSender(req *http.Request) (future DomainsUpdateFuture, err error) {
	var resp *http.Response
	resp, err = autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
	if err != nil {
		return
	}
	future.Future, err = azure.NewFutureFromResponse(resp)
	return
}

// UpdateResponder handles the response to the Update request. The method always
// closes the http.Response Body.
+func (client DomainsClient) UpdateResponder(resp *http.Response) (result Domain, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/domaintopics.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/domaintopics.go new file mode 100644 index 000000000000..7f97a096ad49 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/domaintopics.go @@ -0,0 +1,197 @@ +package eventgrid + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// DomainTopicsClient is the azure EventGrid Management Client +type DomainTopicsClient struct { + BaseClient +} + +// NewDomainTopicsClient creates an instance of the DomainTopicsClient client. 
func NewDomainTopicsClient(subscriptionID string) DomainTopicsClient {
	return NewDomainTopicsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}

// NewDomainTopicsClientWithBaseURI creates an instance of the DomainTopicsClient client
// targeting a specific base URI (e.g. a sovereign-cloud ARM endpoint).
func NewDomainTopicsClientWithBaseURI(baseURI string, subscriptionID string) DomainTopicsClient {
	return DomainTopicsClient{NewWithBaseURI(baseURI, subscriptionID)}
}

// Get get properties of a domain topic
// Parameters:
// resourceGroupName - the name of the resource group within the user's subscription.
// domainName - name of the domain
// topicName - name of the topic
func (client DomainTopicsClient) Get(ctx context.Context, resourceGroupName string, domainName string, topicName string) (result DomainTopic, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/DomainTopicsClient.Get")
		defer func() {
			// sc records the HTTP status for the tracing span; -1 means no response was received.
			sc := -1
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	req, err := client.GetPreparer(ctx, resourceGroupName, domainName, topicName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "eventgrid.DomainTopicsClient", "Get", nil, "Failure preparing request")
		return
	}

	resp, err := client.GetSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "eventgrid.DomainTopicsClient", "Get", resp, "Failure sending request")
		return
	}

	result, err = client.GetResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "eventgrid.DomainTopicsClient", "Get", resp, "Failure responding to request")
	}

	return
}

// GetPreparer prepares the Get request.
func (client DomainTopicsClient) GetPreparer(ctx context.Context, resourceGroupName string, domainName string, topicName string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"domainName":        autorest.Encode("path", domainName),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
		"topicName":         autorest.Encode("path", topicName),
	}

	const APIVersion = "2018-09-15-preview"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}/topics/{topicName}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client DomainTopicsClient) GetSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
}

// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body. Only HTTP 200 is treated as success.
func (client DomainTopicsClient) GetResponder(resp *http.Response) (result DomainTopic, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// ListByDomain list all the topics in a domain.
// Parameters:
// resourceGroupName - the name of the resource group within the user's subscription.
// domainName - domain name.
func (client DomainTopicsClient) ListByDomain(ctx context.Context, resourceGroupName string, domainName string) (result DomainTopicsListResult, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/DomainTopicsClient.ListByDomain")
		defer func() {
			// sc records the HTTP status for the tracing span; -1 means no response was received.
			sc := -1
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	req, err := client.ListByDomainPreparer(ctx, resourceGroupName, domainName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "eventgrid.DomainTopicsClient", "ListByDomain", nil, "Failure preparing request")
		return
	}

	resp, err := client.ListByDomainSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "eventgrid.DomainTopicsClient", "ListByDomain", resp, "Failure sending request")
		return
	}

	result, err = client.ListByDomainResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "eventgrid.DomainTopicsClient", "ListByDomain", resp, "Failure responding to request")
	}

	return
}

// ListByDomainPreparer prepares the ListByDomain request.
func (client DomainTopicsClient) ListByDomainPreparer(ctx context.Context, resourceGroupName string, domainName string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"domainName":        autorest.Encode("path", domainName),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2018-09-15-preview"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}/topics", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// ListByDomainSender sends the ListByDomain request. The method will close the
// http.Response Body if it receives an error.
func (client DomainTopicsClient) ListByDomainSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
}

// ListByDomainResponder handles the response to the ListByDomain request. The method always
// closes the http.Response Body.
+func (client DomainTopicsClient) ListByDomainResponder(resp *http.Response) (result DomainTopicsListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid/eventsubscriptions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/eventsubscriptions.go similarity index 92% rename from vendor/github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid/eventsubscriptions.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/eventsubscriptions.go index cb84fb01fac8..5f68dccbf8b5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid/eventsubscriptions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/eventsubscriptions.go @@ -87,7 +87,7 @@ func (client EventSubscriptionsClient) CreateOrUpdatePreparer(ctx context.Contex "scope": scope, } - const APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -172,7 +172,7 @@ func (client EventSubscriptionsClient) DeletePreparer(ctx context.Context, scope "scope": scope, } - const APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -260,7 +260,7 @@ func (client EventSubscriptionsClient) GetPreparer(ctx context.Context, scope st "scope": scope, } - const APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -343,7 +343,7 @@ func (client 
EventSubscriptionsClient) GetFullURLPreparer(ctx context.Context, s "scope": scope, } - const APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -376,6 +376,85 @@ func (client EventSubscriptionsClient) GetFullURLResponder(resp *http.Response) return } +// ListByDomainTopic list all event subscriptions that have been created for a specific domain topic +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. +// domainName - name of the top level domain +// topicName - name of the domain topic +func (client EventSubscriptionsClient) ListByDomainTopic(ctx context.Context, resourceGroupName string, domainName string, topicName string) (result EventSubscriptionsListResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/EventSubscriptionsClient.ListByDomainTopic") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListByDomainTopicPreparer(ctx, resourceGroupName, domainName, topicName) + if err != nil { + err = autorest.NewErrorWithError(err, "eventgrid.EventSubscriptionsClient", "ListByDomainTopic", nil, "Failure preparing request") + return + } + + resp, err := client.ListByDomainTopicSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "eventgrid.EventSubscriptionsClient", "ListByDomainTopic", resp, "Failure sending request") + return + } + + result, err = client.ListByDomainTopicResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "eventgrid.EventSubscriptionsClient", "ListByDomainTopic", resp, "Failure responding to request") + } + + return +} + +// ListByDomainTopicPreparer prepares the ListByDomainTopic request. 
func (client EventSubscriptionsClient) ListByDomainTopicPreparer(ctx context.Context, resourceGroupName string, domainName string, topicName string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"domainName":        autorest.Encode("path", domainName),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
		"topicName":         autorest.Encode("path", topicName),
	}

	const APIVersion = "2018-09-15-preview"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	// Event subscriptions are extension resources: the URL nests a second
	// Microsoft.EventGrid provider segment under the domain topic.
	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}/topics/{topicName}/providers/Microsoft.EventGrid/eventSubscriptions", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// ListByDomainTopicSender sends the ListByDomainTopic request. The method will close the
// http.Response Body if it receives an error.
func (client EventSubscriptionsClient) ListByDomainTopicSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
}

// ListByDomainTopicResponder handles the response to the ListByDomainTopic request. The method always
// closes the http.Response Body.
+func (client EventSubscriptionsClient) ListByDomainTopicResponder(resp *http.Response) (result EventSubscriptionsListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + // ListByResource list all event subscriptions that have been created for a specific topic // Parameters: // resourceGroupName - the name of the resource group within the user's subscription. @@ -424,7 +503,7 @@ func (client EventSubscriptionsClient) ListByResourcePreparer(ctx context.Contex "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -499,7 +578,7 @@ func (client EventSubscriptionsClient) ListGlobalByResourceGroupPreparer(ctx con "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -577,7 +656,7 @@ func (client EventSubscriptionsClient) ListGlobalByResourceGroupForTopicTypePrep "topicTypeName": autorest.Encode("path", topicTypeName), } - const APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -649,7 +728,7 @@ func (client EventSubscriptionsClient) ListGlobalBySubscriptionPreparer(ctx cont "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -725,7 +804,7 @@ func (client EventSubscriptionsClient) ListGlobalBySubscriptionForTopicTypePrepa "topicTypeName": autorest.Encode("path", topicTypeName), } - const 
APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -803,7 +882,7 @@ func (client EventSubscriptionsClient) ListRegionalByResourceGroupPreparer(ctx c "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -883,7 +962,7 @@ func (client EventSubscriptionsClient) ListRegionalByResourceGroupForTopicTypePr "topicTypeName": autorest.Encode("path", topicTypeName), } - const APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -958,7 +1037,7 @@ func (client EventSubscriptionsClient) ListRegionalBySubscriptionPreparer(ctx co "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1036,7 +1115,7 @@ func (client EventSubscriptionsClient) ListRegionalBySubscriptionForTopicTypePre "topicTypeName": autorest.Encode("path", topicTypeName), } - const APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1079,7 +1158,7 @@ func (client EventSubscriptionsClient) ListRegionalBySubscriptionForTopicTypeRes // for a resource, and // '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/topics/{topicName}' // for an EventGrid topic. 
-// eventSubscriptionName - name of the event subscription to be created +// eventSubscriptionName - name of the event subscription to be updated // eventSubscriptionUpdateParameters - updated event subscription information func (client EventSubscriptionsClient) Update(ctx context.Context, scope string, eventSubscriptionName string, eventSubscriptionUpdateParameters EventSubscriptionUpdateParameters) (result EventSubscriptionsUpdateFuture, err error) { if tracing.IsEnabled() { @@ -1114,7 +1193,7 @@ func (client EventSubscriptionsClient) UpdatePreparer(ctx context.Context, scope "scope": scope, } - const APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/models.go new file mode 100644 index 000000000000..6ad697f4b940 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/models.go @@ -0,0 +1,3765 @@ +package eventgrid + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
import (
	"encoding/json"
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
	"github.com/Azure/go-autorest/autorest/date"
	"net/http"
)

// The package's fully qualified name. Used as the prefix for tracing span names.
const fqdn = "github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid"

// DomainProvisioningState enumerates the values for domain provisioning state.
// The string values mirror the ARM provisioningState strings returned by the service.
type DomainProvisioningState string

const (
	// Canceled ...
	Canceled DomainProvisioningState = "Canceled"
	// Creating ...
	Creating DomainProvisioningState = "Creating"
	// Deleting ...
	Deleting DomainProvisioningState = "Deleting"
	// Failed ...
	Failed DomainProvisioningState = "Failed"
	// Succeeded ...
	Succeeded DomainProvisioningState = "Succeeded"
	// Updating ...
	Updating DomainProvisioningState = "Updating"
)

// PossibleDomainProvisioningStateValues returns an array of possible values for the DomainProvisioningState const type.
func PossibleDomainProvisioningStateValues() []DomainProvisioningState {
	return []DomainProvisioningState{Canceled, Creating, Deleting, Failed, Succeeded, Updating}
}

// EndpointType enumerates the values for endpoint type. It is the JSON
// discriminator distinguishing the concrete event subscription destination kinds.
type EndpointType string

const (
	// EndpointTypeEventHub ...
	EndpointTypeEventHub EndpointType = "EventHub"
	// EndpointTypeEventSubscriptionDestination ...
	EndpointTypeEventSubscriptionDestination EndpointType = "EventSubscriptionDestination"
	// EndpointTypeHybridConnection ...
	EndpointTypeHybridConnection EndpointType = "HybridConnection"
	// EndpointTypeStorageQueue ...
	EndpointTypeStorageQueue EndpointType = "StorageQueue"
	// EndpointTypeWebHook ...
	EndpointTypeWebHook EndpointType = "WebHook"
)

// PossibleEndpointTypeValues returns an array of possible values for the EndpointType const type.
func PossibleEndpointTypeValues() []EndpointType {
	return []EndpointType{EndpointTypeEventHub, EndpointTypeEventSubscriptionDestination, EndpointTypeHybridConnection, EndpointTypeStorageQueue, EndpointTypeWebHook}
}

// EndpointTypeBasicDeadLetterDestination enumerates the values for endpoint type basic dead letter
// destination (the discriminator for dead-letter destination kinds).
type EndpointTypeBasicDeadLetterDestination string

const (
	// EndpointTypeDeadLetterDestination ...
	EndpointTypeDeadLetterDestination EndpointTypeBasicDeadLetterDestination = "DeadLetterDestination"
	// EndpointTypeStorageBlob ...
	EndpointTypeStorageBlob EndpointTypeBasicDeadLetterDestination = "StorageBlob"
)

// PossibleEndpointTypeBasicDeadLetterDestinationValues returns an array of possible values for the EndpointTypeBasicDeadLetterDestination const type.
func PossibleEndpointTypeBasicDeadLetterDestinationValues() []EndpointTypeBasicDeadLetterDestination {
	return []EndpointTypeBasicDeadLetterDestination{EndpointTypeDeadLetterDestination, EndpointTypeStorageBlob}
}

// EventDeliverySchema enumerates the values for event delivery schema.
type EventDeliverySchema string

const (
	// CloudEventV01Schema ...
	CloudEventV01Schema EventDeliverySchema = "CloudEventV01Schema"
	// CustomInputSchema ...
	CustomInputSchema EventDeliverySchema = "CustomInputSchema"
	// EventGridSchema ...
	EventGridSchema EventDeliverySchema = "EventGridSchema"
)

// PossibleEventDeliverySchemaValues returns an array of possible values for the EventDeliverySchema const type.
func PossibleEventDeliverySchemaValues() []EventDeliverySchema {
	return []EventDeliverySchema{CloudEventV01Schema, CustomInputSchema, EventGridSchema}
}

// EventSubscriptionProvisioningState enumerates the values for event subscription provisioning state.
type EventSubscriptionProvisioningState string

const (
	// EventSubscriptionProvisioningStateAwaitingManualAction ...
	EventSubscriptionProvisioningStateAwaitingManualAction EventSubscriptionProvisioningState = "AwaitingManualAction"
	// EventSubscriptionProvisioningStateCanceled ...
	EventSubscriptionProvisioningStateCanceled EventSubscriptionProvisioningState = "Canceled"
	// EventSubscriptionProvisioningStateCreating ...
	EventSubscriptionProvisioningStateCreating EventSubscriptionProvisioningState = "Creating"
	// EventSubscriptionProvisioningStateDeleting ...
	EventSubscriptionProvisioningStateDeleting EventSubscriptionProvisioningState = "Deleting"
	// EventSubscriptionProvisioningStateFailed ...
	EventSubscriptionProvisioningStateFailed EventSubscriptionProvisioningState = "Failed"
	// EventSubscriptionProvisioningStateSucceeded ...
	EventSubscriptionProvisioningStateSucceeded EventSubscriptionProvisioningState = "Succeeded"
	// EventSubscriptionProvisioningStateUpdating ...
	EventSubscriptionProvisioningStateUpdating EventSubscriptionProvisioningState = "Updating"
)

// PossibleEventSubscriptionProvisioningStateValues returns an array of possible values for the EventSubscriptionProvisioningState const type.
func PossibleEventSubscriptionProvisioningStateValues() []EventSubscriptionProvisioningState {
	return []EventSubscriptionProvisioningState{EventSubscriptionProvisioningStateAwaitingManualAction, EventSubscriptionProvisioningStateCanceled, EventSubscriptionProvisioningStateCreating, EventSubscriptionProvisioningStateDeleting, EventSubscriptionProvisioningStateFailed, EventSubscriptionProvisioningStateSucceeded, EventSubscriptionProvisioningStateUpdating}
}

// InputSchema enumerates the values for input schema.
type InputSchema string

const (
	// InputSchemaCloudEventV01Schema ...
	InputSchemaCloudEventV01Schema InputSchema = "CloudEventV01Schema"
	// InputSchemaCustomEventSchema ...
	InputSchemaCustomEventSchema InputSchema = "CustomEventSchema"
	// InputSchemaEventGridSchema ...
	InputSchemaEventGridSchema InputSchema = "EventGridSchema"
)

// PossibleInputSchemaValues returns an array of possible values for the InputSchema const type.
func PossibleInputSchemaValues() []InputSchema {
	return []InputSchema{InputSchemaCloudEventV01Schema, InputSchemaCustomEventSchema, InputSchemaEventGridSchema}
}

// InputSchemaMappingType enumerates the values for input schema mapping type
// (the discriminator for input schema mapping kinds).
type InputSchemaMappingType string

const (
	// InputSchemaMappingTypeInputSchemaMapping ...
	InputSchemaMappingTypeInputSchemaMapping InputSchemaMappingType = "InputSchemaMapping"
	// InputSchemaMappingTypeJSON ...
	InputSchemaMappingTypeJSON InputSchemaMappingType = "Json"
)

// PossibleInputSchemaMappingTypeValues returns an array of possible values for the InputSchemaMappingType const type.
func PossibleInputSchemaMappingTypeValues() []InputSchemaMappingType {
	return []InputSchemaMappingType{InputSchemaMappingTypeInputSchemaMapping, InputSchemaMappingTypeJSON}
}

// OperatorType enumerates the values for operator type (the discriminator for
// advanced-filter operator kinds).
type OperatorType string

const (
	// OperatorTypeAdvancedFilter ...
	OperatorTypeAdvancedFilter OperatorType = "AdvancedFilter"
	// OperatorTypeBoolEquals ...
	OperatorTypeBoolEquals OperatorType = "BoolEquals"
	// OperatorTypeNumberGreaterThan ...
	OperatorTypeNumberGreaterThan OperatorType = "NumberGreaterThan"
	// OperatorTypeNumberGreaterThanOrEquals ...
	OperatorTypeNumberGreaterThanOrEquals OperatorType = "NumberGreaterThanOrEquals"
	// OperatorTypeNumberIn ...
	OperatorTypeNumberIn OperatorType = "NumberIn"
	// OperatorTypeNumberLessThan ...
	OperatorTypeNumberLessThan OperatorType = "NumberLessThan"
	// OperatorTypeNumberLessThanOrEquals ...
	OperatorTypeNumberLessThanOrEquals OperatorType = "NumberLessThanOrEquals"
	// OperatorTypeNumberNotIn ...
	OperatorTypeNumberNotIn OperatorType = "NumberNotIn"
	// OperatorTypeStringBeginsWith ...
	OperatorTypeStringBeginsWith OperatorType = "StringBeginsWith"
	// OperatorTypeStringContains ...
	OperatorTypeStringContains OperatorType = "StringContains"
	// OperatorTypeStringEndsWith ...
	OperatorTypeStringEndsWith OperatorType = "StringEndsWith"
	// OperatorTypeStringIn ...
	OperatorTypeStringIn OperatorType = "StringIn"
	// OperatorTypeStringNotIn ...
	OperatorTypeStringNotIn OperatorType = "StringNotIn"
)

// PossibleOperatorTypeValues returns an array of possible values for the OperatorType const type.
func PossibleOperatorTypeValues() []OperatorType {
	return []OperatorType{OperatorTypeAdvancedFilter, OperatorTypeBoolEquals, OperatorTypeNumberGreaterThan, OperatorTypeNumberGreaterThanOrEquals, OperatorTypeNumberIn, OperatorTypeNumberLessThan, OperatorTypeNumberLessThanOrEquals, OperatorTypeNumberNotIn, OperatorTypeStringBeginsWith, OperatorTypeStringContains, OperatorTypeStringEndsWith, OperatorTypeStringIn, OperatorTypeStringNotIn}
}

// ResourceRegionType enumerates the values for resource region type.
type ResourceRegionType string

const (
	// GlobalResource ...
	GlobalResource ResourceRegionType = "GlobalResource"
	// RegionalResource ...
	RegionalResource ResourceRegionType = "RegionalResource"
)

// PossibleResourceRegionTypeValues returns an array of possible values for the ResourceRegionType const type.
func PossibleResourceRegionTypeValues() []ResourceRegionType {
	return []ResourceRegionType{GlobalResource, RegionalResource}
}

// TopicProvisioningState enumerates the values for topic provisioning state.
type TopicProvisioningState string

const (
	// TopicProvisioningStateCanceled ...
	TopicProvisioningStateCanceled TopicProvisioningState = "Canceled"
	// TopicProvisioningStateCreating ...
	TopicProvisioningStateCreating TopicProvisioningState = "Creating"
	// TopicProvisioningStateDeleting ...
	TopicProvisioningStateDeleting TopicProvisioningState = "Deleting"
	// TopicProvisioningStateFailed ...
	TopicProvisioningStateFailed TopicProvisioningState = "Failed"
	// TopicProvisioningStateSucceeded ...
	TopicProvisioningStateSucceeded TopicProvisioningState = "Succeeded"
	// TopicProvisioningStateUpdating ...
	TopicProvisioningStateUpdating TopicProvisioningState = "Updating"
)

// PossibleTopicProvisioningStateValues returns an array of possible values for the TopicProvisioningState const type.
func PossibleTopicProvisioningStateValues() []TopicProvisioningState {
	return []TopicProvisioningState{TopicProvisioningStateCanceled, TopicProvisioningStateCreating, TopicProvisioningStateDeleting, TopicProvisioningStateFailed, TopicProvisioningStateSucceeded, TopicProvisioningStateUpdating}
}

// TopicTypeProvisioningState enumerates the values for topic type provisioning state.
type TopicTypeProvisioningState string

const (
	// TopicTypeProvisioningStateCanceled ...
	TopicTypeProvisioningStateCanceled TopicTypeProvisioningState = "Canceled"
	// TopicTypeProvisioningStateCreating ...
	TopicTypeProvisioningStateCreating TopicTypeProvisioningState = "Creating"
	// TopicTypeProvisioningStateDeleting ...
	TopicTypeProvisioningStateDeleting TopicTypeProvisioningState = "Deleting"
	// TopicTypeProvisioningStateFailed ...
	TopicTypeProvisioningStateFailed TopicTypeProvisioningState = "Failed"
	// TopicTypeProvisioningStateSucceeded ...
	TopicTypeProvisioningStateSucceeded TopicTypeProvisioningState = "Succeeded"
	// TopicTypeProvisioningStateUpdating ...
	TopicTypeProvisioningStateUpdating TopicTypeProvisioningState = "Updating"
)

// PossibleTopicTypeProvisioningStateValues returns an array of possible values for the TopicTypeProvisioningState const type.
func PossibleTopicTypeProvisioningStateValues() []TopicTypeProvisioningState {
	return []TopicTypeProvisioningState{TopicTypeProvisioningStateCanceled, TopicTypeProvisioningStateCreating, TopicTypeProvisioningStateDeleting, TopicTypeProvisioningStateFailed, TopicTypeProvisioningStateSucceeded, TopicTypeProvisioningStateUpdating}
}

// BasicAdvancedFilter represents an advanced filter that can be used to filter events based on various event
// envelope/data fields.
type BasicAdvancedFilter interface {
	AsNumberInAdvancedFilter() (*NumberInAdvancedFilter, bool)
	AsNumberNotInAdvancedFilter() (*NumberNotInAdvancedFilter, bool)
	AsNumberLessThanAdvancedFilter() (*NumberLessThanAdvancedFilter, bool)
	AsNumberGreaterThanAdvancedFilter() (*NumberGreaterThanAdvancedFilter, bool)
	AsNumberLessThanOrEqualsAdvancedFilter() (*NumberLessThanOrEqualsAdvancedFilter, bool)
	AsNumberGreaterThanOrEqualsAdvancedFilter() (*NumberGreaterThanOrEqualsAdvancedFilter, bool)
	AsBoolEqualsAdvancedFilter() (*BoolEqualsAdvancedFilter, bool)
	AsStringInAdvancedFilter() (*StringInAdvancedFilter, bool)
	AsStringNotInAdvancedFilter() (*StringNotInAdvancedFilter, bool)
	AsStringBeginsWithAdvancedFilter() (*StringBeginsWithAdvancedFilter, bool)
	AsStringEndsWithAdvancedFilter() (*StringEndsWithAdvancedFilter, bool)
	AsStringContainsAdvancedFilter() (*StringContainsAdvancedFilter, bool)
	AsAdvancedFilter() (*AdvancedFilter, bool)
}

// AdvancedFilter represents an advanced filter that can be used to filter events based on various event
// envelope/data fields.
type AdvancedFilter struct {
	// Key - The filter key. Represents an event property with up to two levels of nesting.
	Key *string `json:"key,omitempty"`
	// OperatorType - Possible values include: 'OperatorTypeAdvancedFilter', 'OperatorTypeNumberIn', 'OperatorTypeNumberNotIn', 'OperatorTypeNumberLessThan', 'OperatorTypeNumberGreaterThan', 'OperatorTypeNumberLessThanOrEquals', 'OperatorTypeNumberGreaterThanOrEquals', 'OperatorTypeBoolEquals', 'OperatorTypeStringIn', 'OperatorTypeStringNotIn', 'OperatorTypeStringBeginsWith', 'OperatorTypeStringEndsWith', 'OperatorTypeStringContains'
	OperatorType OperatorType `json:"operatorType,omitempty"`
}

// unmarshalBasicAdvancedFilter decodes body into the concrete BasicAdvancedFilter
// implementation selected by the "operatorType" JSON discriminator, falling back
// to the base AdvancedFilter when the discriminator is absent or unrecognized.
func unmarshalBasicAdvancedFilter(body []byte) (BasicAdvancedFilter, error) {
	// Decode into a generic map first so the discriminator can be inspected
	// before committing to a concrete target type.
	var m map[string]interface{}
	err := json.Unmarshal(body, &m)
	if err != nil {
		return nil, err
	}

	switch m["operatorType"] {
	case string(OperatorTypeNumberIn):
		var niaf NumberInAdvancedFilter
		err := json.Unmarshal(body, &niaf)
		return niaf, err
	case string(OperatorTypeNumberNotIn):
		var nniaf NumberNotInAdvancedFilter
		err := json.Unmarshal(body, &nniaf)
		return nniaf, err
	case string(OperatorTypeNumberLessThan):
		var nltaf NumberLessThanAdvancedFilter
		err := json.Unmarshal(body, &nltaf)
		return nltaf, err
	case string(OperatorTypeNumberGreaterThan):
		var ngtaf NumberGreaterThanAdvancedFilter
		err := json.Unmarshal(body, &ngtaf)
		return ngtaf, err
	case string(OperatorTypeNumberLessThanOrEquals):
		var nltoeaf NumberLessThanOrEqualsAdvancedFilter
		err := json.Unmarshal(body, &nltoeaf)
		return nltoeaf, err
	case string(OperatorTypeNumberGreaterThanOrEquals):
		var ngtoeaf NumberGreaterThanOrEqualsAdvancedFilter
		err := json.Unmarshal(body, &ngtoeaf)
		return ngtoeaf, err
	case string(OperatorTypeBoolEquals):
		var beaf BoolEqualsAdvancedFilter
		err := json.Unmarshal(body, &beaf)
		return beaf, err
	case string(OperatorTypeStringIn):
		var siaf StringInAdvancedFilter
		err := json.Unmarshal(body, &siaf)
		return siaf, err
	case string(OperatorTypeStringNotIn):
		var sniaf StringNotInAdvancedFilter
		err := json.Unmarshal(body, &sniaf)
		return sniaf, err
	case string(OperatorTypeStringBeginsWith):
		var sbwaf StringBeginsWithAdvancedFilter
		err := json.Unmarshal(body, &sbwaf)
		return sbwaf, err
	case string(OperatorTypeStringEndsWith):
		var sewaf StringEndsWithAdvancedFilter
		err := json.Unmarshal(body, &sewaf)
		return sewaf, err
	case string(OperatorTypeStringContains):
		var scaf StringContainsAdvancedFilter
		err := json.Unmarshal(body, &scaf)
		return scaf, err
	default:
		var af AdvancedFilter
		err := json.Unmarshal(body, &af)
		return af, err
	}
}

// unmarshalBasicAdvancedFilterArray decodes a JSON array, dispatching each raw
// element through unmarshalBasicAdvancedFilter so every entry's discriminator is honored.
func unmarshalBasicAdvancedFilterArray(body []byte) ([]BasicAdvancedFilter, error) {
	var rawMessages []*json.RawMessage
	err := json.Unmarshal(body, &rawMessages)
	if err != nil {
		return nil, err
	}

	afArray := make([]BasicAdvancedFilter, len(rawMessages))

	for index, rawMessage := range rawMessages {
		af, err := unmarshalBasicAdvancedFilter(*rawMessage)
		if err != nil {
			return nil, err
		}
		afArray[index] = af
	}
	return afArray, nil
}

// MarshalJSON is the custom marshaler for AdvancedFilter.
func (af AdvancedFilter) MarshalJSON() ([]byte, error) {
	// Pin the discriminator so the emitted JSON identifies the base type
	// (af is a value receiver, so the caller's struct is not mutated).
	af.OperatorType = OperatorTypeAdvancedFilter
	objectMap := make(map[string]interface{})
	if af.Key != nil {
		objectMap["key"] = af.Key
	}
	if af.OperatorType != "" {
		objectMap["operatorType"] = af.OperatorType
	}
	return json.Marshal(objectMap)
}

// AsNumberInAdvancedFilter is the BasicAdvancedFilter implementation for AdvancedFilter.
func (af AdvancedFilter) AsNumberInAdvancedFilter() (*NumberInAdvancedFilter, bool) {
	return nil, false
}

// AsNumberNotInAdvancedFilter is the BasicAdvancedFilter implementation for AdvancedFilter.
func (af AdvancedFilter) AsNumberNotInAdvancedFilter() (*NumberNotInAdvancedFilter, bool) {
	return nil, false
}

// AsNumberLessThanAdvancedFilter is the BasicAdvancedFilter implementation for AdvancedFilter.
func (af AdvancedFilter) AsNumberLessThanAdvancedFilter() (*NumberLessThanAdvancedFilter, bool) {
	return nil, false
}

// AsNumberGreaterThanAdvancedFilter is the BasicAdvancedFilter implementation for AdvancedFilter.
func (af AdvancedFilter) AsNumberGreaterThanAdvancedFilter() (*NumberGreaterThanAdvancedFilter, bool) {
	return nil, false
}

// AsNumberLessThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for AdvancedFilter.
func (af AdvancedFilter) AsNumberLessThanOrEqualsAdvancedFilter() (*NumberLessThanOrEqualsAdvancedFilter, bool) {
	return nil, false
}

// AsNumberGreaterThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for AdvancedFilter.
func (af AdvancedFilter) AsNumberGreaterThanOrEqualsAdvancedFilter() (*NumberGreaterThanOrEqualsAdvancedFilter, bool) {
	return nil, false
}

// AsBoolEqualsAdvancedFilter is the BasicAdvancedFilter implementation for AdvancedFilter.
func (af AdvancedFilter) AsBoolEqualsAdvancedFilter() (*BoolEqualsAdvancedFilter, bool) {
	return nil, false
}

// AsStringInAdvancedFilter is the BasicAdvancedFilter implementation for AdvancedFilter.
func (af AdvancedFilter) AsStringInAdvancedFilter() (*StringInAdvancedFilter, bool) {
	return nil, false
}

// AsStringNotInAdvancedFilter is the BasicAdvancedFilter implementation for AdvancedFilter.
func (af AdvancedFilter) AsStringNotInAdvancedFilter() (*StringNotInAdvancedFilter, bool) {
	return nil, false
}

// AsStringBeginsWithAdvancedFilter is the BasicAdvancedFilter implementation for AdvancedFilter.
func (af AdvancedFilter) AsStringBeginsWithAdvancedFilter() (*StringBeginsWithAdvancedFilter, bool) {
	return nil, false
}

// AsStringEndsWithAdvancedFilter is the BasicAdvancedFilter implementation for AdvancedFilter.
func (af AdvancedFilter) AsStringEndsWithAdvancedFilter() (*StringEndsWithAdvancedFilter, bool) {
	return nil, false
}

// AsStringContainsAdvancedFilter is the BasicAdvancedFilter implementation for AdvancedFilter.
func (af AdvancedFilter) AsStringContainsAdvancedFilter() (*StringContainsAdvancedFilter, bool) {
	return nil, false
}

// AsAdvancedFilter is the BasicAdvancedFilter implementation for AdvancedFilter.
func (af AdvancedFilter) AsAdvancedFilter() (*AdvancedFilter, bool) {
	return &af, true
}

// AsBasicAdvancedFilter is the BasicAdvancedFilter implementation for AdvancedFilter.
func (af AdvancedFilter) AsBasicAdvancedFilter() (BasicAdvancedFilter, bool) {
	return &af, true
}

// BoolEqualsAdvancedFilter boolEquals Filter
type BoolEqualsAdvancedFilter struct {
	// Value - The filter value
	Value *bool `json:"value,omitempty"`
	// Key - The filter key. Represents an event property with up to two levels of nesting.
	Key *string `json:"key,omitempty"`
	// OperatorType - Possible values include: 'OperatorTypeAdvancedFilter', 'OperatorTypeNumberIn', 'OperatorTypeNumberNotIn', 'OperatorTypeNumberLessThan', 'OperatorTypeNumberGreaterThan', 'OperatorTypeNumberLessThanOrEquals', 'OperatorTypeNumberGreaterThanOrEquals', 'OperatorTypeBoolEquals', 'OperatorTypeStringIn', 'OperatorTypeStringNotIn', 'OperatorTypeStringBeginsWith', 'OperatorTypeStringEndsWith', 'OperatorTypeStringContains'
	OperatorType OperatorType `json:"operatorType,omitempty"`
}

// MarshalJSON is the custom marshaler for BoolEqualsAdvancedFilter.
func (beaf BoolEqualsAdvancedFilter) MarshalJSON() ([]byte, error) {
	// Pin the discriminator so round-tripped JSON identifies this concrete type.
	beaf.OperatorType = OperatorTypeBoolEquals
	objectMap := make(map[string]interface{})
	if beaf.Value != nil {
		objectMap["value"] = beaf.Value
	}
	if beaf.Key != nil {
		objectMap["key"] = beaf.Key
	}
	if beaf.OperatorType != "" {
		objectMap["operatorType"] = beaf.OperatorType
	}
	return json.Marshal(objectMap)
}

// AsNumberInAdvancedFilter is the BasicAdvancedFilter implementation for BoolEqualsAdvancedFilter.
func (beaf BoolEqualsAdvancedFilter) AsNumberInAdvancedFilter() (*NumberInAdvancedFilter, bool) {
	return nil, false
}

// AsNumberNotInAdvancedFilter is the BasicAdvancedFilter implementation for BoolEqualsAdvancedFilter.
func (beaf BoolEqualsAdvancedFilter) AsNumberNotInAdvancedFilter() (*NumberNotInAdvancedFilter, bool) {
	return nil, false
}

// AsNumberLessThanAdvancedFilter is the BasicAdvancedFilter implementation for BoolEqualsAdvancedFilter.
func (beaf BoolEqualsAdvancedFilter) AsNumberLessThanAdvancedFilter() (*NumberLessThanAdvancedFilter, bool) {
	return nil, false
}

// AsNumberGreaterThanAdvancedFilter is the BasicAdvancedFilter implementation for BoolEqualsAdvancedFilter.
func (beaf BoolEqualsAdvancedFilter) AsNumberGreaterThanAdvancedFilter() (*NumberGreaterThanAdvancedFilter, bool) {
	return nil, false
}

// AsNumberLessThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for BoolEqualsAdvancedFilter.
func (beaf BoolEqualsAdvancedFilter) AsNumberLessThanOrEqualsAdvancedFilter() (*NumberLessThanOrEqualsAdvancedFilter, bool) {
	return nil, false
}

// AsNumberGreaterThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for BoolEqualsAdvancedFilter.
func (beaf BoolEqualsAdvancedFilter) AsNumberGreaterThanOrEqualsAdvancedFilter() (*NumberGreaterThanOrEqualsAdvancedFilter, bool) {
	return nil, false
}

// AsBoolEqualsAdvancedFilter is the BasicAdvancedFilter implementation for BoolEqualsAdvancedFilter.
func (beaf BoolEqualsAdvancedFilter) AsBoolEqualsAdvancedFilter() (*BoolEqualsAdvancedFilter, bool) {
	return &beaf, true
}

// AsStringInAdvancedFilter is the BasicAdvancedFilter implementation for BoolEqualsAdvancedFilter.
func (beaf BoolEqualsAdvancedFilter) AsStringInAdvancedFilter() (*StringInAdvancedFilter, bool) {
	return nil, false
}

// AsStringNotInAdvancedFilter is the BasicAdvancedFilter implementation for BoolEqualsAdvancedFilter.
func (beaf BoolEqualsAdvancedFilter) AsStringNotInAdvancedFilter() (*StringNotInAdvancedFilter, bool) {
	return nil, false
}

// AsStringBeginsWithAdvancedFilter is the BasicAdvancedFilter implementation for BoolEqualsAdvancedFilter.
func (beaf BoolEqualsAdvancedFilter) AsStringBeginsWithAdvancedFilter() (*StringBeginsWithAdvancedFilter, bool) {
	return nil, false
}

// AsStringEndsWithAdvancedFilter is the BasicAdvancedFilter implementation for BoolEqualsAdvancedFilter.
func (beaf BoolEqualsAdvancedFilter) AsStringEndsWithAdvancedFilter() (*StringEndsWithAdvancedFilter, bool) {
	return nil, false
}

// AsStringContainsAdvancedFilter is the BasicAdvancedFilter implementation for BoolEqualsAdvancedFilter.
func (beaf BoolEqualsAdvancedFilter) AsStringContainsAdvancedFilter() (*StringContainsAdvancedFilter, bool) {
	return nil, false
}

// AsAdvancedFilter is the BasicAdvancedFilter implementation for BoolEqualsAdvancedFilter.
func (beaf BoolEqualsAdvancedFilter) AsAdvancedFilter() (*AdvancedFilter, bool) {
	return nil, false
}

// AsBasicAdvancedFilter is the BasicAdvancedFilter implementation for BoolEqualsAdvancedFilter.
func (beaf BoolEqualsAdvancedFilter) AsBasicAdvancedFilter() (BasicAdvancedFilter, bool) {
	return &beaf, true
}

// BasicDeadLetterDestination information about the dead letter destination for an event subscription. To configure a
// deadletter destination, do not directly instantiate an object of this class. Instead, instantiate an object of a
// derived class. Currently, StorageBlobDeadLetterDestination is the only class that derives from this class.
type BasicDeadLetterDestination interface {
	AsStorageBlobDeadLetterDestination() (*StorageBlobDeadLetterDestination, bool)
	AsDeadLetterDestination() (*DeadLetterDestination, bool)
}

// DeadLetterDestination information about the dead letter destination for an event subscription. To configure
// a deadletter destination, do not directly instantiate an object of this class. Instead, instantiate an
// object of a derived class. Currently, StorageBlobDeadLetterDestination is the only class that derives from
// this class.
type DeadLetterDestination struct {
	// EndpointType - Possible values include: 'EndpointTypeDeadLetterDestination', 'EndpointTypeStorageBlob'
	EndpointType EndpointTypeBasicDeadLetterDestination `json:"endpointType,omitempty"`
}

// unmarshalBasicDeadLetterDestination decodes body into the concrete
// BasicDeadLetterDestination implementation selected by the "endpointType"
// JSON discriminator, falling back to the base DeadLetterDestination.
func unmarshalBasicDeadLetterDestination(body []byte) (BasicDeadLetterDestination, error) {
	var m map[string]interface{}
	err := json.Unmarshal(body, &m)
	if err != nil {
		return nil, err
	}

	switch m["endpointType"] {
	case string(EndpointTypeStorageBlob):
		var sbdld StorageBlobDeadLetterDestination
		err := json.Unmarshal(body, &sbdld)
		return sbdld, err
	default:
		var dld DeadLetterDestination
		err := json.Unmarshal(body, &dld)
		return dld, err
	}
}

// unmarshalBasicDeadLetterDestinationArray decodes a JSON array, dispatching each
// element through unmarshalBasicDeadLetterDestination.
func unmarshalBasicDeadLetterDestinationArray(body []byte) ([]BasicDeadLetterDestination, error) {
	var rawMessages []*json.RawMessage
	err := json.Unmarshal(body, &rawMessages)
	if err != nil {
		return nil, err
	}

	dldArray := make([]BasicDeadLetterDestination, len(rawMessages))

	for index, rawMessage := range rawMessages {
		dld, err := unmarshalBasicDeadLetterDestination(*rawMessage)
		if err != nil {
			return nil, err
		}
		dldArray[index] = dld
	}
	return dldArray, nil
}

// MarshalJSON is the custom marshaler for DeadLetterDestination.
func (dld DeadLetterDestination) MarshalJSON() ([]byte, error) {
	// Pin the discriminator so the emitted JSON identifies the base type.
	dld.EndpointType = EndpointTypeDeadLetterDestination
	objectMap := make(map[string]interface{})
	if dld.EndpointType != "" {
		objectMap["endpointType"] = dld.EndpointType
	}
	return json.Marshal(objectMap)
}

// AsStorageBlobDeadLetterDestination is the BasicDeadLetterDestination implementation for DeadLetterDestination.
func (dld DeadLetterDestination) AsStorageBlobDeadLetterDestination() (*StorageBlobDeadLetterDestination, bool) {
	return nil, false
}

// AsDeadLetterDestination is the BasicDeadLetterDestination implementation for DeadLetterDestination.
func (dld DeadLetterDestination) AsDeadLetterDestination() (*DeadLetterDestination, bool) {
	return &dld, true
}

// AsBasicDeadLetterDestination is the BasicDeadLetterDestination implementation for DeadLetterDestination.
func (dld DeadLetterDestination) AsBasicDeadLetterDestination() (BasicDeadLetterDestination, bool) {
	return &dld, true
}

// Domain eventGrid Domain
type Domain struct {
	autorest.Response `json:"-"`
	// DomainProperties - Properties of the domain
	*DomainProperties `json:"properties,omitempty"`
	// Location - Location of the resource
	Location *string `json:"location,omitempty"`
	// Tags - Tags of the resource
	Tags map[string]*string `json:"tags"`
	// ID - Fully qualified identifier of the resource
	ID *string `json:"id,omitempty"`
	// Name - Name of the resource
	Name *string `json:"name,omitempty"`
	// Type - Type of the resource
	Type *string `json:"type,omitempty"`
}

// MarshalJSON is the custom marshaler for Domain.
func (d Domain) MarshalJSON() ([]byte, error) {
	// Build the map by hand so nil fields are omitted and the embedded
	// *DomainProperties is emitted under the "properties" key.
	objectMap := make(map[string]interface{})
	if d.DomainProperties != nil {
		objectMap["properties"] = d.DomainProperties
	}
	if d.Location != nil {
		objectMap["location"] = d.Location
	}
	if d.Tags != nil {
		objectMap["tags"] = d.Tags
	}
	if d.ID != nil {
		objectMap["id"] = d.ID
	}
	if d.Name != nil {
		objectMap["name"] = d.Name
	}
	if d.Type != nil {
		objectMap["type"] = d.Type
	}
	return json.Marshal(objectMap)
}

// UnmarshalJSON is the custom unmarshaler for Domain struct.
// Keys are decoded individually because the embedded *DomainProperties is
// flattened under "properties" and cannot be handled by the default decoder.
func (d *Domain) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	err := json.Unmarshal(body, &m)
	if err != nil {
		return err
	}
	for k, v := range m {
		switch k {
		case "properties":
			if v != nil {
				var domainProperties DomainProperties
				err = json.Unmarshal(*v, &domainProperties)
				if err != nil {
					return err
				}
				d.DomainProperties = &domainProperties
			}
		case "location":
			if v != nil {
				var location string
				err = json.Unmarshal(*v, &location)
				if err != nil {
					return err
				}
				d.Location = &location
			}
		case "tags":
			if v != nil {
				var tags map[string]*string
				err = json.Unmarshal(*v, &tags)
				if err != nil {
					return err
				}
				d.Tags = tags
			}
		case "id":
			if v != nil {
				var ID string
				err = json.Unmarshal(*v, &ID)
				if err != nil {
					return err
				}
				d.ID = &ID
			}
		case "name":
			if v != nil {
				var name string
				err = json.Unmarshal(*v, &name)
				if err != nil {
					return err
				}
				d.Name = &name
			}
		case "type":
			if v != nil {
				var typeVar string
				err = json.Unmarshal(*v, &typeVar)
				if err != nil {
					return err
				}
				d.Type = &typeVar
			}
		}
	}

	return nil
}

// DomainProperties properties of the Domain
type DomainProperties struct {
	// ProvisioningState - Provisioning state of the domain. Possible values include: 'Creating', 'Updating', 'Deleting', 'Succeeded', 'Canceled', 'Failed'
	ProvisioningState DomainProvisioningState `json:"provisioningState,omitempty"`
	// Endpoint - Endpoint for the domain.
	Endpoint *string `json:"endpoint,omitempty"`
	// InputSchema - This determines the format that Event Grid should expect for incoming events published to the domain. Possible values include: 'InputSchemaEventGridSchema', 'InputSchemaCustomEventSchema', 'InputSchemaCloudEventV01Schema'
	InputSchema InputSchema `json:"inputSchema,omitempty"`
	// InputSchemaMapping - Information about the InputSchemaMapping which specified the info about mapping event payload.
	InputSchemaMapping BasicInputSchemaMapping `json:"inputSchemaMapping,omitempty"`
}

// UnmarshalJSON is the custom unmarshaler for DomainProperties struct.
// A custom decoder is required because InputSchemaMapping is a polymorphic
// interface field that must go through unmarshalBasicInputSchemaMapping.
func (dp *DomainProperties) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	err := json.Unmarshal(body, &m)
	if err != nil {
		return err
	}
	for k, v := range m {
		switch k {
		case "provisioningState":
			if v != nil {
				var provisioningState DomainProvisioningState
				err = json.Unmarshal(*v, &provisioningState)
				if err != nil {
					return err
				}
				dp.ProvisioningState = provisioningState
			}
		case "endpoint":
			if v != nil {
				var endpoint string
				err = json.Unmarshal(*v, &endpoint)
				if err != nil {
					return err
				}
				dp.Endpoint = &endpoint
			}
		case "inputSchema":
			if v != nil {
				var inputSchema InputSchema
				err = json.Unmarshal(*v, &inputSchema)
				if err != nil {
					return err
				}
				dp.InputSchema = inputSchema
			}
		case "inputSchemaMapping":
			if v != nil {
				inputSchemaMapping, err := unmarshalBasicInputSchemaMapping(*v)
				if err != nil {
					return err
				}
				dp.InputSchemaMapping = inputSchemaMapping
			}
		}
	}

	return nil
}

// DomainRegenerateKeyRequest domain regenerate share access key request
type DomainRegenerateKeyRequest struct {
	// KeyName - Key name to regenerate key1 or key2
	KeyName *string `json:"keyName,omitempty"`
}

// DomainsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type DomainsCreateOrUpdateFuture struct {
	azure.Future
}

// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *DomainsCreateOrUpdateFuture) Result(client DomainsClient) (d Domain, err error) {
	var done bool
	done, err = future.Done(client)
	if err != nil {
		err = autorest.NewErrorWithError(err, "eventgrid.DomainsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
		return
	}
	if !done {
		err = azure.NewAsyncOpIncompleteError("eventgrid.DomainsCreateOrUpdateFuture")
		return
	}
	// Fetch the final resource with the client's retry policy and decode it
	// unless the service returned 204 No Content.
	sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
	if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent {
		d, err = client.CreateOrUpdateResponder(d.Response.Response)
		if err != nil {
			err = autorest.NewErrorWithError(err, "eventgrid.DomainsCreateOrUpdateFuture", "Result", d.Response.Response, "Failure responding to request")
		}
	}
	return
}

// DomainsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type DomainsDeleteFuture struct {
	azure.Future
}

// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *DomainsDeleteFuture) Result(client DomainsClient) (ar autorest.Response, err error) {
	var done bool
	done, err = future.Done(client)
	if err != nil {
		err = autorest.NewErrorWithError(err, "eventgrid.DomainsDeleteFuture", "Result", future.Response(), "Polling failure")
		return
	}
	if !done {
		err = azure.NewAsyncOpIncompleteError("eventgrid.DomainsDeleteFuture")
		return
	}
	// Delete has no body to decode; only the final response is surfaced.
	ar.Response = future.Response()
	return
}

// DomainSharedAccessKeys shared access keys of the Domain
type DomainSharedAccessKeys struct {
	autorest.Response `json:"-"`
	// Key1 - Shared access key1 for the domain.
	Key1 *string `json:"key1,omitempty"`
	// Key2 - Shared access key2 for the domain.
	Key2 *string `json:"key2,omitempty"`
}

// DomainsListResult result of the List Domains operation
type DomainsListResult struct {
	autorest.Response `json:"-"`
	// Value - A collection of Domains
	Value *[]Domain `json:"value,omitempty"`
}

// DomainsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type DomainsUpdateFuture struct {
	azure.Future
}

// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *DomainsUpdateFuture) Result(client DomainsClient) (d Domain, err error) {
	var done bool
	done, err = future.Done(client)
	if err != nil {
		err = autorest.NewErrorWithError(err, "eventgrid.DomainsUpdateFuture", "Result", future.Response(), "Polling failure")
		return
	}
	if !done {
		err = azure.NewAsyncOpIncompleteError("eventgrid.DomainsUpdateFuture")
		return
	}
	// Fetch the final resource with the client's retry policy and decode it
	// unless the service returned 204 No Content.
	sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
	if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent {
		d, err = client.UpdateResponder(d.Response.Response)
		if err != nil {
			err = autorest.NewErrorWithError(err, "eventgrid.DomainsUpdateFuture", "Result", d.Response.Response, "Failure responding to request")
		}
	}
	return
}

// DomainTopic domain Topic
type DomainTopic struct {
	autorest.Response `json:"-"`
	// ID - Fully qualified identifier of the resource
	ID *string `json:"id,omitempty"`
	// Name - Name of the resource
	Name *string `json:"name,omitempty"`
	// Type - Type of the resource
	Type *string `json:"type,omitempty"`
}

// DomainTopicsListResult result of the List Domain Topics operation
type DomainTopicsListResult struct {
	autorest.Response `json:"-"`
	// Value - A collection of Domain Topics
	Value *[]DomainTopic `json:"value,omitempty"`
}

// DomainUpdateParameters properties of the Domain update
type DomainUpdateParameters struct {
	// Tags - Tags of the domains resource
	Tags map[string]*string `json:"tags"`
}

// MarshalJSON is the custom marshaler for DomainUpdateParameters.
+func (dup DomainUpdateParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dup.Tags != nil { + objectMap["tags"] = dup.Tags + } + return json.Marshal(objectMap) +} + +// EventHubEventSubscriptionDestination information about the event hub destination for an event +// subscription +type EventHubEventSubscriptionDestination struct { + // EventHubEventSubscriptionDestinationProperties - Event Hub Properties of the event subscription destination + *EventHubEventSubscriptionDestinationProperties `json:"properties,omitempty"` + // EndpointType - Possible values include: 'EndpointTypeEventSubscriptionDestination', 'EndpointTypeWebHook', 'EndpointTypeEventHub', 'EndpointTypeStorageQueue', 'EndpointTypeHybridConnection' + EndpointType EndpointType `json:"endpointType,omitempty"` +} + +// MarshalJSON is the custom marshaler for EventHubEventSubscriptionDestination. +func (ehesd EventHubEventSubscriptionDestination) MarshalJSON() ([]byte, error) { + ehesd.EndpointType = EndpointTypeEventHub + objectMap := make(map[string]interface{}) + if ehesd.EventHubEventSubscriptionDestinationProperties != nil { + objectMap["properties"] = ehesd.EventHubEventSubscriptionDestinationProperties + } + if ehesd.EndpointType != "" { + objectMap["endpointType"] = ehesd.EndpointType + } + return json.Marshal(objectMap) +} + +// AsWebHookEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventHubEventSubscriptionDestination. +func (ehesd EventHubEventSubscriptionDestination) AsWebHookEventSubscriptionDestination() (*WebHookEventSubscriptionDestination, bool) { + return nil, false +} + +// AsEventHubEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventHubEventSubscriptionDestination. 
+func (ehesd EventHubEventSubscriptionDestination) AsEventHubEventSubscriptionDestination() (*EventHubEventSubscriptionDestination, bool) { + return &ehesd, true +} + +// AsStorageQueueEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventHubEventSubscriptionDestination. +func (ehesd EventHubEventSubscriptionDestination) AsStorageQueueEventSubscriptionDestination() (*StorageQueueEventSubscriptionDestination, bool) { + return nil, false +} + +// AsHybridConnectionEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventHubEventSubscriptionDestination. +func (ehesd EventHubEventSubscriptionDestination) AsHybridConnectionEventSubscriptionDestination() (*HybridConnectionEventSubscriptionDestination, bool) { + return nil, false +} + +// AsEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventHubEventSubscriptionDestination. +func (ehesd EventHubEventSubscriptionDestination) AsEventSubscriptionDestination() (*EventSubscriptionDestination, bool) { + return nil, false +} + +// AsBasicEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventHubEventSubscriptionDestination. +func (ehesd EventHubEventSubscriptionDestination) AsBasicEventSubscriptionDestination() (BasicEventSubscriptionDestination, bool) { + return &ehesd, true +} + +// UnmarshalJSON is the custom unmarshaler for EventHubEventSubscriptionDestination struct. 
+func (ehesd *EventHubEventSubscriptionDestination) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var eventHubEventSubscriptionDestinationProperties EventHubEventSubscriptionDestinationProperties + err = json.Unmarshal(*v, &eventHubEventSubscriptionDestinationProperties) + if err != nil { + return err + } + ehesd.EventHubEventSubscriptionDestinationProperties = &eventHubEventSubscriptionDestinationProperties + } + case "endpointType": + if v != nil { + var endpointType EndpointType + err = json.Unmarshal(*v, &endpointType) + if err != nil { + return err + } + ehesd.EndpointType = endpointType + } + } + } + + return nil +} + +// EventHubEventSubscriptionDestinationProperties the properties for a event hub destination. +type EventHubEventSubscriptionDestinationProperties struct { + // ResourceID - The Azure Resource Id that represents the endpoint of an Event Hub destination of an event subscription. + ResourceID *string `json:"resourceId,omitempty"` +} + +// EventSubscription event Subscription +type EventSubscription struct { + autorest.Response `json:"-"` + // EventSubscriptionProperties - Properties of the event subscription + *EventSubscriptionProperties `json:"properties,omitempty"` + // ID - Fully qualified identifier of the resource + ID *string `json:"id,omitempty"` + // Name - Name of the resource + Name *string `json:"name,omitempty"` + // Type - Type of the resource + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for EventSubscription. 
func (es EventSubscription) MarshalJSON() ([]byte, error) {
	// Only non-nil fields are emitted, so absent values are omitted from the
	// request payload rather than sent as JSON null.
	objectMap := make(map[string]interface{})
	if es.EventSubscriptionProperties != nil {
		objectMap["properties"] = es.EventSubscriptionProperties
	}
	if es.ID != nil {
		objectMap["id"] = es.ID
	}
	if es.Name != nil {
		objectMap["name"] = es.Name
	}
	if es.Type != nil {
		objectMap["type"] = es.Type
	}
	return json.Marshal(objectMap)
}

// UnmarshalJSON is the custom unmarshaler for EventSubscription struct.
func (es *EventSubscription) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	err := json.Unmarshal(body, &m)
	if err != nil {
		return err
	}
	for k, v := range m {
		switch k {
		case "properties":
			if v != nil {
				var eventSubscriptionProperties EventSubscriptionProperties
				err = json.Unmarshal(*v, &eventSubscriptionProperties)
				if err != nil {
					return err
				}
				es.EventSubscriptionProperties = &eventSubscriptionProperties
			}
		case "id":
			if v != nil {
				var ID string
				err = json.Unmarshal(*v, &ID)
				if err != nil {
					return err
				}
				es.ID = &ID
			}
		case "name":
			if v != nil {
				var name string
				err = json.Unmarshal(*v, &name)
				if err != nil {
					return err
				}
				es.Name = &name
			}
		case "type":
			if v != nil {
				var typeVar string
				err = json.Unmarshal(*v, &typeVar)
				if err != nil {
					return err
				}
				es.Type = &typeVar
			}
		}
	}

	return nil
}

// BasicEventSubscriptionDestination information about the destination for an event subscription
type BasicEventSubscriptionDestination interface {
	AsWebHookEventSubscriptionDestination() (*WebHookEventSubscriptionDestination, bool)
	AsEventHubEventSubscriptionDestination() (*EventHubEventSubscriptionDestination, bool)
	AsStorageQueueEventSubscriptionDestination() (*StorageQueueEventSubscriptionDestination, bool)
	AsHybridConnectionEventSubscriptionDestination() (*HybridConnectionEventSubscriptionDestination, bool)
	AsEventSubscriptionDestination() (*EventSubscriptionDestination, bool)
}

// EventSubscriptionDestination information about the destination for an event subscription
type EventSubscriptionDestination struct {
	// EndpointType - Possible values include: 'EndpointTypeEventSubscriptionDestination', 'EndpointTypeWebHook', 'EndpointTypeEventHub', 'EndpointTypeStorageQueue', 'EndpointTypeHybridConnection'
	EndpointType EndpointType `json:"endpointType,omitempty"`
}

// unmarshalBasicEventSubscriptionDestination decodes body into the concrete
// destination type selected by the "endpointType" discriminator, falling back
// to the base EventSubscriptionDestination when the value is unrecognized.
func unmarshalBasicEventSubscriptionDestination(body []byte) (BasicEventSubscriptionDestination, error) {
	var m map[string]interface{}
	err := json.Unmarshal(body, &m)
	if err != nil {
		return nil, err
	}

	switch m["endpointType"] {
	case string(EndpointTypeWebHook):
		var whesd WebHookEventSubscriptionDestination
		err := json.Unmarshal(body, &whesd)
		return whesd, err
	case string(EndpointTypeEventHub):
		var ehesd EventHubEventSubscriptionDestination
		err := json.Unmarshal(body, &ehesd)
		return ehesd, err
	case string(EndpointTypeStorageQueue):
		var sqesd StorageQueueEventSubscriptionDestination
		err := json.Unmarshal(body, &sqesd)
		return sqesd, err
	case string(EndpointTypeHybridConnection):
		var hcesd HybridConnectionEventSubscriptionDestination
		err := json.Unmarshal(body, &hcesd)
		return hcesd, err
	default:
		var esd EventSubscriptionDestination
		err := json.Unmarshal(body, &esd)
		return esd, err
	}
}

// unmarshalBasicEventSubscriptionDestinationArray decodes a JSON array,
// dispatching each element through unmarshalBasicEventSubscriptionDestination.
func unmarshalBasicEventSubscriptionDestinationArray(body []byte) ([]BasicEventSubscriptionDestination, error) {
	var rawMessages []*json.RawMessage
	err := json.Unmarshal(body, &rawMessages)
	if err != nil {
		return nil, err
	}

	esdArray := make([]BasicEventSubscriptionDestination, len(rawMessages))

	for index, rawMessage := range rawMessages {
		esd, err := unmarshalBasicEventSubscriptionDestination(*rawMessage)
		if err != nil {
			return nil, err
		}
		esdArray[index] = esd
	}
	return esdArray, nil
}

// MarshalJSON is the custom marshaler for EventSubscriptionDestination.
func (esd EventSubscriptionDestination) MarshalJSON() ([]byte, error) {
	// Force the discriminator so the base type always serializes with its own
	// endpointType, regardless of what the caller set on the value copy.
	esd.EndpointType = EndpointTypeEventSubscriptionDestination
	objectMap := make(map[string]interface{})
	if esd.EndpointType != "" {
		objectMap["endpointType"] = esd.EndpointType
	}
	return json.Marshal(objectMap)
}

// AsWebHookEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventSubscriptionDestination.
func (esd EventSubscriptionDestination) AsWebHookEventSubscriptionDestination() (*WebHookEventSubscriptionDestination, bool) {
	return nil, false
}

// AsEventHubEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventSubscriptionDestination.
func (esd EventSubscriptionDestination) AsEventHubEventSubscriptionDestination() (*EventHubEventSubscriptionDestination, bool) {
	return nil, false
}

// AsStorageQueueEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventSubscriptionDestination.
func (esd EventSubscriptionDestination) AsStorageQueueEventSubscriptionDestination() (*StorageQueueEventSubscriptionDestination, bool) {
	return nil, false
}

// AsHybridConnectionEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventSubscriptionDestination.
func (esd EventSubscriptionDestination) AsHybridConnectionEventSubscriptionDestination() (*HybridConnectionEventSubscriptionDestination, bool) {
	return nil, false
}

// AsEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventSubscriptionDestination.
func (esd EventSubscriptionDestination) AsEventSubscriptionDestination() (*EventSubscriptionDestination, bool) {
	return &esd, true
}

// AsBasicEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventSubscriptionDestination.
func (esd EventSubscriptionDestination) AsBasicEventSubscriptionDestination() (BasicEventSubscriptionDestination, bool) {
	return &esd, true
}

// EventSubscriptionFilter filter for the Event Subscription
type EventSubscriptionFilter struct {
	// SubjectBeginsWith - An optional string to filter events for an event subscription based on a resource path prefix.
	// The format of this depends on the publisher of the events.
	// Wildcard characters are not supported in this path.
	SubjectBeginsWith *string `json:"subjectBeginsWith,omitempty"`
	// SubjectEndsWith - An optional string to filter events for an event subscription based on a resource path suffix.
	// Wildcard characters are not supported in this path.
	SubjectEndsWith *string `json:"subjectEndsWith,omitempty"`
	// IncludedEventTypes - A list of applicable event types that need to be part of the event subscription.
	// If it is desired to subscribe to all event types, the string "all" needs to be specified as an element in this list.
	IncludedEventTypes *[]string `json:"includedEventTypes,omitempty"`
	// IsSubjectCaseSensitive - Specifies if the SubjectBeginsWith and SubjectEndsWith properties of the filter
	// should be compared in a case sensitive manner.
	IsSubjectCaseSensitive *bool `json:"isSubjectCaseSensitive,omitempty"`
	// AdvancedFilters - A list of advanced filters. Polymorphic: requires the
	// custom UnmarshalJSON below to resolve each element's concrete type.
	AdvancedFilters *[]BasicAdvancedFilter `json:"advancedFilters,omitempty"`
}

// UnmarshalJSON is the custom unmarshaler for EventSubscriptionFilter struct.
func (esf *EventSubscriptionFilter) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	err := json.Unmarshal(body, &m)
	if err != nil {
		return err
	}
	for k, v := range m {
		switch k {
		case "subjectBeginsWith":
			if v != nil {
				var subjectBeginsWith string
				err = json.Unmarshal(*v, &subjectBeginsWith)
				if err != nil {
					return err
				}
				esf.SubjectBeginsWith = &subjectBeginsWith
			}
		case "subjectEndsWith":
			if v != nil {
				var subjectEndsWith string
				err = json.Unmarshal(*v, &subjectEndsWith)
				if err != nil {
					return err
				}
				esf.SubjectEndsWith = &subjectEndsWith
			}
		case "includedEventTypes":
			if v != nil {
				var includedEventTypes []string
				err = json.Unmarshal(*v, &includedEventTypes)
				if err != nil {
					return err
				}
				esf.IncludedEventTypes = &includedEventTypes
			}
		case "isSubjectCaseSensitive":
			if v != nil {
				var isSubjectCaseSensitive bool
				err = json.Unmarshal(*v, &isSubjectCaseSensitive)
				if err != nil {
					return err
				}
				esf.IsSubjectCaseSensitive = &isSubjectCaseSensitive
			}
		case "advancedFilters":
			// Polymorphic array: each element's concrete filter type is chosen
			// by its discriminator inside unmarshalBasicAdvancedFilterArray.
			if v != nil {
				advancedFilters, err := unmarshalBasicAdvancedFilterArray(*v)
				if err != nil {
					return err
				}
				esf.AdvancedFilters = &advancedFilters
			}
		}
	}

	return nil
}

// EventSubscriptionFullURL full endpoint url of an event subscription
type EventSubscriptionFullURL struct {
	autorest.Response `json:"-"`
	// EndpointURL - The URL that represents the endpoint of the destination of an event subscription.
	EndpointURL *string `json:"endpointUrl,omitempty"`
}

// EventSubscriptionProperties properties of the Event Subscription
type EventSubscriptionProperties struct {
	// Topic - Name of the topic of the event subscription.
	Topic *string `json:"topic,omitempty"`
	// ProvisioningState - Provisioning state of the event subscription. Possible values include: 'EventSubscriptionProvisioningStateCreating', 'EventSubscriptionProvisioningStateUpdating', 'EventSubscriptionProvisioningStateDeleting', 'EventSubscriptionProvisioningStateSucceeded', 'EventSubscriptionProvisioningStateCanceled', 'EventSubscriptionProvisioningStateFailed', 'EventSubscriptionProvisioningStateAwaitingManualAction'
	ProvisioningState EventSubscriptionProvisioningState `json:"provisioningState,omitempty"`
	// Destination - Information about the destination where events have to be delivered for the event subscription.
	Destination BasicEventSubscriptionDestination `json:"destination,omitempty"`
	// Filter - Information about the filter for the event subscription.
	Filter *EventSubscriptionFilter `json:"filter,omitempty"`
	// Labels - List of user defined labels.
	Labels *[]string `json:"labels,omitempty"`
	// ExpirationTimeUtc - Expiration time of the event subscription.
	ExpirationTimeUtc *date.Time `json:"expirationTimeUtc,omitempty"`
	// EventDeliverySchema - The event delivery schema for the event subscription. Possible values include: 'EventGridSchema', 'CloudEventV01Schema', 'CustomInputSchema'
	EventDeliverySchema EventDeliverySchema `json:"eventDeliverySchema,omitempty"`
	// RetryPolicy - The retry policy for events. This can be used to configure maximum number of delivery attempts and time to live for events.
	RetryPolicy *RetryPolicy `json:"retryPolicy,omitempty"`
	// DeadLetterDestination - The DeadLetter destination of the event subscription.
	DeadLetterDestination BasicDeadLetterDestination `json:"deadLetterDestination,omitempty"`
}

// UnmarshalJSON is the custom unmarshaler for EventSubscriptionProperties struct.
func (esp *EventSubscriptionProperties) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	err := json.Unmarshal(body, &m)
	if err != nil {
		return err
	}
	for k, v := range m {
		switch k {
		case "topic":
			if v != nil {
				var topic string
				err = json.Unmarshal(*v, &topic)
				if err != nil {
					return err
				}
				esp.Topic = &topic
			}
		case "provisioningState":
			if v != nil {
				var provisioningState EventSubscriptionProvisioningState
				err = json.Unmarshal(*v, &provisioningState)
				if err != nil {
					return err
				}
				esp.ProvisioningState = provisioningState
			}
		case "destination":
			// Polymorphic field: concrete type chosen by "endpointType".
			if v != nil {
				destination, err := unmarshalBasicEventSubscriptionDestination(*v)
				if err != nil {
					return err
				}
				esp.Destination = destination
			}
		case "filter":
			if v != nil {
				var filter EventSubscriptionFilter
				err = json.Unmarshal(*v, &filter)
				if err != nil {
					return err
				}
				esp.Filter = &filter
			}
		case "labels":
			if v != nil {
				var labels []string
				err = json.Unmarshal(*v, &labels)
				if err != nil {
					return err
				}
				esp.Labels = &labels
			}
		case "expirationTimeUtc":
			if v != nil {
				var expirationTimeUtc date.Time
				err = json.Unmarshal(*v, &expirationTimeUtc)
				if err != nil {
					return err
				}
				esp.ExpirationTimeUtc = &expirationTimeUtc
			}
		case "eventDeliverySchema":
			if v != nil {
				var eventDeliverySchema EventDeliverySchema
				err = json.Unmarshal(*v, &eventDeliverySchema)
				if err != nil {
					return err
				}
				esp.EventDeliverySchema = eventDeliverySchema
			}
		case "retryPolicy":
			if v != nil {
				var retryPolicy RetryPolicy
				err = json.Unmarshal(*v, &retryPolicy)
				if err != nil {
					return err
				}
				esp.RetryPolicy = &retryPolicy
			}
		case "deadLetterDestination":
			// Polymorphic field: concrete type chosen by its own discriminator.
			if v != nil {
				deadLetterDestination, err := unmarshalBasicDeadLetterDestination(*v)
				if err != nil {
					return err
				}
				esp.DeadLetterDestination = deadLetterDestination
			}
		}
	}

	return nil
}

// EventSubscriptionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type EventSubscriptionsCreateOrUpdateFuture struct {
	azure.Future
}

// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *EventSubscriptionsCreateOrUpdateFuture) Result(client EventSubscriptionsClient) (es EventSubscription, err error) {
	var done bool
	// Done issues a polling request against the service via the client.
	done, err = future.Done(client)
	if err != nil {
		err = autorest.NewErrorWithError(err, "eventgrid.EventSubscriptionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
		return
	}
	if !done {
		err = azure.NewAsyncOpIncompleteError("eventgrid.EventSubscriptionsCreateOrUpdateFuture")
		return
	}
	sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
	// 204 No Content carries no body to decode into the EventSubscription.
	if es.Response.Response, err = future.GetResult(sender); err == nil && es.Response.Response.StatusCode != http.StatusNoContent {
		es, err = client.CreateOrUpdateResponder(es.Response.Response)
		if err != nil {
			err = autorest.NewErrorWithError(err, "eventgrid.EventSubscriptionsCreateOrUpdateFuture", "Result", es.Response.Response, "Failure responding to request")
		}
	}
	return
}

// EventSubscriptionsDeleteFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type EventSubscriptionsDeleteFuture struct {
	azure.Future
}

// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *EventSubscriptionsDeleteFuture) Result(client EventSubscriptionsClient) (ar autorest.Response, err error) {
	var done bool
	// Done issues a polling request against the service via the client.
	done, err = future.Done(client)
	if err != nil {
		err = autorest.NewErrorWithError(err, "eventgrid.EventSubscriptionsDeleteFuture", "Result", future.Response(), "Polling failure")
		return
	}
	if !done {
		err = azure.NewAsyncOpIncompleteError("eventgrid.EventSubscriptionsDeleteFuture")
		return
	}
	// Delete produces no body; only the final HTTP response is surfaced.
	ar.Response = future.Response()
	return
}

// EventSubscriptionsListResult result of the List EventSubscriptions operation
type EventSubscriptionsListResult struct {
	autorest.Response `json:"-"`
	// Value - A collection of EventSubscriptions
	Value *[]EventSubscription `json:"value,omitempty"`
}

// EventSubscriptionsUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type EventSubscriptionsUpdateFuture struct {
	azure.Future
}

// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *EventSubscriptionsUpdateFuture) Result(client EventSubscriptionsClient) (es EventSubscription, err error) {
	var done bool
	// Done issues a polling request against the service via the client.
	done, err = future.Done(client)
	if err != nil {
		err = autorest.NewErrorWithError(err, "eventgrid.EventSubscriptionsUpdateFuture", "Result", future.Response(), "Polling failure")
		return
	}
	if !done {
		err = azure.NewAsyncOpIncompleteError("eventgrid.EventSubscriptionsUpdateFuture")
		return
	}
	sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
	// 204 No Content carries no body to decode into the EventSubscription.
	if es.Response.Response, err = future.GetResult(sender); err == nil && es.Response.Response.StatusCode != http.StatusNoContent {
		es, err = client.UpdateResponder(es.Response.Response)
		if err != nil {
			err = autorest.NewErrorWithError(err, "eventgrid.EventSubscriptionsUpdateFuture", "Result", es.Response.Response, "Failure responding to request")
		}
	}
	return
}

// EventSubscriptionUpdateParameters properties of the Event Subscription update
type EventSubscriptionUpdateParameters struct {
	// Destination - Information about the destination where events have to be delivered for the event subscription.
	Destination BasicEventSubscriptionDestination `json:"destination,omitempty"`
	// Filter - Information about the filter for the event subscription.
	Filter *EventSubscriptionFilter `json:"filter,omitempty"`
	// Labels - List of user defined labels.
	Labels *[]string `json:"labels,omitempty"`
	// ExpirationTimeUtc - Information about the expiration time for the event subscription.
	ExpirationTimeUtc *date.Time `json:"expirationTimeUtc,omitempty"`
	// EventDeliverySchema - The event delivery schema for the event subscription. Possible values include: 'EventGridSchema', 'CloudEventV01Schema', 'CustomInputSchema'
	EventDeliverySchema EventDeliverySchema `json:"eventDeliverySchema,omitempty"`
	// RetryPolicy - The retry policy for events. This can be used to configure maximum number of delivery attempts and time to live for events.
	RetryPolicy *RetryPolicy `json:"retryPolicy,omitempty"`
	// DeadLetterDestination - The DeadLetter destination of the event subscription.
	DeadLetterDestination BasicDeadLetterDestination `json:"deadLetterDestination,omitempty"`
}

// UnmarshalJSON is the custom unmarshaler for EventSubscriptionUpdateParameters struct.
func (esup *EventSubscriptionUpdateParameters) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	err := json.Unmarshal(body, &m)
	if err != nil {
		return err
	}
	for k, v := range m {
		switch k {
		case "destination":
			// Polymorphic field: concrete type chosen by "endpointType".
			if v != nil {
				destination, err := unmarshalBasicEventSubscriptionDestination(*v)
				if err != nil {
					return err
				}
				esup.Destination = destination
			}
		case "filter":
			if v != nil {
				var filter EventSubscriptionFilter
				err = json.Unmarshal(*v, &filter)
				if err != nil {
					return err
				}
				esup.Filter = &filter
			}
		case "labels":
			if v != nil {
				var labels []string
				err = json.Unmarshal(*v, &labels)
				if err != nil {
					return err
				}
				esup.Labels = &labels
			}
		case "expirationTimeUtc":
			if v != nil {
				var expirationTimeUtc date.Time
				err = json.Unmarshal(*v, &expirationTimeUtc)
				if err != nil {
					return err
				}
				esup.ExpirationTimeUtc = &expirationTimeUtc
			}
		case "eventDeliverySchema":
			if v != nil {
				var eventDeliverySchema EventDeliverySchema
				err = json.Unmarshal(*v, &eventDeliverySchema)
				if err != nil {
					return err
				}
				esup.EventDeliverySchema = eventDeliverySchema
			}
		case "retryPolicy":
			if v != nil {
				var retryPolicy RetryPolicy
				err = json.Unmarshal(*v, &retryPolicy)
				if err != nil {
					return err
				}
				esup.RetryPolicy = &retryPolicy
			}
		case "deadLetterDestination":
			if v != nil {
				deadLetterDestination, err := unmarshalBasicDeadLetterDestination(*v)
				if err != nil {
					return err
				}
				esup.DeadLetterDestination = deadLetterDestination
			}
		}
	}

	return nil
}

// EventType event Type for a subject under a topic
type EventType struct {
	// EventTypeProperties - Properties of the event type.
	*EventTypeProperties `json:"properties,omitempty"`
	// ID - Fully qualified identifier of the resource
	ID *string `json:"id,omitempty"`
	// Name - Name of the resource
	Name *string `json:"name,omitempty"`
	// Type - Type of the resource
	Type *string `json:"type,omitempty"`
}

// MarshalJSON is the custom marshaler for EventType.
func (et EventType) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	if et.EventTypeProperties != nil {
		objectMap["properties"] = et.EventTypeProperties
	}
	if et.ID != nil {
		objectMap["id"] = et.ID
	}
	if et.Name != nil {
		objectMap["name"] = et.Name
	}
	if et.Type != nil {
		objectMap["type"] = et.Type
	}
	return json.Marshal(objectMap)
}

// UnmarshalJSON is the custom unmarshaler for EventType struct.
func (et *EventType) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	err := json.Unmarshal(body, &m)
	if err != nil {
		return err
	}
	for k, v := range m {
		switch k {
		case "properties":
			if v != nil {
				var eventTypeProperties EventTypeProperties
				err = json.Unmarshal(*v, &eventTypeProperties)
				if err != nil {
					return err
				}
				et.EventTypeProperties = &eventTypeProperties
			}
		case "id":
			if v != nil {
				var ID string
				err = json.Unmarshal(*v, &ID)
				if err != nil {
					return err
				}
				et.ID = &ID
			}
		case "name":
			if v != nil {
				var name string
				err = json.Unmarshal(*v, &name)
				if err != nil {
					return err
				}
				et.Name = &name
			}
		case "type":
			if v != nil {
				var typeVar string
				err = json.Unmarshal(*v, &typeVar)
				if err != nil {
					return err
				}
				et.Type = &typeVar
			}
		}
	}

	return nil
}

// EventTypeProperties properties of the event type
type EventTypeProperties struct {
	// DisplayName - Display name of the event type.
	DisplayName *string `json:"displayName,omitempty"`
	// Description - Description of the event type.
	Description *string `json:"description,omitempty"`
	// SchemaURL - Url of the schema for this event type.
	SchemaURL *string `json:"schemaUrl,omitempty"`
}

// EventTypesListResult result of the List Event Types operation
type EventTypesListResult struct {
	autorest.Response `json:"-"`
	// Value - A collection of event types
	Value *[]EventType `json:"value,omitempty"`
}

// HybridConnectionEventSubscriptionDestination information about the HybridConnection destination for an
// event subscription.
type HybridConnectionEventSubscriptionDestination struct {
	// HybridConnectionEventSubscriptionDestinationProperties - Hybrid connection Properties of the event subscription destination
	*HybridConnectionEventSubscriptionDestinationProperties `json:"properties,omitempty"`
	// EndpointType - Possible values include: 'EndpointTypeEventSubscriptionDestination', 'EndpointTypeWebHook', 'EndpointTypeEventHub', 'EndpointTypeStorageQueue', 'EndpointTypeHybridConnection'
	EndpointType EndpointType `json:"endpointType,omitempty"`
}

// MarshalJSON is the custom marshaler for HybridConnectionEventSubscriptionDestination.
func (hcesd HybridConnectionEventSubscriptionDestination) MarshalJSON() ([]byte, error) {
	// Force the discriminator for this concrete destination type.
	hcesd.EndpointType = EndpointTypeHybridConnection
	objectMap := make(map[string]interface{})
	if hcesd.HybridConnectionEventSubscriptionDestinationProperties != nil {
		objectMap["properties"] = hcesd.HybridConnectionEventSubscriptionDestinationProperties
	}
	if hcesd.EndpointType != "" {
		objectMap["endpointType"] = hcesd.EndpointType
	}
	return json.Marshal(objectMap)
}

// AsWebHookEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for HybridConnectionEventSubscriptionDestination.
func (hcesd HybridConnectionEventSubscriptionDestination) AsWebHookEventSubscriptionDestination() (*WebHookEventSubscriptionDestination, bool) {
	return nil, false
}

// AsEventHubEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for HybridConnectionEventSubscriptionDestination.
func (hcesd HybridConnectionEventSubscriptionDestination) AsEventHubEventSubscriptionDestination() (*EventHubEventSubscriptionDestination, bool) {
	return nil, false
}

// AsStorageQueueEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for HybridConnectionEventSubscriptionDestination.
func (hcesd HybridConnectionEventSubscriptionDestination) AsStorageQueueEventSubscriptionDestination() (*StorageQueueEventSubscriptionDestination, bool) {
	return nil, false
}

// AsHybridConnectionEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for HybridConnectionEventSubscriptionDestination.
// This is the matching concrete type, so the assertion succeeds.
func (hcesd HybridConnectionEventSubscriptionDestination) AsHybridConnectionEventSubscriptionDestination() (*HybridConnectionEventSubscriptionDestination, bool) {
	return &hcesd, true
}

// AsEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for HybridConnectionEventSubscriptionDestination.
func (hcesd HybridConnectionEventSubscriptionDestination) AsEventSubscriptionDestination() (*EventSubscriptionDestination, bool) {
	return nil, false
}

// AsBasicEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for HybridConnectionEventSubscriptionDestination.
func (hcesd HybridConnectionEventSubscriptionDestination) AsBasicEventSubscriptionDestination() (BasicEventSubscriptionDestination, bool) {
	return &hcesd, true
}

// UnmarshalJSON is the custom unmarshaler for HybridConnectionEventSubscriptionDestination struct.
func (hcesd *HybridConnectionEventSubscriptionDestination) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	err := json.Unmarshal(body, &m)
	if err != nil {
		return err
	}
	for k, v := range m {
		switch k {
		case "properties":
			// The nested "properties" object is decoded into the embedded
			// *HybridConnectionEventSubscriptionDestinationProperties (flattened model).
			if v != nil {
				var hybridConnectionEventSubscriptionDestinationProperties HybridConnectionEventSubscriptionDestinationProperties
				err = json.Unmarshal(*v, &hybridConnectionEventSubscriptionDestinationProperties)
				if err != nil {
					return err
				}
				hcesd.HybridConnectionEventSubscriptionDestinationProperties = &hybridConnectionEventSubscriptionDestinationProperties
			}
		case "endpointType":
			if v != nil {
				var endpointType EndpointType
				err = json.Unmarshal(*v, &endpointType)
				if err != nil {
					return err
				}
				hcesd.EndpointType = endpointType
			}
		}
	}

	return nil
}

// HybridConnectionEventSubscriptionDestinationProperties the properties for a hybrid connection
// destination.
type HybridConnectionEventSubscriptionDestinationProperties struct {
	// ResourceID - The Azure Resource ID of an hybrid connection that is the destination of an event subscription.
	ResourceID *string `json:"resourceId,omitempty"`
}

// BasicInputSchemaMapping by default, Event Grid expects events to be in the Event Grid event schema. Specifying an
// input schema mapping enables publishing to Event Grid using a custom input schema. Currently, the only supported
// type of InputSchemaMapping is 'JsonInputSchemaMapping'.
type BasicInputSchemaMapping interface {
	AsJSONInputSchemaMapping() (*JSONInputSchemaMapping, bool)
	AsInputSchemaMapping() (*InputSchemaMapping, bool)
}

// InputSchemaMapping by default, Event Grid expects events to be in the Event Grid event schema. Specifying an
// input schema mapping enables publishing to Event Grid using a custom input schema. Currently, the only
// supported type of InputSchemaMapping is 'JsonInputSchemaMapping'.
+type InputSchemaMapping struct { + // InputSchemaMappingType - Possible values include: 'InputSchemaMappingTypeInputSchemaMapping', 'InputSchemaMappingTypeJSON' + InputSchemaMappingType InputSchemaMappingType `json:"inputSchemaMappingType,omitempty"` +} + +func unmarshalBasicInputSchemaMapping(body []byte) (BasicInputSchemaMapping, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["inputSchemaMappingType"] { + case string(InputSchemaMappingTypeJSON): + var jism JSONInputSchemaMapping + err := json.Unmarshal(body, &jism) + return jism, err + default: + var ism InputSchemaMapping + err := json.Unmarshal(body, &ism) + return ism, err + } +} +func unmarshalBasicInputSchemaMappingArray(body []byte) ([]BasicInputSchemaMapping, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + ismArray := make([]BasicInputSchemaMapping, len(rawMessages)) + + for index, rawMessage := range rawMessages { + ism, err := unmarshalBasicInputSchemaMapping(*rawMessage) + if err != nil { + return nil, err + } + ismArray[index] = ism + } + return ismArray, nil +} + +// MarshalJSON is the custom marshaler for InputSchemaMapping. +func (ism InputSchemaMapping) MarshalJSON() ([]byte, error) { + ism.InputSchemaMappingType = InputSchemaMappingTypeInputSchemaMapping + objectMap := make(map[string]interface{}) + if ism.InputSchemaMappingType != "" { + objectMap["inputSchemaMappingType"] = ism.InputSchemaMappingType + } + return json.Marshal(objectMap) +} + +// AsJSONInputSchemaMapping is the BasicInputSchemaMapping implementation for InputSchemaMapping. +func (ism InputSchemaMapping) AsJSONInputSchemaMapping() (*JSONInputSchemaMapping, bool) { + return nil, false +} + +// AsInputSchemaMapping is the BasicInputSchemaMapping implementation for InputSchemaMapping. 
func (ism InputSchemaMapping) AsInputSchemaMapping() (*InputSchemaMapping, bool) {
	return &ism, true
}

// AsBasicInputSchemaMapping is the BasicInputSchemaMapping implementation for InputSchemaMapping.
func (ism InputSchemaMapping) AsBasicInputSchemaMapping() (BasicInputSchemaMapping, bool) {
	return &ism, true
}

// JSONField this is used to express the source of an input schema mapping for a single target field in the
// Event Grid Event schema. This is currently used in the mappings for the 'id','topic' and 'eventTime'
// properties. This represents a field in the input event schema.
type JSONField struct {
	// SourceField - Name of a field in the input event schema that's to be used as the source of a mapping.
	SourceField *string `json:"sourceField,omitempty"`
}

// JSONFieldWithDefault this is used to express the source of an input schema mapping for a single target
// field in the Event Grid Event schema. This is currently used in the mappings for the
// 'subject','eventType' and 'dataVersion' properties. This represents a field in the input event schema
// along with a default value to be used, and at least one of these two properties should be provided.
type JSONFieldWithDefault struct {
	// SourceField - Name of a field in the input event schema that's to be used as the source of a mapping.
	SourceField *string `json:"sourceField,omitempty"`
	// DefaultValue - The default value to be used for mapping when a SourceField is not provided or if there's no property with the specified name in the published JSON event payload.
	DefaultValue *string `json:"defaultValue,omitempty"`
}

// JSONInputSchemaMapping this enables publishing to Event Grid using a custom input schema. This can be
// used to map properties from a custom input JSON schema to the Event Grid event schema.
type JSONInputSchemaMapping struct {
	// JSONInputSchemaMappingProperties - JSON Properties of the input schema mapping
	*JSONInputSchemaMappingProperties `json:"properties,omitempty"`
	// InputSchemaMappingType - Possible values include: 'InputSchemaMappingTypeInputSchemaMapping', 'InputSchemaMappingTypeJSON'
	InputSchemaMappingType InputSchemaMappingType `json:"inputSchemaMappingType,omitempty"`
}

// MarshalJSON is the custom marshaler for JSONInputSchemaMapping.
func (jism JSONInputSchemaMapping) MarshalJSON() ([]byte, error) {
	// Force the discriminator for this concrete mapping type.
	jism.InputSchemaMappingType = InputSchemaMappingTypeJSON
	objectMap := make(map[string]interface{})
	if jism.JSONInputSchemaMappingProperties != nil {
		objectMap["properties"] = jism.JSONInputSchemaMappingProperties
	}
	if jism.InputSchemaMappingType != "" {
		objectMap["inputSchemaMappingType"] = jism.InputSchemaMappingType
	}
	return json.Marshal(objectMap)
}

// AsJSONInputSchemaMapping is the BasicInputSchemaMapping implementation for JSONInputSchemaMapping.
func (jism JSONInputSchemaMapping) AsJSONInputSchemaMapping() (*JSONInputSchemaMapping, bool) {
	return &jism, true
}

// AsInputSchemaMapping is the BasicInputSchemaMapping implementation for JSONInputSchemaMapping.
func (jism JSONInputSchemaMapping) AsInputSchemaMapping() (*InputSchemaMapping, bool) {
	return nil, false
}

// AsBasicInputSchemaMapping is the BasicInputSchemaMapping implementation for JSONInputSchemaMapping.
func (jism JSONInputSchemaMapping) AsBasicInputSchemaMapping() (BasicInputSchemaMapping, bool) {
	return &jism, true
}

// UnmarshalJSON is the custom unmarshaler for JSONInputSchemaMapping struct.
+func (jism *JSONInputSchemaMapping) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var JSONInputSchemaMappingProperties JSONInputSchemaMappingProperties + err = json.Unmarshal(*v, &JSONInputSchemaMappingProperties) + if err != nil { + return err + } + jism.JSONInputSchemaMappingProperties = &JSONInputSchemaMappingProperties + } + case "inputSchemaMappingType": + if v != nil { + var inputSchemaMappingType InputSchemaMappingType + err = json.Unmarshal(*v, &inputSchemaMappingType) + if err != nil { + return err + } + jism.InputSchemaMappingType = inputSchemaMappingType + } + } + } + + return nil +} + +// JSONInputSchemaMappingProperties this can be used to map properties of a source schema (or default +// values, for certain supported properties) to properties of the EventGridEvent schema. +type JSONInputSchemaMappingProperties struct { + // ID - The mapping information for the Id property of the Event Grid Event. + ID *JSONField `json:"id,omitempty"` + // Topic - The mapping information for the Topic property of the Event Grid Event. + Topic *JSONField `json:"topic,omitempty"` + // EventTime - The mapping information for the EventTime property of the Event Grid Event. + EventTime *JSONField `json:"eventTime,omitempty"` + // EventType - The mapping information for the EventType property of the Event Grid Event. + EventType *JSONFieldWithDefault `json:"eventType,omitempty"` + // Subject - The mapping information for the Subject property of the Event Grid Event. + Subject *JSONFieldWithDefault `json:"subject,omitempty"` + // DataVersion - The mapping information for the DataVersion property of the Event Grid Event. 
+ DataVersion *JSONFieldWithDefault `json:"dataVersion,omitempty"` +} + +// NumberGreaterThanAdvancedFilter numberGreaterThan Filter +type NumberGreaterThanAdvancedFilter struct { + // Value - The filter value + Value *float64 `json:"value,omitempty"` + // Key - The filter key. Represents an event property with up to two levels of nesting. + Key *string `json:"key,omitempty"` + // OperatorType - Possible values include: 'OperatorTypeAdvancedFilter', 'OperatorTypeNumberIn', 'OperatorTypeNumberNotIn', 'OperatorTypeNumberLessThan', 'OperatorTypeNumberGreaterThan', 'OperatorTypeNumberLessThanOrEquals', 'OperatorTypeNumberGreaterThanOrEquals', 'OperatorTypeBoolEquals', 'OperatorTypeStringIn', 'OperatorTypeStringNotIn', 'OperatorTypeStringBeginsWith', 'OperatorTypeStringEndsWith', 'OperatorTypeStringContains' + OperatorType OperatorType `json:"operatorType,omitempty"` +} + +// MarshalJSON is the custom marshaler for NumberGreaterThanAdvancedFilter. +func (ngtaf NumberGreaterThanAdvancedFilter) MarshalJSON() ([]byte, error) { + ngtaf.OperatorType = OperatorTypeNumberGreaterThan + objectMap := make(map[string]interface{}) + if ngtaf.Value != nil { + objectMap["value"] = ngtaf.Value + } + if ngtaf.Key != nil { + objectMap["key"] = ngtaf.Key + } + if ngtaf.OperatorType != "" { + objectMap["operatorType"] = ngtaf.OperatorType + } + return json.Marshal(objectMap) +} + +// AsNumberInAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanAdvancedFilter. +func (ngtaf NumberGreaterThanAdvancedFilter) AsNumberInAdvancedFilter() (*NumberInAdvancedFilter, bool) { + return nil, false +} + +// AsNumberNotInAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanAdvancedFilter. +func (ngtaf NumberGreaterThanAdvancedFilter) AsNumberNotInAdvancedFilter() (*NumberNotInAdvancedFilter, bool) { + return nil, false +} + +// AsNumberLessThanAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanAdvancedFilter. 
+func (ngtaf NumberGreaterThanAdvancedFilter) AsNumberLessThanAdvancedFilter() (*NumberLessThanAdvancedFilter, bool) { + return nil, false +} + +// AsNumberGreaterThanAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanAdvancedFilter. +func (ngtaf NumberGreaterThanAdvancedFilter) AsNumberGreaterThanAdvancedFilter() (*NumberGreaterThanAdvancedFilter, bool) { + return &ngtaf, true +} + +// AsNumberLessThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanAdvancedFilter. +func (ngtaf NumberGreaterThanAdvancedFilter) AsNumberLessThanOrEqualsAdvancedFilter() (*NumberLessThanOrEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsNumberGreaterThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanAdvancedFilter. +func (ngtaf NumberGreaterThanAdvancedFilter) AsNumberGreaterThanOrEqualsAdvancedFilter() (*NumberGreaterThanOrEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsBoolEqualsAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanAdvancedFilter. +func (ngtaf NumberGreaterThanAdvancedFilter) AsBoolEqualsAdvancedFilter() (*BoolEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsStringInAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanAdvancedFilter. +func (ngtaf NumberGreaterThanAdvancedFilter) AsStringInAdvancedFilter() (*StringInAdvancedFilter, bool) { + return nil, false +} + +// AsStringNotInAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanAdvancedFilter. +func (ngtaf NumberGreaterThanAdvancedFilter) AsStringNotInAdvancedFilter() (*StringNotInAdvancedFilter, bool) { + return nil, false +} + +// AsStringBeginsWithAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanAdvancedFilter. 
+func (ngtaf NumberGreaterThanAdvancedFilter) AsStringBeginsWithAdvancedFilter() (*StringBeginsWithAdvancedFilter, bool) { + return nil, false +} + +// AsStringEndsWithAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanAdvancedFilter. +func (ngtaf NumberGreaterThanAdvancedFilter) AsStringEndsWithAdvancedFilter() (*StringEndsWithAdvancedFilter, bool) { + return nil, false +} + +// AsStringContainsAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanAdvancedFilter. +func (ngtaf NumberGreaterThanAdvancedFilter) AsStringContainsAdvancedFilter() (*StringContainsAdvancedFilter, bool) { + return nil, false +} + +// AsAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanAdvancedFilter. +func (ngtaf NumberGreaterThanAdvancedFilter) AsAdvancedFilter() (*AdvancedFilter, bool) { + return nil, false +} + +// AsBasicAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanAdvancedFilter. +func (ngtaf NumberGreaterThanAdvancedFilter) AsBasicAdvancedFilter() (BasicAdvancedFilter, bool) { + return &ngtaf, true +} + +// NumberGreaterThanOrEqualsAdvancedFilter numberGreaterThanOrEquals Filter +type NumberGreaterThanOrEqualsAdvancedFilter struct { + // Value - The filter value + Value *float64 `json:"value,omitempty"` + // Key - The filter key. Represents an event property with up to two levels of nesting. 
+ Key *string `json:"key,omitempty"` + // OperatorType - Possible values include: 'OperatorTypeAdvancedFilter', 'OperatorTypeNumberIn', 'OperatorTypeNumberNotIn', 'OperatorTypeNumberLessThan', 'OperatorTypeNumberGreaterThan', 'OperatorTypeNumberLessThanOrEquals', 'OperatorTypeNumberGreaterThanOrEquals', 'OperatorTypeBoolEquals', 'OperatorTypeStringIn', 'OperatorTypeStringNotIn', 'OperatorTypeStringBeginsWith', 'OperatorTypeStringEndsWith', 'OperatorTypeStringContains' + OperatorType OperatorType `json:"operatorType,omitempty"` +} + +// MarshalJSON is the custom marshaler for NumberGreaterThanOrEqualsAdvancedFilter. +func (ngtoeaf NumberGreaterThanOrEqualsAdvancedFilter) MarshalJSON() ([]byte, error) { + ngtoeaf.OperatorType = OperatorTypeNumberGreaterThanOrEquals + objectMap := make(map[string]interface{}) + if ngtoeaf.Value != nil { + objectMap["value"] = ngtoeaf.Value + } + if ngtoeaf.Key != nil { + objectMap["key"] = ngtoeaf.Key + } + if ngtoeaf.OperatorType != "" { + objectMap["operatorType"] = ngtoeaf.OperatorType + } + return json.Marshal(objectMap) +} + +// AsNumberInAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanOrEqualsAdvancedFilter. +func (ngtoeaf NumberGreaterThanOrEqualsAdvancedFilter) AsNumberInAdvancedFilter() (*NumberInAdvancedFilter, bool) { + return nil, false +} + +// AsNumberNotInAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanOrEqualsAdvancedFilter. +func (ngtoeaf NumberGreaterThanOrEqualsAdvancedFilter) AsNumberNotInAdvancedFilter() (*NumberNotInAdvancedFilter, bool) { + return nil, false +} + +// AsNumberLessThanAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanOrEqualsAdvancedFilter. 
+func (ngtoeaf NumberGreaterThanOrEqualsAdvancedFilter) AsNumberLessThanAdvancedFilter() (*NumberLessThanAdvancedFilter, bool) { + return nil, false +} + +// AsNumberGreaterThanAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanOrEqualsAdvancedFilter. +func (ngtoeaf NumberGreaterThanOrEqualsAdvancedFilter) AsNumberGreaterThanAdvancedFilter() (*NumberGreaterThanAdvancedFilter, bool) { + return nil, false +} + +// AsNumberLessThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanOrEqualsAdvancedFilter. +func (ngtoeaf NumberGreaterThanOrEqualsAdvancedFilter) AsNumberLessThanOrEqualsAdvancedFilter() (*NumberLessThanOrEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsNumberGreaterThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanOrEqualsAdvancedFilter. +func (ngtoeaf NumberGreaterThanOrEqualsAdvancedFilter) AsNumberGreaterThanOrEqualsAdvancedFilter() (*NumberGreaterThanOrEqualsAdvancedFilter, bool) { + return &ngtoeaf, true +} + +// AsBoolEqualsAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanOrEqualsAdvancedFilter. +func (ngtoeaf NumberGreaterThanOrEqualsAdvancedFilter) AsBoolEqualsAdvancedFilter() (*BoolEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsStringInAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanOrEqualsAdvancedFilter. +func (ngtoeaf NumberGreaterThanOrEqualsAdvancedFilter) AsStringInAdvancedFilter() (*StringInAdvancedFilter, bool) { + return nil, false +} + +// AsStringNotInAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanOrEqualsAdvancedFilter. +func (ngtoeaf NumberGreaterThanOrEqualsAdvancedFilter) AsStringNotInAdvancedFilter() (*StringNotInAdvancedFilter, bool) { + return nil, false +} + +// AsStringBeginsWithAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanOrEqualsAdvancedFilter. 
+func (ngtoeaf NumberGreaterThanOrEqualsAdvancedFilter) AsStringBeginsWithAdvancedFilter() (*StringBeginsWithAdvancedFilter, bool) { + return nil, false +} + +// AsStringEndsWithAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanOrEqualsAdvancedFilter. +func (ngtoeaf NumberGreaterThanOrEqualsAdvancedFilter) AsStringEndsWithAdvancedFilter() (*StringEndsWithAdvancedFilter, bool) { + return nil, false +} + +// AsStringContainsAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanOrEqualsAdvancedFilter. +func (ngtoeaf NumberGreaterThanOrEqualsAdvancedFilter) AsStringContainsAdvancedFilter() (*StringContainsAdvancedFilter, bool) { + return nil, false +} + +// AsAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanOrEqualsAdvancedFilter. +func (ngtoeaf NumberGreaterThanOrEqualsAdvancedFilter) AsAdvancedFilter() (*AdvancedFilter, bool) { + return nil, false +} + +// AsBasicAdvancedFilter is the BasicAdvancedFilter implementation for NumberGreaterThanOrEqualsAdvancedFilter. +func (ngtoeaf NumberGreaterThanOrEqualsAdvancedFilter) AsBasicAdvancedFilter() (BasicAdvancedFilter, bool) { + return &ngtoeaf, true +} + +// NumberInAdvancedFilter numberIn filter +type NumberInAdvancedFilter struct { + // Values - The set of filter values + Values *[]float64 `json:"values,omitempty"` + // Key - The filter key. Represents an event property with up to two levels of nesting. 
+ Key *string `json:"key,omitempty"` + // OperatorType - Possible values include: 'OperatorTypeAdvancedFilter', 'OperatorTypeNumberIn', 'OperatorTypeNumberNotIn', 'OperatorTypeNumberLessThan', 'OperatorTypeNumberGreaterThan', 'OperatorTypeNumberLessThanOrEquals', 'OperatorTypeNumberGreaterThanOrEquals', 'OperatorTypeBoolEquals', 'OperatorTypeStringIn', 'OperatorTypeStringNotIn', 'OperatorTypeStringBeginsWith', 'OperatorTypeStringEndsWith', 'OperatorTypeStringContains' + OperatorType OperatorType `json:"operatorType,omitempty"` +} + +// MarshalJSON is the custom marshaler for NumberInAdvancedFilter. +func (niaf NumberInAdvancedFilter) MarshalJSON() ([]byte, error) { + niaf.OperatorType = OperatorTypeNumberIn + objectMap := make(map[string]interface{}) + if niaf.Values != nil { + objectMap["values"] = niaf.Values + } + if niaf.Key != nil { + objectMap["key"] = niaf.Key + } + if niaf.OperatorType != "" { + objectMap["operatorType"] = niaf.OperatorType + } + return json.Marshal(objectMap) +} + +// AsNumberInAdvancedFilter is the BasicAdvancedFilter implementation for NumberInAdvancedFilter. +func (niaf NumberInAdvancedFilter) AsNumberInAdvancedFilter() (*NumberInAdvancedFilter, bool) { + return &niaf, true +} + +// AsNumberNotInAdvancedFilter is the BasicAdvancedFilter implementation for NumberInAdvancedFilter. +func (niaf NumberInAdvancedFilter) AsNumberNotInAdvancedFilter() (*NumberNotInAdvancedFilter, bool) { + return nil, false +} + +// AsNumberLessThanAdvancedFilter is the BasicAdvancedFilter implementation for NumberInAdvancedFilter. +func (niaf NumberInAdvancedFilter) AsNumberLessThanAdvancedFilter() (*NumberLessThanAdvancedFilter, bool) { + return nil, false +} + +// AsNumberGreaterThanAdvancedFilter is the BasicAdvancedFilter implementation for NumberInAdvancedFilter. 
+func (niaf NumberInAdvancedFilter) AsNumberGreaterThanAdvancedFilter() (*NumberGreaterThanAdvancedFilter, bool) { + return nil, false +} + +// AsNumberLessThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for NumberInAdvancedFilter. +func (niaf NumberInAdvancedFilter) AsNumberLessThanOrEqualsAdvancedFilter() (*NumberLessThanOrEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsNumberGreaterThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for NumberInAdvancedFilter. +func (niaf NumberInAdvancedFilter) AsNumberGreaterThanOrEqualsAdvancedFilter() (*NumberGreaterThanOrEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsBoolEqualsAdvancedFilter is the BasicAdvancedFilter implementation for NumberInAdvancedFilter. +func (niaf NumberInAdvancedFilter) AsBoolEqualsAdvancedFilter() (*BoolEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsStringInAdvancedFilter is the BasicAdvancedFilter implementation for NumberInAdvancedFilter. +func (niaf NumberInAdvancedFilter) AsStringInAdvancedFilter() (*StringInAdvancedFilter, bool) { + return nil, false +} + +// AsStringNotInAdvancedFilter is the BasicAdvancedFilter implementation for NumberInAdvancedFilter. +func (niaf NumberInAdvancedFilter) AsStringNotInAdvancedFilter() (*StringNotInAdvancedFilter, bool) { + return nil, false +} + +// AsStringBeginsWithAdvancedFilter is the BasicAdvancedFilter implementation for NumberInAdvancedFilter. +func (niaf NumberInAdvancedFilter) AsStringBeginsWithAdvancedFilter() (*StringBeginsWithAdvancedFilter, bool) { + return nil, false +} + +// AsStringEndsWithAdvancedFilter is the BasicAdvancedFilter implementation for NumberInAdvancedFilter. +func (niaf NumberInAdvancedFilter) AsStringEndsWithAdvancedFilter() (*StringEndsWithAdvancedFilter, bool) { + return nil, false +} + +// AsStringContainsAdvancedFilter is the BasicAdvancedFilter implementation for NumberInAdvancedFilter. 
+func (niaf NumberInAdvancedFilter) AsStringContainsAdvancedFilter() (*StringContainsAdvancedFilter, bool) { + return nil, false +} + +// AsAdvancedFilter is the BasicAdvancedFilter implementation for NumberInAdvancedFilter. +func (niaf NumberInAdvancedFilter) AsAdvancedFilter() (*AdvancedFilter, bool) { + return nil, false +} + +// AsBasicAdvancedFilter is the BasicAdvancedFilter implementation for NumberInAdvancedFilter. +func (niaf NumberInAdvancedFilter) AsBasicAdvancedFilter() (BasicAdvancedFilter, bool) { + return &niaf, true +} + +// NumberLessThanAdvancedFilter numberLessThan Filter +type NumberLessThanAdvancedFilter struct { + // Value - The filter value + Value *float64 `json:"value,omitempty"` + // Key - The filter key. Represents an event property with up to two levels of nesting. + Key *string `json:"key,omitempty"` + // OperatorType - Possible values include: 'OperatorTypeAdvancedFilter', 'OperatorTypeNumberIn', 'OperatorTypeNumberNotIn', 'OperatorTypeNumberLessThan', 'OperatorTypeNumberGreaterThan', 'OperatorTypeNumberLessThanOrEquals', 'OperatorTypeNumberGreaterThanOrEquals', 'OperatorTypeBoolEquals', 'OperatorTypeStringIn', 'OperatorTypeStringNotIn', 'OperatorTypeStringBeginsWith', 'OperatorTypeStringEndsWith', 'OperatorTypeStringContains' + OperatorType OperatorType `json:"operatorType,omitempty"` +} + +// MarshalJSON is the custom marshaler for NumberLessThanAdvancedFilter. +func (nltaf NumberLessThanAdvancedFilter) MarshalJSON() ([]byte, error) { + nltaf.OperatorType = OperatorTypeNumberLessThan + objectMap := make(map[string]interface{}) + if nltaf.Value != nil { + objectMap["value"] = nltaf.Value + } + if nltaf.Key != nil { + objectMap["key"] = nltaf.Key + } + if nltaf.OperatorType != "" { + objectMap["operatorType"] = nltaf.OperatorType + } + return json.Marshal(objectMap) +} + +// AsNumberInAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanAdvancedFilter. 
+func (nltaf NumberLessThanAdvancedFilter) AsNumberInAdvancedFilter() (*NumberInAdvancedFilter, bool) { + return nil, false +} + +// AsNumberNotInAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanAdvancedFilter. +func (nltaf NumberLessThanAdvancedFilter) AsNumberNotInAdvancedFilter() (*NumberNotInAdvancedFilter, bool) { + return nil, false +} + +// AsNumberLessThanAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanAdvancedFilter. +func (nltaf NumberLessThanAdvancedFilter) AsNumberLessThanAdvancedFilter() (*NumberLessThanAdvancedFilter, bool) { + return &nltaf, true +} + +// AsNumberGreaterThanAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanAdvancedFilter. +func (nltaf NumberLessThanAdvancedFilter) AsNumberGreaterThanAdvancedFilter() (*NumberGreaterThanAdvancedFilter, bool) { + return nil, false +} + +// AsNumberLessThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanAdvancedFilter. +func (nltaf NumberLessThanAdvancedFilter) AsNumberLessThanOrEqualsAdvancedFilter() (*NumberLessThanOrEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsNumberGreaterThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanAdvancedFilter. +func (nltaf NumberLessThanAdvancedFilter) AsNumberGreaterThanOrEqualsAdvancedFilter() (*NumberGreaterThanOrEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsBoolEqualsAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanAdvancedFilter. +func (nltaf NumberLessThanAdvancedFilter) AsBoolEqualsAdvancedFilter() (*BoolEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsStringInAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanAdvancedFilter. 
+func (nltaf NumberLessThanAdvancedFilter) AsStringInAdvancedFilter() (*StringInAdvancedFilter, bool) { + return nil, false +} + +// AsStringNotInAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanAdvancedFilter. +func (nltaf NumberLessThanAdvancedFilter) AsStringNotInAdvancedFilter() (*StringNotInAdvancedFilter, bool) { + return nil, false +} + +// AsStringBeginsWithAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanAdvancedFilter. +func (nltaf NumberLessThanAdvancedFilter) AsStringBeginsWithAdvancedFilter() (*StringBeginsWithAdvancedFilter, bool) { + return nil, false +} + +// AsStringEndsWithAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanAdvancedFilter. +func (nltaf NumberLessThanAdvancedFilter) AsStringEndsWithAdvancedFilter() (*StringEndsWithAdvancedFilter, bool) { + return nil, false +} + +// AsStringContainsAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanAdvancedFilter. +func (nltaf NumberLessThanAdvancedFilter) AsStringContainsAdvancedFilter() (*StringContainsAdvancedFilter, bool) { + return nil, false +} + +// AsAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanAdvancedFilter. +func (nltaf NumberLessThanAdvancedFilter) AsAdvancedFilter() (*AdvancedFilter, bool) { + return nil, false +} + +// AsBasicAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanAdvancedFilter. +func (nltaf NumberLessThanAdvancedFilter) AsBasicAdvancedFilter() (BasicAdvancedFilter, bool) { + return &nltaf, true +} + +// NumberLessThanOrEqualsAdvancedFilter numberLessThanOrEquals Filter +type NumberLessThanOrEqualsAdvancedFilter struct { + // Value - The filter value + Value *float64 `json:"value,omitempty"` + // Key - The filter key. Represents an event property with up to two levels of nesting. 
+ Key *string `json:"key,omitempty"` + // OperatorType - Possible values include: 'OperatorTypeAdvancedFilter', 'OperatorTypeNumberIn', 'OperatorTypeNumberNotIn', 'OperatorTypeNumberLessThan', 'OperatorTypeNumberGreaterThan', 'OperatorTypeNumberLessThanOrEquals', 'OperatorTypeNumberGreaterThanOrEquals', 'OperatorTypeBoolEquals', 'OperatorTypeStringIn', 'OperatorTypeStringNotIn', 'OperatorTypeStringBeginsWith', 'OperatorTypeStringEndsWith', 'OperatorTypeStringContains' + OperatorType OperatorType `json:"operatorType,omitempty"` +} + +// MarshalJSON is the custom marshaler for NumberLessThanOrEqualsAdvancedFilter. +func (nltoeaf NumberLessThanOrEqualsAdvancedFilter) MarshalJSON() ([]byte, error) { + nltoeaf.OperatorType = OperatorTypeNumberLessThanOrEquals + objectMap := make(map[string]interface{}) + if nltoeaf.Value != nil { + objectMap["value"] = nltoeaf.Value + } + if nltoeaf.Key != nil { + objectMap["key"] = nltoeaf.Key + } + if nltoeaf.OperatorType != "" { + objectMap["operatorType"] = nltoeaf.OperatorType + } + return json.Marshal(objectMap) +} + +// AsNumberInAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanOrEqualsAdvancedFilter. +func (nltoeaf NumberLessThanOrEqualsAdvancedFilter) AsNumberInAdvancedFilter() (*NumberInAdvancedFilter, bool) { + return nil, false +} + +// AsNumberNotInAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanOrEqualsAdvancedFilter. +func (nltoeaf NumberLessThanOrEqualsAdvancedFilter) AsNumberNotInAdvancedFilter() (*NumberNotInAdvancedFilter, bool) { + return nil, false +} + +// AsNumberLessThanAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanOrEqualsAdvancedFilter. +func (nltoeaf NumberLessThanOrEqualsAdvancedFilter) AsNumberLessThanAdvancedFilter() (*NumberLessThanAdvancedFilter, bool) { + return nil, false +} + +// AsNumberGreaterThanAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanOrEqualsAdvancedFilter. 
+func (nltoeaf NumberLessThanOrEqualsAdvancedFilter) AsNumberGreaterThanAdvancedFilter() (*NumberGreaterThanAdvancedFilter, bool) { + return nil, false +} + +// AsNumberLessThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanOrEqualsAdvancedFilter. +func (nltoeaf NumberLessThanOrEqualsAdvancedFilter) AsNumberLessThanOrEqualsAdvancedFilter() (*NumberLessThanOrEqualsAdvancedFilter, bool) { + return &nltoeaf, true +} + +// AsNumberGreaterThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanOrEqualsAdvancedFilter. +func (nltoeaf NumberLessThanOrEqualsAdvancedFilter) AsNumberGreaterThanOrEqualsAdvancedFilter() (*NumberGreaterThanOrEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsBoolEqualsAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanOrEqualsAdvancedFilter. +func (nltoeaf NumberLessThanOrEqualsAdvancedFilter) AsBoolEqualsAdvancedFilter() (*BoolEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsStringInAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanOrEqualsAdvancedFilter. +func (nltoeaf NumberLessThanOrEqualsAdvancedFilter) AsStringInAdvancedFilter() (*StringInAdvancedFilter, bool) { + return nil, false +} + +// AsStringNotInAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanOrEqualsAdvancedFilter. +func (nltoeaf NumberLessThanOrEqualsAdvancedFilter) AsStringNotInAdvancedFilter() (*StringNotInAdvancedFilter, bool) { + return nil, false +} + +// AsStringBeginsWithAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanOrEqualsAdvancedFilter. +func (nltoeaf NumberLessThanOrEqualsAdvancedFilter) AsStringBeginsWithAdvancedFilter() (*StringBeginsWithAdvancedFilter, bool) { + return nil, false +} + +// AsStringEndsWithAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanOrEqualsAdvancedFilter. 
+func (nltoeaf NumberLessThanOrEqualsAdvancedFilter) AsStringEndsWithAdvancedFilter() (*StringEndsWithAdvancedFilter, bool) { + return nil, false +} + +// AsStringContainsAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanOrEqualsAdvancedFilter. +func (nltoeaf NumberLessThanOrEqualsAdvancedFilter) AsStringContainsAdvancedFilter() (*StringContainsAdvancedFilter, bool) { + return nil, false +} + +// AsAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanOrEqualsAdvancedFilter. +func (nltoeaf NumberLessThanOrEqualsAdvancedFilter) AsAdvancedFilter() (*AdvancedFilter, bool) { + return nil, false +} + +// AsBasicAdvancedFilter is the BasicAdvancedFilter implementation for NumberLessThanOrEqualsAdvancedFilter. +func (nltoeaf NumberLessThanOrEqualsAdvancedFilter) AsBasicAdvancedFilter() (BasicAdvancedFilter, bool) { + return &nltoeaf, true +} + +// NumberNotInAdvancedFilter numberNotIn Filter +type NumberNotInAdvancedFilter struct { + // Values - The set of filter values + Values *[]float64 `json:"values,omitempty"` + // Key - The filter key. Represents an event property with up to two levels of nesting. + Key *string `json:"key,omitempty"` + // OperatorType - Possible values include: 'OperatorTypeAdvancedFilter', 'OperatorTypeNumberIn', 'OperatorTypeNumberNotIn', 'OperatorTypeNumberLessThan', 'OperatorTypeNumberGreaterThan', 'OperatorTypeNumberLessThanOrEquals', 'OperatorTypeNumberGreaterThanOrEquals', 'OperatorTypeBoolEquals', 'OperatorTypeStringIn', 'OperatorTypeStringNotIn', 'OperatorTypeStringBeginsWith', 'OperatorTypeStringEndsWith', 'OperatorTypeStringContains' + OperatorType OperatorType `json:"operatorType,omitempty"` +} + +// MarshalJSON is the custom marshaler for NumberNotInAdvancedFilter. 
+func (nniaf NumberNotInAdvancedFilter) MarshalJSON() ([]byte, error) { + nniaf.OperatorType = OperatorTypeNumberNotIn + objectMap := make(map[string]interface{}) + if nniaf.Values != nil { + objectMap["values"] = nniaf.Values + } + if nniaf.Key != nil { + objectMap["key"] = nniaf.Key + } + if nniaf.OperatorType != "" { + objectMap["operatorType"] = nniaf.OperatorType + } + return json.Marshal(objectMap) +} + +// AsNumberInAdvancedFilter is the BasicAdvancedFilter implementation for NumberNotInAdvancedFilter. +func (nniaf NumberNotInAdvancedFilter) AsNumberInAdvancedFilter() (*NumberInAdvancedFilter, bool) { + return nil, false +} + +// AsNumberNotInAdvancedFilter is the BasicAdvancedFilter implementation for NumberNotInAdvancedFilter. +func (nniaf NumberNotInAdvancedFilter) AsNumberNotInAdvancedFilter() (*NumberNotInAdvancedFilter, bool) { + return &nniaf, true +} + +// AsNumberLessThanAdvancedFilter is the BasicAdvancedFilter implementation for NumberNotInAdvancedFilter. +func (nniaf NumberNotInAdvancedFilter) AsNumberLessThanAdvancedFilter() (*NumberLessThanAdvancedFilter, bool) { + return nil, false +} + +// AsNumberGreaterThanAdvancedFilter is the BasicAdvancedFilter implementation for NumberNotInAdvancedFilter. +func (nniaf NumberNotInAdvancedFilter) AsNumberGreaterThanAdvancedFilter() (*NumberGreaterThanAdvancedFilter, bool) { + return nil, false +} + +// AsNumberLessThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for NumberNotInAdvancedFilter. +func (nniaf NumberNotInAdvancedFilter) AsNumberLessThanOrEqualsAdvancedFilter() (*NumberLessThanOrEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsNumberGreaterThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for NumberNotInAdvancedFilter. 
+func (nniaf NumberNotInAdvancedFilter) AsNumberGreaterThanOrEqualsAdvancedFilter() (*NumberGreaterThanOrEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsBoolEqualsAdvancedFilter is the BasicAdvancedFilter implementation for NumberNotInAdvancedFilter. +func (nniaf NumberNotInAdvancedFilter) AsBoolEqualsAdvancedFilter() (*BoolEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsStringInAdvancedFilter is the BasicAdvancedFilter implementation for NumberNotInAdvancedFilter. +func (nniaf NumberNotInAdvancedFilter) AsStringInAdvancedFilter() (*StringInAdvancedFilter, bool) { + return nil, false +} + +// AsStringNotInAdvancedFilter is the BasicAdvancedFilter implementation for NumberNotInAdvancedFilter. +func (nniaf NumberNotInAdvancedFilter) AsStringNotInAdvancedFilter() (*StringNotInAdvancedFilter, bool) { + return nil, false +} + +// AsStringBeginsWithAdvancedFilter is the BasicAdvancedFilter implementation for NumberNotInAdvancedFilter. +func (nniaf NumberNotInAdvancedFilter) AsStringBeginsWithAdvancedFilter() (*StringBeginsWithAdvancedFilter, bool) { + return nil, false +} + +// AsStringEndsWithAdvancedFilter is the BasicAdvancedFilter implementation for NumberNotInAdvancedFilter. +func (nniaf NumberNotInAdvancedFilter) AsStringEndsWithAdvancedFilter() (*StringEndsWithAdvancedFilter, bool) { + return nil, false +} + +// AsStringContainsAdvancedFilter is the BasicAdvancedFilter implementation for NumberNotInAdvancedFilter. +func (nniaf NumberNotInAdvancedFilter) AsStringContainsAdvancedFilter() (*StringContainsAdvancedFilter, bool) { + return nil, false +} + +// AsAdvancedFilter is the BasicAdvancedFilter implementation for NumberNotInAdvancedFilter. +func (nniaf NumberNotInAdvancedFilter) AsAdvancedFilter() (*AdvancedFilter, bool) { + return nil, false +} + +// AsBasicAdvancedFilter is the BasicAdvancedFilter implementation for NumberNotInAdvancedFilter. 
+func (nniaf NumberNotInAdvancedFilter) AsBasicAdvancedFilter() (BasicAdvancedFilter, bool) { + return &nniaf, true +} + +// Operation represents an operation returned by the GetOperations request +type Operation struct { + // Name - Name of the operation + Name *string `json:"name,omitempty"` + // Display - Display name of the operation + Display *OperationInfo `json:"display,omitempty"` + // Origin - Origin of the operation + Origin *string `json:"origin,omitempty"` + // Properties - Properties of the operation + Properties interface{} `json:"properties,omitempty"` +} + +// OperationInfo information about an operation +type OperationInfo struct { + // Provider - Name of the provider + Provider *string `json:"provider,omitempty"` + // Resource - Name of the resource type + Resource *string `json:"resource,omitempty"` + // Operation - Name of the operation + Operation *string `json:"operation,omitempty"` + // Description - Description of the operation + Description *string `json:"description,omitempty"` +} + +// OperationsListResult result of the List Operations operation +type OperationsListResult struct { + autorest.Response `json:"-"` + // Value - A collection of operations + Value *[]Operation `json:"value,omitempty"` +} + +// Resource definition of a Resource +type Resource struct { + // ID - Fully qualified identifier of the resource + ID *string `json:"id,omitempty"` + // Name - Name of the resource + Name *string `json:"name,omitempty"` + // Type - Type of the resource + Type *string `json:"type,omitempty"` +} + +// RetryPolicy information about the retry policy for an event subscription +type RetryPolicy struct { + // MaxDeliveryAttempts - Maximum number of delivery retry attempts for events. + MaxDeliveryAttempts *int32 `json:"maxDeliveryAttempts,omitempty"` + // EventTimeToLiveInMinutes - Time To Live (in minutes) for events. 
+ EventTimeToLiveInMinutes *int32 `json:"eventTimeToLiveInMinutes,omitempty"` +} + +// StorageBlobDeadLetterDestination information about the storage blob based dead letter destination. +type StorageBlobDeadLetterDestination struct { + // StorageBlobDeadLetterDestinationProperties - The properties of the Storage Blob based deadletter destination + *StorageBlobDeadLetterDestinationProperties `json:"properties,omitempty"` + // EndpointType - Possible values include: 'EndpointTypeDeadLetterDestination', 'EndpointTypeStorageBlob' + EndpointType EndpointTypeBasicDeadLetterDestination `json:"endpointType,omitempty"` +} + +// MarshalJSON is the custom marshaler for StorageBlobDeadLetterDestination. +func (sbdld StorageBlobDeadLetterDestination) MarshalJSON() ([]byte, error) { + sbdld.EndpointType = EndpointTypeStorageBlob + objectMap := make(map[string]interface{}) + if sbdld.StorageBlobDeadLetterDestinationProperties != nil { + objectMap["properties"] = sbdld.StorageBlobDeadLetterDestinationProperties + } + if sbdld.EndpointType != "" { + objectMap["endpointType"] = sbdld.EndpointType + } + return json.Marshal(objectMap) +} + +// AsStorageBlobDeadLetterDestination is the BasicDeadLetterDestination implementation for StorageBlobDeadLetterDestination. +func (sbdld StorageBlobDeadLetterDestination) AsStorageBlobDeadLetterDestination() (*StorageBlobDeadLetterDestination, bool) { + return &sbdld, true +} + +// AsDeadLetterDestination is the BasicDeadLetterDestination implementation for StorageBlobDeadLetterDestination. +func (sbdld StorageBlobDeadLetterDestination) AsDeadLetterDestination() (*DeadLetterDestination, bool) { + return nil, false +} + +// AsBasicDeadLetterDestination is the BasicDeadLetterDestination implementation for StorageBlobDeadLetterDestination. 
+func (sbdld StorageBlobDeadLetterDestination) AsBasicDeadLetterDestination() (BasicDeadLetterDestination, bool) { + return &sbdld, true +} + +// UnmarshalJSON is the custom unmarshaler for StorageBlobDeadLetterDestination struct. +func (sbdld *StorageBlobDeadLetterDestination) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var storageBlobDeadLetterDestinationProperties StorageBlobDeadLetterDestinationProperties + err = json.Unmarshal(*v, &storageBlobDeadLetterDestinationProperties) + if err != nil { + return err + } + sbdld.StorageBlobDeadLetterDestinationProperties = &storageBlobDeadLetterDestinationProperties + } + case "endpointType": + if v != nil { + var endpointType EndpointTypeBasicDeadLetterDestination + err = json.Unmarshal(*v, &endpointType) + if err != nil { + return err + } + sbdld.EndpointType = endpointType + } + } + } + + return nil +} + +// StorageBlobDeadLetterDestinationProperties properties of the storage blob based dead letter destination. +type StorageBlobDeadLetterDestinationProperties struct { + // ResourceID - The Azure Resource ID of the storage account that is the destination of the deadletter events + ResourceID *string `json:"resourceId,omitempty"` + // BlobContainerName - The name of the Storage blob container that is the destination of the deadletter events + BlobContainerName *string `json:"blobContainerName,omitempty"` +} + +// StorageQueueEventSubscriptionDestination information about the storage queue destination for an event +// subscription. 
+type StorageQueueEventSubscriptionDestination struct { + // StorageQueueEventSubscriptionDestinationProperties - Storage Queue Properties of the event subscription destination + *StorageQueueEventSubscriptionDestinationProperties `json:"properties,omitempty"` + // EndpointType - Possible values include: 'EndpointTypeEventSubscriptionDestination', 'EndpointTypeWebHook', 'EndpointTypeEventHub', 'EndpointTypeStorageQueue', 'EndpointTypeHybridConnection' + EndpointType EndpointType `json:"endpointType,omitempty"` +} + +// MarshalJSON is the custom marshaler for StorageQueueEventSubscriptionDestination. +func (sqesd StorageQueueEventSubscriptionDestination) MarshalJSON() ([]byte, error) { + sqesd.EndpointType = EndpointTypeStorageQueue + objectMap := make(map[string]interface{}) + if sqesd.StorageQueueEventSubscriptionDestinationProperties != nil { + objectMap["properties"] = sqesd.StorageQueueEventSubscriptionDestinationProperties + } + if sqesd.EndpointType != "" { + objectMap["endpointType"] = sqesd.EndpointType + } + return json.Marshal(objectMap) +} + +// AsWebHookEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for StorageQueueEventSubscriptionDestination. +func (sqesd StorageQueueEventSubscriptionDestination) AsWebHookEventSubscriptionDestination() (*WebHookEventSubscriptionDestination, bool) { + return nil, false +} + +// AsEventHubEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for StorageQueueEventSubscriptionDestination. +func (sqesd StorageQueueEventSubscriptionDestination) AsEventHubEventSubscriptionDestination() (*EventHubEventSubscriptionDestination, bool) { + return nil, false +} + +// AsStorageQueueEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for StorageQueueEventSubscriptionDestination. 
+func (sqesd StorageQueueEventSubscriptionDestination) AsStorageQueueEventSubscriptionDestination() (*StorageQueueEventSubscriptionDestination, bool) { + return &sqesd, true +} + +// AsHybridConnectionEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for StorageQueueEventSubscriptionDestination. +func (sqesd StorageQueueEventSubscriptionDestination) AsHybridConnectionEventSubscriptionDestination() (*HybridConnectionEventSubscriptionDestination, bool) { + return nil, false +} + +// AsEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for StorageQueueEventSubscriptionDestination. +func (sqesd StorageQueueEventSubscriptionDestination) AsEventSubscriptionDestination() (*EventSubscriptionDestination, bool) { + return nil, false +} + +// AsBasicEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for StorageQueueEventSubscriptionDestination. +func (sqesd StorageQueueEventSubscriptionDestination) AsBasicEventSubscriptionDestination() (BasicEventSubscriptionDestination, bool) { + return &sqesd, true +} + +// UnmarshalJSON is the custom unmarshaler for StorageQueueEventSubscriptionDestination struct. 
+func (sqesd *StorageQueueEventSubscriptionDestination) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var storageQueueEventSubscriptionDestinationProperties StorageQueueEventSubscriptionDestinationProperties + err = json.Unmarshal(*v, &storageQueueEventSubscriptionDestinationProperties) + if err != nil { + return err + } + sqesd.StorageQueueEventSubscriptionDestinationProperties = &storageQueueEventSubscriptionDestinationProperties + } + case "endpointType": + if v != nil { + var endpointType EndpointType + err = json.Unmarshal(*v, &endpointType) + if err != nil { + return err + } + sqesd.EndpointType = endpointType + } + } + } + + return nil +} + +// StorageQueueEventSubscriptionDestinationProperties the properties for a storage queue destination. +type StorageQueueEventSubscriptionDestinationProperties struct { + // ResourceID - The Azure Resource ID of the storage account that contains the queue that is the destination of an event subscription. + ResourceID *string `json:"resourceId,omitempty"` + // QueueName - The name of the Storage queue under a storage account that is the destination of an event subscription. + QueueName *string `json:"queueName,omitempty"` +} + +// StringBeginsWithAdvancedFilter stringBeginsWith Filter +type StringBeginsWithAdvancedFilter struct { + // Values - The set of filter values + Values *[]string `json:"values,omitempty"` + // Key - The filter key. Represents an event property with up to two levels of nesting. 
+ Key *string `json:"key,omitempty"` + // OperatorType - Possible values include: 'OperatorTypeAdvancedFilter', 'OperatorTypeNumberIn', 'OperatorTypeNumberNotIn', 'OperatorTypeNumberLessThan', 'OperatorTypeNumberGreaterThan', 'OperatorTypeNumberLessThanOrEquals', 'OperatorTypeNumberGreaterThanOrEquals', 'OperatorTypeBoolEquals', 'OperatorTypeStringIn', 'OperatorTypeStringNotIn', 'OperatorTypeStringBeginsWith', 'OperatorTypeStringEndsWith', 'OperatorTypeStringContains' + OperatorType OperatorType `json:"operatorType,omitempty"` +} + +// MarshalJSON is the custom marshaler for StringBeginsWithAdvancedFilter. +func (sbwaf StringBeginsWithAdvancedFilter) MarshalJSON() ([]byte, error) { + sbwaf.OperatorType = OperatorTypeStringBeginsWith + objectMap := make(map[string]interface{}) + if sbwaf.Values != nil { + objectMap["values"] = sbwaf.Values + } + if sbwaf.Key != nil { + objectMap["key"] = sbwaf.Key + } + if sbwaf.OperatorType != "" { + objectMap["operatorType"] = sbwaf.OperatorType + } + return json.Marshal(objectMap) +} + +// AsNumberInAdvancedFilter is the BasicAdvancedFilter implementation for StringBeginsWithAdvancedFilter. +func (sbwaf StringBeginsWithAdvancedFilter) AsNumberInAdvancedFilter() (*NumberInAdvancedFilter, bool) { + return nil, false +} + +// AsNumberNotInAdvancedFilter is the BasicAdvancedFilter implementation for StringBeginsWithAdvancedFilter. +func (sbwaf StringBeginsWithAdvancedFilter) AsNumberNotInAdvancedFilter() (*NumberNotInAdvancedFilter, bool) { + return nil, false +} + +// AsNumberLessThanAdvancedFilter is the BasicAdvancedFilter implementation for StringBeginsWithAdvancedFilter. +func (sbwaf StringBeginsWithAdvancedFilter) AsNumberLessThanAdvancedFilter() (*NumberLessThanAdvancedFilter, bool) { + return nil, false +} + +// AsNumberGreaterThanAdvancedFilter is the BasicAdvancedFilter implementation for StringBeginsWithAdvancedFilter. 
+func (sbwaf StringBeginsWithAdvancedFilter) AsNumberGreaterThanAdvancedFilter() (*NumberGreaterThanAdvancedFilter, bool) { + return nil, false +} + +// AsNumberLessThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for StringBeginsWithAdvancedFilter. +func (sbwaf StringBeginsWithAdvancedFilter) AsNumberLessThanOrEqualsAdvancedFilter() (*NumberLessThanOrEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsNumberGreaterThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for StringBeginsWithAdvancedFilter. +func (sbwaf StringBeginsWithAdvancedFilter) AsNumberGreaterThanOrEqualsAdvancedFilter() (*NumberGreaterThanOrEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsBoolEqualsAdvancedFilter is the BasicAdvancedFilter implementation for StringBeginsWithAdvancedFilter. +func (sbwaf StringBeginsWithAdvancedFilter) AsBoolEqualsAdvancedFilter() (*BoolEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsStringInAdvancedFilter is the BasicAdvancedFilter implementation for StringBeginsWithAdvancedFilter. +func (sbwaf StringBeginsWithAdvancedFilter) AsStringInAdvancedFilter() (*StringInAdvancedFilter, bool) { + return nil, false +} + +// AsStringNotInAdvancedFilter is the BasicAdvancedFilter implementation for StringBeginsWithAdvancedFilter. +func (sbwaf StringBeginsWithAdvancedFilter) AsStringNotInAdvancedFilter() (*StringNotInAdvancedFilter, bool) { + return nil, false +} + +// AsStringBeginsWithAdvancedFilter is the BasicAdvancedFilter implementation for StringBeginsWithAdvancedFilter. +func (sbwaf StringBeginsWithAdvancedFilter) AsStringBeginsWithAdvancedFilter() (*StringBeginsWithAdvancedFilter, bool) { + return &sbwaf, true +} + +// AsStringEndsWithAdvancedFilter is the BasicAdvancedFilter implementation for StringBeginsWithAdvancedFilter. 
+func (sbwaf StringBeginsWithAdvancedFilter) AsStringEndsWithAdvancedFilter() (*StringEndsWithAdvancedFilter, bool) { + return nil, false +} + +// AsStringContainsAdvancedFilter is the BasicAdvancedFilter implementation for StringBeginsWithAdvancedFilter. +func (sbwaf StringBeginsWithAdvancedFilter) AsStringContainsAdvancedFilter() (*StringContainsAdvancedFilter, bool) { + return nil, false +} + +// AsAdvancedFilter is the BasicAdvancedFilter implementation for StringBeginsWithAdvancedFilter. +func (sbwaf StringBeginsWithAdvancedFilter) AsAdvancedFilter() (*AdvancedFilter, bool) { + return nil, false +} + +// AsBasicAdvancedFilter is the BasicAdvancedFilter implementation for StringBeginsWithAdvancedFilter. +func (sbwaf StringBeginsWithAdvancedFilter) AsBasicAdvancedFilter() (BasicAdvancedFilter, bool) { + return &sbwaf, true +} + +// StringContainsAdvancedFilter stringContains Filter +type StringContainsAdvancedFilter struct { + // Values - The set of filter values + Values *[]string `json:"values,omitempty"` + // Key - The filter key. Represents an event property with up to two levels of nesting. + Key *string `json:"key,omitempty"` + // OperatorType - Possible values include: 'OperatorTypeAdvancedFilter', 'OperatorTypeNumberIn', 'OperatorTypeNumberNotIn', 'OperatorTypeNumberLessThan', 'OperatorTypeNumberGreaterThan', 'OperatorTypeNumberLessThanOrEquals', 'OperatorTypeNumberGreaterThanOrEquals', 'OperatorTypeBoolEquals', 'OperatorTypeStringIn', 'OperatorTypeStringNotIn', 'OperatorTypeStringBeginsWith', 'OperatorTypeStringEndsWith', 'OperatorTypeStringContains' + OperatorType OperatorType `json:"operatorType,omitempty"` +} + +// MarshalJSON is the custom marshaler for StringContainsAdvancedFilter. 
+func (scaf StringContainsAdvancedFilter) MarshalJSON() ([]byte, error) { + scaf.OperatorType = OperatorTypeStringContains + objectMap := make(map[string]interface{}) + if scaf.Values != nil { + objectMap["values"] = scaf.Values + } + if scaf.Key != nil { + objectMap["key"] = scaf.Key + } + if scaf.OperatorType != "" { + objectMap["operatorType"] = scaf.OperatorType + } + return json.Marshal(objectMap) +} + +// AsNumberInAdvancedFilter is the BasicAdvancedFilter implementation for StringContainsAdvancedFilter. +func (scaf StringContainsAdvancedFilter) AsNumberInAdvancedFilter() (*NumberInAdvancedFilter, bool) { + return nil, false +} + +// AsNumberNotInAdvancedFilter is the BasicAdvancedFilter implementation for StringContainsAdvancedFilter. +func (scaf StringContainsAdvancedFilter) AsNumberNotInAdvancedFilter() (*NumberNotInAdvancedFilter, bool) { + return nil, false +} + +// AsNumberLessThanAdvancedFilter is the BasicAdvancedFilter implementation for StringContainsAdvancedFilter. +func (scaf StringContainsAdvancedFilter) AsNumberLessThanAdvancedFilter() (*NumberLessThanAdvancedFilter, bool) { + return nil, false +} + +// AsNumberGreaterThanAdvancedFilter is the BasicAdvancedFilter implementation for StringContainsAdvancedFilter. +func (scaf StringContainsAdvancedFilter) AsNumberGreaterThanAdvancedFilter() (*NumberGreaterThanAdvancedFilter, bool) { + return nil, false +} + +// AsNumberLessThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for StringContainsAdvancedFilter. +func (scaf StringContainsAdvancedFilter) AsNumberLessThanOrEqualsAdvancedFilter() (*NumberLessThanOrEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsNumberGreaterThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for StringContainsAdvancedFilter. 
+func (scaf StringContainsAdvancedFilter) AsNumberGreaterThanOrEqualsAdvancedFilter() (*NumberGreaterThanOrEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsBoolEqualsAdvancedFilter is the BasicAdvancedFilter implementation for StringContainsAdvancedFilter. +func (scaf StringContainsAdvancedFilter) AsBoolEqualsAdvancedFilter() (*BoolEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsStringInAdvancedFilter is the BasicAdvancedFilter implementation for StringContainsAdvancedFilter. +func (scaf StringContainsAdvancedFilter) AsStringInAdvancedFilter() (*StringInAdvancedFilter, bool) { + return nil, false +} + +// AsStringNotInAdvancedFilter is the BasicAdvancedFilter implementation for StringContainsAdvancedFilter. +func (scaf StringContainsAdvancedFilter) AsStringNotInAdvancedFilter() (*StringNotInAdvancedFilter, bool) { + return nil, false +} + +// AsStringBeginsWithAdvancedFilter is the BasicAdvancedFilter implementation for StringContainsAdvancedFilter. +func (scaf StringContainsAdvancedFilter) AsStringBeginsWithAdvancedFilter() (*StringBeginsWithAdvancedFilter, bool) { + return nil, false +} + +// AsStringEndsWithAdvancedFilter is the BasicAdvancedFilter implementation for StringContainsAdvancedFilter. +func (scaf StringContainsAdvancedFilter) AsStringEndsWithAdvancedFilter() (*StringEndsWithAdvancedFilter, bool) { + return nil, false +} + +// AsStringContainsAdvancedFilter is the BasicAdvancedFilter implementation for StringContainsAdvancedFilter. +func (scaf StringContainsAdvancedFilter) AsStringContainsAdvancedFilter() (*StringContainsAdvancedFilter, bool) { + return &scaf, true +} + +// AsAdvancedFilter is the BasicAdvancedFilter implementation for StringContainsAdvancedFilter. +func (scaf StringContainsAdvancedFilter) AsAdvancedFilter() (*AdvancedFilter, bool) { + return nil, false +} + +// AsBasicAdvancedFilter is the BasicAdvancedFilter implementation for StringContainsAdvancedFilter. 
+func (scaf StringContainsAdvancedFilter) AsBasicAdvancedFilter() (BasicAdvancedFilter, bool) { + return &scaf, true +} + +// StringEndsWithAdvancedFilter stringEndsWith Filter +type StringEndsWithAdvancedFilter struct { + // Values - The set of filter values + Values *[]string `json:"values,omitempty"` + // Key - The filter key. Represents an event property with up to two levels of nesting. + Key *string `json:"key,omitempty"` + // OperatorType - Possible values include: 'OperatorTypeAdvancedFilter', 'OperatorTypeNumberIn', 'OperatorTypeNumberNotIn', 'OperatorTypeNumberLessThan', 'OperatorTypeNumberGreaterThan', 'OperatorTypeNumberLessThanOrEquals', 'OperatorTypeNumberGreaterThanOrEquals', 'OperatorTypeBoolEquals', 'OperatorTypeStringIn', 'OperatorTypeStringNotIn', 'OperatorTypeStringBeginsWith', 'OperatorTypeStringEndsWith', 'OperatorTypeStringContains' + OperatorType OperatorType `json:"operatorType,omitempty"` +} + +// MarshalJSON is the custom marshaler for StringEndsWithAdvancedFilter. +func (sewaf StringEndsWithAdvancedFilter) MarshalJSON() ([]byte, error) { + sewaf.OperatorType = OperatorTypeStringEndsWith + objectMap := make(map[string]interface{}) + if sewaf.Values != nil { + objectMap["values"] = sewaf.Values + } + if sewaf.Key != nil { + objectMap["key"] = sewaf.Key + } + if sewaf.OperatorType != "" { + objectMap["operatorType"] = sewaf.OperatorType + } + return json.Marshal(objectMap) +} + +// AsNumberInAdvancedFilter is the BasicAdvancedFilter implementation for StringEndsWithAdvancedFilter. +func (sewaf StringEndsWithAdvancedFilter) AsNumberInAdvancedFilter() (*NumberInAdvancedFilter, bool) { + return nil, false +} + +// AsNumberNotInAdvancedFilter is the BasicAdvancedFilter implementation for StringEndsWithAdvancedFilter. 
+func (sewaf StringEndsWithAdvancedFilter) AsNumberNotInAdvancedFilter() (*NumberNotInAdvancedFilter, bool) { + return nil, false +} + +// AsNumberLessThanAdvancedFilter is the BasicAdvancedFilter implementation for StringEndsWithAdvancedFilter. +func (sewaf StringEndsWithAdvancedFilter) AsNumberLessThanAdvancedFilter() (*NumberLessThanAdvancedFilter, bool) { + return nil, false +} + +// AsNumberGreaterThanAdvancedFilter is the BasicAdvancedFilter implementation for StringEndsWithAdvancedFilter. +func (sewaf StringEndsWithAdvancedFilter) AsNumberGreaterThanAdvancedFilter() (*NumberGreaterThanAdvancedFilter, bool) { + return nil, false +} + +// AsNumberLessThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for StringEndsWithAdvancedFilter. +func (sewaf StringEndsWithAdvancedFilter) AsNumberLessThanOrEqualsAdvancedFilter() (*NumberLessThanOrEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsNumberGreaterThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for StringEndsWithAdvancedFilter. +func (sewaf StringEndsWithAdvancedFilter) AsNumberGreaterThanOrEqualsAdvancedFilter() (*NumberGreaterThanOrEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsBoolEqualsAdvancedFilter is the BasicAdvancedFilter implementation for StringEndsWithAdvancedFilter. +func (sewaf StringEndsWithAdvancedFilter) AsBoolEqualsAdvancedFilter() (*BoolEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsStringInAdvancedFilter is the BasicAdvancedFilter implementation for StringEndsWithAdvancedFilter. +func (sewaf StringEndsWithAdvancedFilter) AsStringInAdvancedFilter() (*StringInAdvancedFilter, bool) { + return nil, false +} + +// AsStringNotInAdvancedFilter is the BasicAdvancedFilter implementation for StringEndsWithAdvancedFilter. 
+func (sewaf StringEndsWithAdvancedFilter) AsStringNotInAdvancedFilter() (*StringNotInAdvancedFilter, bool) { + return nil, false +} + +// AsStringBeginsWithAdvancedFilter is the BasicAdvancedFilter implementation for StringEndsWithAdvancedFilter. +func (sewaf StringEndsWithAdvancedFilter) AsStringBeginsWithAdvancedFilter() (*StringBeginsWithAdvancedFilter, bool) { + return nil, false +} + +// AsStringEndsWithAdvancedFilter is the BasicAdvancedFilter implementation for StringEndsWithAdvancedFilter. +func (sewaf StringEndsWithAdvancedFilter) AsStringEndsWithAdvancedFilter() (*StringEndsWithAdvancedFilter, bool) { + return &sewaf, true +} + +// AsStringContainsAdvancedFilter is the BasicAdvancedFilter implementation for StringEndsWithAdvancedFilter. +func (sewaf StringEndsWithAdvancedFilter) AsStringContainsAdvancedFilter() (*StringContainsAdvancedFilter, bool) { + return nil, false +} + +// AsAdvancedFilter is the BasicAdvancedFilter implementation for StringEndsWithAdvancedFilter. +func (sewaf StringEndsWithAdvancedFilter) AsAdvancedFilter() (*AdvancedFilter, bool) { + return nil, false +} + +// AsBasicAdvancedFilter is the BasicAdvancedFilter implementation for StringEndsWithAdvancedFilter. +func (sewaf StringEndsWithAdvancedFilter) AsBasicAdvancedFilter() (BasicAdvancedFilter, bool) { + return &sewaf, true +} + +// StringInAdvancedFilter stringIn Filter +type StringInAdvancedFilter struct { + // Values - The set of filter values + Values *[]string `json:"values,omitempty"` + // Key - The filter key. Represents an event property with up to two levels of nesting. 
+ Key *string `json:"key,omitempty"` + // OperatorType - Possible values include: 'OperatorTypeAdvancedFilter', 'OperatorTypeNumberIn', 'OperatorTypeNumberNotIn', 'OperatorTypeNumberLessThan', 'OperatorTypeNumberGreaterThan', 'OperatorTypeNumberLessThanOrEquals', 'OperatorTypeNumberGreaterThanOrEquals', 'OperatorTypeBoolEquals', 'OperatorTypeStringIn', 'OperatorTypeStringNotIn', 'OperatorTypeStringBeginsWith', 'OperatorTypeStringEndsWith', 'OperatorTypeStringContains' + OperatorType OperatorType `json:"operatorType,omitempty"` +} + +// MarshalJSON is the custom marshaler for StringInAdvancedFilter. +func (siaf StringInAdvancedFilter) MarshalJSON() ([]byte, error) { + siaf.OperatorType = OperatorTypeStringIn + objectMap := make(map[string]interface{}) + if siaf.Values != nil { + objectMap["values"] = siaf.Values + } + if siaf.Key != nil { + objectMap["key"] = siaf.Key + } + if siaf.OperatorType != "" { + objectMap["operatorType"] = siaf.OperatorType + } + return json.Marshal(objectMap) +} + +// AsNumberInAdvancedFilter is the BasicAdvancedFilter implementation for StringInAdvancedFilter. +func (siaf StringInAdvancedFilter) AsNumberInAdvancedFilter() (*NumberInAdvancedFilter, bool) { + return nil, false +} + +// AsNumberNotInAdvancedFilter is the BasicAdvancedFilter implementation for StringInAdvancedFilter. +func (siaf StringInAdvancedFilter) AsNumberNotInAdvancedFilter() (*NumberNotInAdvancedFilter, bool) { + return nil, false +} + +// AsNumberLessThanAdvancedFilter is the BasicAdvancedFilter implementation for StringInAdvancedFilter. +func (siaf StringInAdvancedFilter) AsNumberLessThanAdvancedFilter() (*NumberLessThanAdvancedFilter, bool) { + return nil, false +} + +// AsNumberGreaterThanAdvancedFilter is the BasicAdvancedFilter implementation for StringInAdvancedFilter. 
+func (siaf StringInAdvancedFilter) AsNumberGreaterThanAdvancedFilter() (*NumberGreaterThanAdvancedFilter, bool) { + return nil, false +} + +// AsNumberLessThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for StringInAdvancedFilter. +func (siaf StringInAdvancedFilter) AsNumberLessThanOrEqualsAdvancedFilter() (*NumberLessThanOrEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsNumberGreaterThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for StringInAdvancedFilter. +func (siaf StringInAdvancedFilter) AsNumberGreaterThanOrEqualsAdvancedFilter() (*NumberGreaterThanOrEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsBoolEqualsAdvancedFilter is the BasicAdvancedFilter implementation for StringInAdvancedFilter. +func (siaf StringInAdvancedFilter) AsBoolEqualsAdvancedFilter() (*BoolEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsStringInAdvancedFilter is the BasicAdvancedFilter implementation for StringInAdvancedFilter. +func (siaf StringInAdvancedFilter) AsStringInAdvancedFilter() (*StringInAdvancedFilter, bool) { + return &siaf, true +} + +// AsStringNotInAdvancedFilter is the BasicAdvancedFilter implementation for StringInAdvancedFilter. +func (siaf StringInAdvancedFilter) AsStringNotInAdvancedFilter() (*StringNotInAdvancedFilter, bool) { + return nil, false +} + +// AsStringBeginsWithAdvancedFilter is the BasicAdvancedFilter implementation for StringInAdvancedFilter. +func (siaf StringInAdvancedFilter) AsStringBeginsWithAdvancedFilter() (*StringBeginsWithAdvancedFilter, bool) { + return nil, false +} + +// AsStringEndsWithAdvancedFilter is the BasicAdvancedFilter implementation for StringInAdvancedFilter. +func (siaf StringInAdvancedFilter) AsStringEndsWithAdvancedFilter() (*StringEndsWithAdvancedFilter, bool) { + return nil, false +} + +// AsStringContainsAdvancedFilter is the BasicAdvancedFilter implementation for StringInAdvancedFilter. 
+func (siaf StringInAdvancedFilter) AsStringContainsAdvancedFilter() (*StringContainsAdvancedFilter, bool) { + return nil, false +} + +// AsAdvancedFilter is the BasicAdvancedFilter implementation for StringInAdvancedFilter. +func (siaf StringInAdvancedFilter) AsAdvancedFilter() (*AdvancedFilter, bool) { + return nil, false +} + +// AsBasicAdvancedFilter is the BasicAdvancedFilter implementation for StringInAdvancedFilter. +func (siaf StringInAdvancedFilter) AsBasicAdvancedFilter() (BasicAdvancedFilter, bool) { + return &siaf, true +} + +// StringNotInAdvancedFilter stringNotIn Filter +type StringNotInAdvancedFilter struct { + // Values - The set of filter values + Values *[]string `json:"values,omitempty"` + // Key - The filter key. Represents an event property with up to two levels of nesting. + Key *string `json:"key,omitempty"` + // OperatorType - Possible values include: 'OperatorTypeAdvancedFilter', 'OperatorTypeNumberIn', 'OperatorTypeNumberNotIn', 'OperatorTypeNumberLessThan', 'OperatorTypeNumberGreaterThan', 'OperatorTypeNumberLessThanOrEquals', 'OperatorTypeNumberGreaterThanOrEquals', 'OperatorTypeBoolEquals', 'OperatorTypeStringIn', 'OperatorTypeStringNotIn', 'OperatorTypeStringBeginsWith', 'OperatorTypeStringEndsWith', 'OperatorTypeStringContains' + OperatorType OperatorType `json:"operatorType,omitempty"` +} + +// MarshalJSON is the custom marshaler for StringNotInAdvancedFilter. +func (sniaf StringNotInAdvancedFilter) MarshalJSON() ([]byte, error) { + sniaf.OperatorType = OperatorTypeStringNotIn + objectMap := make(map[string]interface{}) + if sniaf.Values != nil { + objectMap["values"] = sniaf.Values + } + if sniaf.Key != nil { + objectMap["key"] = sniaf.Key + } + if sniaf.OperatorType != "" { + objectMap["operatorType"] = sniaf.OperatorType + } + return json.Marshal(objectMap) +} + +// AsNumberInAdvancedFilter is the BasicAdvancedFilter implementation for StringNotInAdvancedFilter. 
+func (sniaf StringNotInAdvancedFilter) AsNumberInAdvancedFilter() (*NumberInAdvancedFilter, bool) { + return nil, false +} + +// AsNumberNotInAdvancedFilter is the BasicAdvancedFilter implementation for StringNotInAdvancedFilter. +func (sniaf StringNotInAdvancedFilter) AsNumberNotInAdvancedFilter() (*NumberNotInAdvancedFilter, bool) { + return nil, false +} + +// AsNumberLessThanAdvancedFilter is the BasicAdvancedFilter implementation for StringNotInAdvancedFilter. +func (sniaf StringNotInAdvancedFilter) AsNumberLessThanAdvancedFilter() (*NumberLessThanAdvancedFilter, bool) { + return nil, false +} + +// AsNumberGreaterThanAdvancedFilter is the BasicAdvancedFilter implementation for StringNotInAdvancedFilter. +func (sniaf StringNotInAdvancedFilter) AsNumberGreaterThanAdvancedFilter() (*NumberGreaterThanAdvancedFilter, bool) { + return nil, false +} + +// AsNumberLessThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for StringNotInAdvancedFilter. +func (sniaf StringNotInAdvancedFilter) AsNumberLessThanOrEqualsAdvancedFilter() (*NumberLessThanOrEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsNumberGreaterThanOrEqualsAdvancedFilter is the BasicAdvancedFilter implementation for StringNotInAdvancedFilter. +func (sniaf StringNotInAdvancedFilter) AsNumberGreaterThanOrEqualsAdvancedFilter() (*NumberGreaterThanOrEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsBoolEqualsAdvancedFilter is the BasicAdvancedFilter implementation for StringNotInAdvancedFilter. +func (sniaf StringNotInAdvancedFilter) AsBoolEqualsAdvancedFilter() (*BoolEqualsAdvancedFilter, bool) { + return nil, false +} + +// AsStringInAdvancedFilter is the BasicAdvancedFilter implementation for StringNotInAdvancedFilter. +func (sniaf StringNotInAdvancedFilter) AsStringInAdvancedFilter() (*StringInAdvancedFilter, bool) { + return nil, false +} + +// AsStringNotInAdvancedFilter is the BasicAdvancedFilter implementation for StringNotInAdvancedFilter. 
+func (sniaf StringNotInAdvancedFilter) AsStringNotInAdvancedFilter() (*StringNotInAdvancedFilter, bool) { + return &sniaf, true +} + +// AsStringBeginsWithAdvancedFilter is the BasicAdvancedFilter implementation for StringNotInAdvancedFilter. +func (sniaf StringNotInAdvancedFilter) AsStringBeginsWithAdvancedFilter() (*StringBeginsWithAdvancedFilter, bool) { + return nil, false +} + +// AsStringEndsWithAdvancedFilter is the BasicAdvancedFilter implementation for StringNotInAdvancedFilter. +func (sniaf StringNotInAdvancedFilter) AsStringEndsWithAdvancedFilter() (*StringEndsWithAdvancedFilter, bool) { + return nil, false +} + +// AsStringContainsAdvancedFilter is the BasicAdvancedFilter implementation for StringNotInAdvancedFilter. +func (sniaf StringNotInAdvancedFilter) AsStringContainsAdvancedFilter() (*StringContainsAdvancedFilter, bool) { + return nil, false +} + +// AsAdvancedFilter is the BasicAdvancedFilter implementation for StringNotInAdvancedFilter. +func (sniaf StringNotInAdvancedFilter) AsAdvancedFilter() (*AdvancedFilter, bool) { + return nil, false +} + +// AsBasicAdvancedFilter is the BasicAdvancedFilter implementation for StringNotInAdvancedFilter. +func (sniaf StringNotInAdvancedFilter) AsBasicAdvancedFilter() (BasicAdvancedFilter, bool) { + return &sniaf, true +} + +// Topic eventGrid Topic +type Topic struct { + autorest.Response `json:"-"` + // TopicProperties - Properties of the topic + *TopicProperties `json:"properties,omitempty"` + // Location - Location of the resource + Location *string `json:"location,omitempty"` + // Tags - Tags of the resource + Tags map[string]*string `json:"tags"` + // ID - Fully qualified identifier of the resource + ID *string `json:"id,omitempty"` + // Name - Name of the resource + Name *string `json:"name,omitempty"` + // Type - Type of the resource + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for Topic. 
+func (t Topic) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if t.TopicProperties != nil { + objectMap["properties"] = t.TopicProperties + } + if t.Location != nil { + objectMap["location"] = t.Location + } + if t.Tags != nil { + objectMap["tags"] = t.Tags + } + if t.ID != nil { + objectMap["id"] = t.ID + } + if t.Name != nil { + objectMap["name"] = t.Name + } + if t.Type != nil { + objectMap["type"] = t.Type + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Topic struct. +func (t *Topic) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var topicProperties TopicProperties + err = json.Unmarshal(*v, &topicProperties) + if err != nil { + return err + } + t.TopicProperties = &topicProperties + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + t.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + t.Tags = tags + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + t.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + t.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + t.Type = &typeVar + } + } + } + + return nil +} + +// TopicProperties properties of the Topic +type TopicProperties struct { + // ProvisioningState - Provisioning state of the topic. 
Possible values include: 'TopicProvisioningStateCreating', 'TopicProvisioningStateUpdating', 'TopicProvisioningStateDeleting', 'TopicProvisioningStateSucceeded', 'TopicProvisioningStateCanceled', 'TopicProvisioningStateFailed' + ProvisioningState TopicProvisioningState `json:"provisioningState,omitempty"` + // Endpoint - Endpoint for the topic. + Endpoint *string `json:"endpoint,omitempty"` + // InputSchema - This determines the format that Event Grid should expect for incoming events published to the topic. Possible values include: 'InputSchemaEventGridSchema', 'InputSchemaCustomEventSchema', 'InputSchemaCloudEventV01Schema' + InputSchema InputSchema `json:"inputSchema,omitempty"` + // InputSchemaMapping - This enables publishing using custom event schemas. An InputSchemaMapping can be specified to map various properties of a source schema to various required properties of the EventGridEvent schema. + InputSchemaMapping BasicInputSchemaMapping `json:"inputSchemaMapping,omitempty"` +} + +// UnmarshalJSON is the custom unmarshaler for TopicProperties struct. 
+func (tp *TopicProperties) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "provisioningState": + if v != nil { + var provisioningState TopicProvisioningState + err = json.Unmarshal(*v, &provisioningState) + if err != nil { + return err + } + tp.ProvisioningState = provisioningState + } + case "endpoint": + if v != nil { + var endpoint string + err = json.Unmarshal(*v, &endpoint) + if err != nil { + return err + } + tp.Endpoint = &endpoint + } + case "inputSchema": + if v != nil { + var inputSchema InputSchema + err = json.Unmarshal(*v, &inputSchema) + if err != nil { + return err + } + tp.InputSchema = inputSchema + } + case "inputSchemaMapping": + if v != nil { + inputSchemaMapping, err := unmarshalBasicInputSchemaMapping(*v) + if err != nil { + return err + } + tp.InputSchemaMapping = inputSchemaMapping + } + } + } + + return nil +} + +// TopicRegenerateKeyRequest topic regenerate share access key request +type TopicRegenerateKeyRequest struct { + // KeyName - Key name to regenerate key1 or key2 + KeyName *string `json:"keyName,omitempty"` +} + +// TopicsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type TopicsCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *TopicsCreateOrUpdateFuture) Result(client TopicsClient) (t Topic, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "eventgrid.TopicsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("eventgrid.TopicsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if t.Response.Response, err = future.GetResult(sender); err == nil && t.Response.Response.StatusCode != http.StatusNoContent { + t, err = client.CreateOrUpdateResponder(t.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "eventgrid.TopicsCreateOrUpdateFuture", "Result", t.Response.Response, "Failure responding to request") + } + } + return +} + +// TopicsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type TopicsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *TopicsDeleteFuture) Result(client TopicsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "eventgrid.TopicsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("eventgrid.TopicsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// TopicSharedAccessKeys shared access keys of the Topic +type TopicSharedAccessKeys struct { + autorest.Response `json:"-"` + // Key1 - Shared access key1 for the topic. + Key1 *string `json:"key1,omitempty"` + // Key2 - Shared access key2 for the topic. 
+ Key2 *string `json:"key2,omitempty"` +} + +// TopicsListResult result of the List Topics operation +type TopicsListResult struct { + autorest.Response `json:"-"` + // Value - A collection of Topics + Value *[]Topic `json:"value,omitempty"` +} + +// TopicsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type TopicsUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *TopicsUpdateFuture) Result(client TopicsClient) (t Topic, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "eventgrid.TopicsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("eventgrid.TopicsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if t.Response.Response, err = future.GetResult(sender); err == nil && t.Response.Response.StatusCode != http.StatusNoContent { + t, err = client.UpdateResponder(t.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "eventgrid.TopicsUpdateFuture", "Result", t.Response.Response, "Failure responding to request") + } + } + return +} + +// TopicTypeInfo properties of a topic type info. +type TopicTypeInfo struct { + autorest.Response `json:"-"` + // TopicTypeProperties - Properties of the topic type info + *TopicTypeProperties `json:"properties,omitempty"` + // ID - Fully qualified identifier of the resource + ID *string `json:"id,omitempty"` + // Name - Name of the resource + Name *string `json:"name,omitempty"` + // Type - Type of the resource + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for TopicTypeInfo. 
+func (tti TopicTypeInfo) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if tti.TopicTypeProperties != nil { + objectMap["properties"] = tti.TopicTypeProperties + } + if tti.ID != nil { + objectMap["id"] = tti.ID + } + if tti.Name != nil { + objectMap["name"] = tti.Name + } + if tti.Type != nil { + objectMap["type"] = tti.Type + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for TopicTypeInfo struct. +func (tti *TopicTypeInfo) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var topicTypeProperties TopicTypeProperties + err = json.Unmarshal(*v, &topicTypeProperties) + if err != nil { + return err + } + tti.TopicTypeProperties = &topicTypeProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + tti.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + tti.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + tti.Type = &typeVar + } + } + } + + return nil +} + +// TopicTypeProperties properties of a topic type. +type TopicTypeProperties struct { + // Provider - Namespace of the provider of the topic type. + Provider *string `json:"provider,omitempty"` + // DisplayName - Display Name for the topic type. + DisplayName *string `json:"displayName,omitempty"` + // Description - Description of the topic type. + Description *string `json:"description,omitempty"` + // ResourceRegionType - Region type of the resource. 
Possible values include: 'RegionalResource', 'GlobalResource' + ResourceRegionType ResourceRegionType `json:"resourceRegionType,omitempty"` + // ProvisioningState - Provisioning state of the topic type. Possible values include: 'TopicTypeProvisioningStateCreating', 'TopicTypeProvisioningStateUpdating', 'TopicTypeProvisioningStateDeleting', 'TopicTypeProvisioningStateSucceeded', 'TopicTypeProvisioningStateCanceled', 'TopicTypeProvisioningStateFailed' + ProvisioningState TopicTypeProvisioningState `json:"provisioningState,omitempty"` + // SupportedLocations - List of locations supported by this topic type. + SupportedLocations *[]string `json:"supportedLocations,omitempty"` +} + +// TopicTypesListResult result of the List Topic Types operation +type TopicTypesListResult struct { + autorest.Response `json:"-"` + // Value - A collection of topic types + Value *[]TopicTypeInfo `json:"value,omitempty"` +} + +// TopicUpdateParameters properties of the Topic update +type TopicUpdateParameters struct { + // Tags - Tags of the resource + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for TopicUpdateParameters. +func (tup TopicUpdateParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if tup.Tags != nil { + objectMap["tags"] = tup.Tags + } + return json.Marshal(objectMap) +} + +// TrackedResource definition of a Tracked Resource +type TrackedResource struct { + // Location - Location of the resource + Location *string `json:"location,omitempty"` + // Tags - Tags of the resource + Tags map[string]*string `json:"tags"` + // ID - Fully qualified identifier of the resource + ID *string `json:"id,omitempty"` + // Name - Name of the resource + Name *string `json:"name,omitempty"` + // Type - Type of the resource + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for TrackedResource. 
+func (tr TrackedResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if tr.Location != nil { + objectMap["location"] = tr.Location + } + if tr.Tags != nil { + objectMap["tags"] = tr.Tags + } + if tr.ID != nil { + objectMap["id"] = tr.ID + } + if tr.Name != nil { + objectMap["name"] = tr.Name + } + if tr.Type != nil { + objectMap["type"] = tr.Type + } + return json.Marshal(objectMap) +} + +// WebHookEventSubscriptionDestination information about the webhook destination for an event subscription +type WebHookEventSubscriptionDestination struct { + // WebHookEventSubscriptionDestinationProperties - WebHook Properties of the event subscription destination + *WebHookEventSubscriptionDestinationProperties `json:"properties,omitempty"` + // EndpointType - Possible values include: 'EndpointTypeEventSubscriptionDestination', 'EndpointTypeWebHook', 'EndpointTypeEventHub', 'EndpointTypeStorageQueue', 'EndpointTypeHybridConnection' + EndpointType EndpointType `json:"endpointType,omitempty"` +} + +// MarshalJSON is the custom marshaler for WebHookEventSubscriptionDestination. +func (whesd WebHookEventSubscriptionDestination) MarshalJSON() ([]byte, error) { + whesd.EndpointType = EndpointTypeWebHook + objectMap := make(map[string]interface{}) + if whesd.WebHookEventSubscriptionDestinationProperties != nil { + objectMap["properties"] = whesd.WebHookEventSubscriptionDestinationProperties + } + if whesd.EndpointType != "" { + objectMap["endpointType"] = whesd.EndpointType + } + return json.Marshal(objectMap) +} + +// AsWebHookEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for WebHookEventSubscriptionDestination. 
+func (whesd WebHookEventSubscriptionDestination) AsWebHookEventSubscriptionDestination() (*WebHookEventSubscriptionDestination, bool) { + return &whesd, true +} + +// AsEventHubEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for WebHookEventSubscriptionDestination. +func (whesd WebHookEventSubscriptionDestination) AsEventHubEventSubscriptionDestination() (*EventHubEventSubscriptionDestination, bool) { + return nil, false +} + +// AsStorageQueueEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for WebHookEventSubscriptionDestination. +func (whesd WebHookEventSubscriptionDestination) AsStorageQueueEventSubscriptionDestination() (*StorageQueueEventSubscriptionDestination, bool) { + return nil, false +} + +// AsHybridConnectionEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for WebHookEventSubscriptionDestination. +func (whesd WebHookEventSubscriptionDestination) AsHybridConnectionEventSubscriptionDestination() (*HybridConnectionEventSubscriptionDestination, bool) { + return nil, false +} + +// AsEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for WebHookEventSubscriptionDestination. +func (whesd WebHookEventSubscriptionDestination) AsEventSubscriptionDestination() (*EventSubscriptionDestination, bool) { + return nil, false +} + +// AsBasicEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for WebHookEventSubscriptionDestination. +func (whesd WebHookEventSubscriptionDestination) AsBasicEventSubscriptionDestination() (BasicEventSubscriptionDestination, bool) { + return &whesd, true +} + +// UnmarshalJSON is the custom unmarshaler for WebHookEventSubscriptionDestination struct. 
+func (whesd *WebHookEventSubscriptionDestination) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var webHookEventSubscriptionDestinationProperties WebHookEventSubscriptionDestinationProperties + err = json.Unmarshal(*v, &webHookEventSubscriptionDestinationProperties) + if err != nil { + return err + } + whesd.WebHookEventSubscriptionDestinationProperties = &webHookEventSubscriptionDestinationProperties + } + case "endpointType": + if v != nil { + var endpointType EndpointType + err = json.Unmarshal(*v, &endpointType) + if err != nil { + return err + } + whesd.EndpointType = endpointType + } + } + } + + return nil +} + +// WebHookEventSubscriptionDestinationProperties information about the webhook destination properties for +// an event subscription. +type WebHookEventSubscriptionDestinationProperties struct { + // EndpointURL - The URL that represents the endpoint of the destination of an event subscription. + EndpointURL *string `json:"endpointUrl,omitempty"` + // EndpointBaseURL - The base URL that represents the endpoint of the destination of an event subscription. 
+ EndpointBaseURL *string `json:"endpointBaseUrl,omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/operations.go similarity index 98% rename from vendor/github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid/operations.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/operations.go index 39d476869d00..fa4dc1813184 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/operations.go @@ -75,7 +75,7 @@ func (client OperationsClient) List(ctx context.Context) (result OperationsListR // ListPreparer prepares the List request. func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) { - const APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid/topics.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/topics.go similarity index 98% rename from vendor/github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid/topics.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/topics.go index 38073512d5ee..c6765951ae49 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid/topics.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/topics.go @@ -80,7 +80,7 @@ func (client TopicsClient) 
CreateOrUpdatePreparer(ctx context.Context, resourceG "topicName": autorest.Encode("path", topicName), } - const APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -159,7 +159,7 @@ func (client TopicsClient) DeletePreparer(ctx context.Context, resourceGroupName "topicName": autorest.Encode("path", topicName), } - const APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -241,7 +241,7 @@ func (client TopicsClient) GetPreparer(ctx context.Context, resourceGroupName st "topicName": autorest.Encode("path", topicName), } - const APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -316,7 +316,7 @@ func (client TopicsClient) ListByResourceGroupPreparer(ctx context.Context, reso "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -388,7 +388,7 @@ func (client TopicsClient) ListBySubscriptionPreparer(ctx context.Context) (*htt "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -469,7 +469,7 @@ func (client TopicsClient) ListEventTypesPreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -546,7 +546,7 @@ func (client TopicsClient) ListSharedAccessKeysPreparer(ctx context.Context, res "topicName": autorest.Encode("path", topicName), } - const APIVersion = "2018-01-01" + const 
APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -630,7 +630,7 @@ func (client TopicsClient) RegenerateKeyPreparer(ctx context.Context, resourceGr "topicName": autorest.Encode("path", topicName), } - const APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -704,7 +704,7 @@ func (client TopicsClient) UpdatePreparer(ctx context.Context, resourceGroupName "topicName": autorest.Encode("path", topicName), } - const APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid/topictypes.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/topictypes.go similarity index 98% rename from vendor/github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid/topictypes.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/topictypes.go index a19cd7c0fefd..dd0785ade936 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid/topictypes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/topictypes.go @@ -81,7 +81,7 @@ func (client TopicTypesClient) GetPreparer(ctx context.Context, topicTypeName st "topicTypeName": autorest.Encode("path", topicTypeName), } - const APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -149,7 +149,7 @@ func (client TopicTypesClient) List(ctx context.Context) (result TopicTypesListR // ListPreparer prepares the List request. 
func (client TopicTypesClient) ListPreparer(ctx context.Context) (*http.Request, error) { - const APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -223,7 +223,7 @@ func (client TopicTypesClient) ListEventTypesPreparer(ctx context.Context, topic "topicTypeName": autorest.Encode("path", topicTypeName), } - const APIVersion = "2018-01-01" + const APIVersion = "2018-09-15-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/version.go similarity index 98% rename from vendor/github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid/version.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/version.go index 12eed0452075..8a5b18bcded6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid/version.go @@ -21,7 +21,7 @@ import "github.com/Azure/azure-sdk-for-go/version" // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return "Azure-SDK-For-Go/" + version.Number + " eventgrid/2018-01-01" + return "Azure-SDK-For-Go/" + version.Number + " eventgrid/2018-09-15-preview" } // Version returns the semantic version (see http://semver.org) of the client. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/iothub/mgmt/2018-04-01/devices/certificates.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/iothub/mgmt/2018-12-01-preview/devices/certificates.go similarity index 98% rename from vendor/github.com/Azure/azure-sdk-for-go/services/iothub/mgmt/2018-04-01/devices/certificates.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/preview/iothub/mgmt/2018-12-01-preview/devices/certificates.go index 2e9f3ed7ea27..98d2d47248e5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/iothub/mgmt/2018-04-01/devices/certificates.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/iothub/mgmt/2018-12-01-preview/devices/certificates.go @@ -96,7 +96,7 @@ func (client CertificatesClient) CreateOrUpdatePreparer(ctx context.Context, res "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -188,7 +188,7 @@ func (client CertificatesClient) DeletePreparer(ctx context.Context, resourceGro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -275,7 +275,7 @@ func (client CertificatesClient) GenerateVerificationCodePreparer(ctx context.Co "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -361,7 +361,7 @@ func (client CertificatesClient) GetPreparer(ctx context.Context, resourceGroupN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": 
APIVersion, } @@ -438,7 +438,7 @@ func (client CertificatesClient) ListByIotHubPreparer(ctx context.Context, resou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -526,7 +526,7 @@ func (client CertificatesClient) VerifyPreparer(ctx context.Context, resourceGro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/iothub/mgmt/2018-04-01/devices/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/iothub/mgmt/2018-12-01-preview/devices/client.go similarity index 98% rename from vendor/github.com/Azure/azure-sdk-for-go/services/iothub/mgmt/2018-04-01/devices/client.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/preview/iothub/mgmt/2018-12-01-preview/devices/client.go index cbe7e7e40a2f..661934a51c75 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/iothub/mgmt/2018-04-01/devices/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/iothub/mgmt/2018-12-01-preview/devices/client.go @@ -1,4 +1,4 @@ -// Package devices implements the Azure ARM Devices service API version 2018-04-01. +// Package devices implements the Azure ARM Devices service API version 2018-12-01-preview. // // Use this API to manage the IoT hubs in your Azure subscription. 
package devices diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/iothub/mgmt/2018-04-01/devices/iothubresource.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/iothub/mgmt/2018-12-01-preview/devices/iothubresource.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/iothub/mgmt/2018-04-01/devices/iothubresource.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/preview/iothub/mgmt/2018-12-01-preview/devices/iothubresource.go index 0a46ca3c724f..2f707500e1ac 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/iothub/mgmt/2018-04-01/devices/iothubresource.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/iothub/mgmt/2018-12-01-preview/devices/iothubresource.go @@ -89,7 +89,7 @@ func (client IotHubResourceClient) CheckNameAvailabilityPreparer(ctx context.Con "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -172,7 +172,7 @@ func (client IotHubResourceClient) CreateEventHubConsumerGroupPreparer(ctx conte "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -278,7 +278,7 @@ func (client IotHubResourceClient) CreateOrUpdatePreparer(ctx context.Context, r "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -361,7 +361,7 @@ func (client IotHubResourceClient) DeletePreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := 
map[string]interface{}{ "api-version": APIVersion, } @@ -448,7 +448,7 @@ func (client IotHubResourceClient) DeleteEventHubConsumerGroupPreparer(ctx conte "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -534,7 +534,7 @@ func (client IotHubResourceClient) ExportDevicesPreparer(ctx context.Context, re "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -613,7 +613,7 @@ func (client IotHubResourceClient) GetPreparer(ctx context.Context, resourceGrou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -688,7 +688,7 @@ func (client IotHubResourceClient) GetEndpointHealthPreparer(ctx context.Context "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -806,7 +806,7 @@ func (client IotHubResourceClient) GetEventHubConsumerGroupPreparer(ctx context. 
"subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -886,7 +886,7 @@ func (client IotHubResourceClient) GetJobPreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -966,7 +966,7 @@ func (client IotHubResourceClient) GetKeysForKeyNamePreparer(ctx context.Context "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1044,7 +1044,7 @@ func (client IotHubResourceClient) GetQuotaMetricsPreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1158,7 +1158,7 @@ func (client IotHubResourceClient) GetStatsPreparer(ctx context.Context, resourc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1236,7 +1236,7 @@ func (client IotHubResourceClient) GetValidSkusPreparer(ctx context.Context, res "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1360,7 +1360,7 @@ func (client IotHubResourceClient) ImportDevicesPreparer(ctx context.Context, re "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = 
"2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1438,7 +1438,7 @@ func (client IotHubResourceClient) ListByResourceGroupPreparer(ctx context.Conte "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1548,7 +1548,7 @@ func (client IotHubResourceClient) ListBySubscriptionPreparer(ctx context.Contex "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1666,7 +1666,7 @@ func (client IotHubResourceClient) ListEventHubConsumerGroupsPreparer(ctx contex "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1782,7 +1782,7 @@ func (client IotHubResourceClient) ListJobsPreparer(ctx context.Context, resourc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1898,7 +1898,7 @@ func (client IotHubResourceClient) ListKeysPreparer(ctx context.Context, resourc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -2013,7 +2013,7 @@ func (client IotHubResourceClient) TestAllRoutesPreparer(ctx context.Context, in "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := 
map[string]interface{}{ "api-version": APIVersion, } @@ -2107,7 +2107,7 @@ func (client IotHubResourceClient) TestRoutePreparer(ctx context.Context, input "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -2181,7 +2181,7 @@ func (client IotHubResourceClient) UpdatePreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/iothub/mgmt/2018-04-01/devices/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/iothub/mgmt/2018-12-01-preview/devices/models.go similarity index 98% rename from vendor/github.com/Azure/azure-sdk-for-go/services/iothub/mgmt/2018-04-01/devices/models.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/preview/iothub/mgmt/2018-12-01-preview/devices/models.go index 59cd3fedce44..cb7a69fe75f8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/iothub/mgmt/2018-04-01/devices/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/iothub/mgmt/2018-12-01-preview/devices/models.go @@ -29,7 +29,7 @@ import ( ) // The package's fully qualified name. -const fqdn = "github.com/Azure/azure-sdk-for-go/services/iothub/mgmt/2018-04-01/devices" +const fqdn = "github.com/Azure/azure-sdk-for-go/services/preview/iothub/mgmt/2018-12-01-preview/devices" // AccessRights enumerates the values for access rights. type AccessRights string @@ -87,6 +87,23 @@ func PossibleCapabilitiesValues() []Capabilities { return []Capabilities{DeviceManagement, None} } +// Encoding enumerates the values for encoding. +type Encoding string + +const ( + // Avro ... + Avro Encoding = "Avro" + // AvroDeflate ... 
+ AvroDeflate Encoding = "AvroDeflate" + // JSON ... + JSON Encoding = "JSON" +) + +// PossibleEncodingValues returns an array of possible values for the Encoding const type. +func PossibleEncodingValues() []Encoding { + return []Encoding{Avro, AvroDeflate, JSON} +} + // EndpointHealthStatus enumerates the values for endpoint health status. type EndpointHealthStatus string @@ -1066,6 +1083,8 @@ type IotHubProperties struct { // Comments - IoT hub comments. Comments *string `json:"comments,omitempty"` OperationsMonitoringProperties *OperationsMonitoringProperties `json:"operationsMonitoringProperties,omitempty"` + // DeviceStreams - The device streams properties of iothub. + DeviceStreams *IotHubPropertiesDeviceStreams `json:"deviceStreams,omitempty"` // Features - The capabilities and features enabled for the IoT hub. Possible values include: 'None', 'DeviceManagement' Features Capabilities `json:"features,omitempty"` } @@ -1112,12 +1131,21 @@ func (ihp IotHubProperties) MarshalJSON() ([]byte, error) { if ihp.OperationsMonitoringProperties != nil { objectMap["operationsMonitoringProperties"] = ihp.OperationsMonitoringProperties } + if ihp.DeviceStreams != nil { + objectMap["deviceStreams"] = ihp.DeviceStreams + } if ihp.Features != "" { objectMap["features"] = ihp.Features } return json.Marshal(objectMap) } +// IotHubPropertiesDeviceStreams the device streams properties of iothub. +type IotHubPropertiesDeviceStreams struct { + // StreamingEndpoints - List of Device Streams Endpoints. + StreamingEndpoints *[]string `json:"streamingEndpoints,omitempty"` +} + // IotHubQuotaMetricInfo quota metrics properties. type IotHubQuotaMetricInfo struct { // Name - The name of the quota metric. 
@@ -1748,6 +1776,8 @@ type OperationDisplay struct { Resource *string `json:"resource,omitempty"` // Operation - Name of the operation Operation *string `json:"operation,omitempty"` + // Description - Description of the operation + Description *string `json:"description,omitempty"` } // OperationInputs input values. @@ -2109,8 +2139,8 @@ type RoutingStorageContainerProperties struct { BatchFrequencyInSeconds *int32 `json:"batchFrequencyInSeconds,omitempty"` // MaxChunkSizeInBytes - Maximum number of bytes for each blob written to storage. Value should be between 10485760(10MB) and 524288000(500MB). Default value is 314572800(300MB). MaxChunkSizeInBytes *int32 `json:"maxChunkSizeInBytes,omitempty"` - // Encoding - Encoding that is used to serialize messages to blobs. Supported values are 'avro' and 'avroDeflate'. Default value is 'avro'. - Encoding *string `json:"encoding,omitempty"` + // Encoding - Encoding that is used to serialize messages to blobs. Supported values are 'avro', 'avrodeflate', and 'JSON'. Default value is 'avro'. Possible values include: 'Avro', 'AvroDeflate', 'JSON' + Encoding Encoding `json:"encoding,omitempty"` } // RoutingTwin twin reference input parameter. This is an optional parameter @@ -2122,10 +2152,10 @@ type RoutingTwin struct { // RoutingTwinProperties ... type RoutingTwinProperties struct { - // DesiredProperties - Twin desired properties - DesiredProperties interface{} `json:"desiredProperties,omitempty"` - // ReportedProperties - Twin desired properties - ReportedProperties interface{} `json:"reportedProperties,omitempty"` + // Desired - Twin desired properties + Desired interface{} `json:"desired,omitempty"` + // Reported - Twin desired properties + Reported interface{} `json:"reported,omitempty"` } // SetObject ... 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/iothub/mgmt/2018-04-01/devices/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/iothub/mgmt/2018-12-01-preview/devices/operations.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/iothub/mgmt/2018-04-01/devices/operations.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/preview/iothub/mgmt/2018-12-01-preview/devices/operations.go index 6c4d7a65e7d6..c5028e718c31 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/iothub/mgmt/2018-04-01/devices/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/iothub/mgmt/2018-12-01-preview/devices/operations.go @@ -76,7 +76,7 @@ func (client OperationsClient) List(ctx context.Context) (result OperationListRe // ListPreparer prepares the List request. func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) { - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/iothub/mgmt/2018-04-01/devices/resourceprovidercommon.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/iothub/mgmt/2018-12-01-preview/devices/resourceprovidercommon.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/iothub/mgmt/2018-04-01/devices/resourceprovidercommon.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/preview/iothub/mgmt/2018-12-01-preview/devices/resourceprovidercommon.go index a95e69a5a2dc..37251935e810 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/iothub/mgmt/2018-04-01/devices/resourceprovidercommon.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/iothub/mgmt/2018-12-01-preview/devices/resourceprovidercommon.go @@ -79,7 +79,7 @@ func (client ResourceProviderCommonClient) 
GetSubscriptionQuotaPreparer(ctx cont "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-12-01-preview" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/iothub/mgmt/2018-04-01/devices/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/iothub/mgmt/2018-12-01-preview/devices/version.go similarity index 93% rename from vendor/github.com/Azure/azure-sdk-for-go/services/iothub/mgmt/2018-04-01/devices/version.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/preview/iothub/mgmt/2018-12-01-preview/devices/version.go index 31c3eebda3bc..39ec68069ce7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/iothub/mgmt/2018-04-01/devices/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/iothub/mgmt/2018-12-01-preview/devices/version.go @@ -21,7 +21,7 @@ import "github.com/Azure/azure-sdk-for-go/version" // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return "Azure-SDK-For-Go/" + version.Number + " devices/2018-04-01" + return "Azure-SDK-For-Go/" + version.Number + " devices/2018-12-01-preview" } // Version returns the semantic version (see http://semver.org) of the client. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/2017-08-01-preview/security/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/2017-08-01-preview/security/models.go index f9d944e2d421..caecc3f08e35 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/2017-08-01-preview/security/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/2017-08-01-preview/security/models.go @@ -147,35 +147,20 @@ func PossibleFamilyValues() []Family { // KindEnum enumerates the values for kind enum. 
type KindEnum string -const ( - // KindDataExportSetting ... - KindDataExportSetting KindEnum = "DataExportSetting" - // KindSetting ... - KindSetting KindEnum = "Setting" -) - -// PossibleKindEnumValues returns an array of possible values for the KindEnum const type. -func PossibleKindEnumValues() []KindEnum { - return []KindEnum{KindDataExportSetting, KindSetting} -} - -// KindEnum1 enumerates the values for kind enum 1. -type KindEnum1 string - const ( // KindAAD ... - KindAAD KindEnum1 = "AAD" + KindAAD KindEnum = "AAD" // KindATA ... - KindATA KindEnum1 = "ATA" + KindATA KindEnum = "ATA" // KindCEF ... - KindCEF KindEnum1 = "CEF" + KindCEF KindEnum = "CEF" // KindExternalSecuritySolution ... - KindExternalSecuritySolution KindEnum1 = "ExternalSecuritySolution" + KindExternalSecuritySolution KindEnum = "ExternalSecuritySolution" ) -// PossibleKindEnum1Values returns an array of possible values for the KindEnum1 const type. -func PossibleKindEnum1Values() []KindEnum1 { - return []KindEnum1{KindAAD, KindATA, KindCEF, KindExternalSecuritySolution} +// PossibleKindEnumValues returns an array of possible values for the KindEnum const type. +func PossibleKindEnumValues() []KindEnum { + return []KindEnum{KindAAD, KindATA, KindCEF, KindExternalSecuritySolution} } // PricingTier enumerates the values for pricing tier. @@ -214,13 +199,15 @@ func PossibleProtocolValues() []Protocol { type SettingKind string const ( + // SettingKindAlertSuppressionSetting ... + SettingKindAlertSuppressionSetting SettingKind = "AlertSuppressionSetting" // SettingKindDataExportSetting ... SettingKindDataExportSetting SettingKind = "DataExportSetting" ) // PossibleSettingKindValues returns an array of possible values for the SettingKind const type. func PossibleSettingKindValues() []SettingKind { - return []SettingKind{SettingKindDataExportSetting} + return []SettingKind{SettingKindAlertSuppressionSetting, SettingKindDataExportSetting} } // Status enumerates the values for status. 
@@ -274,7 +261,7 @@ type AadExternalSecuritySolution struct { // Location - Location where the resource is stored Location *string `json:"location,omitempty"` // Kind - Possible values include: 'KindExternalSecuritySolution', 'KindCEF', 'KindATA', 'KindAAD' - Kind KindEnum1 `json:"kind,omitempty"` + Kind KindEnum `json:"kind,omitempty"` } // MarshalJSON is the custom marshaler for AadExternalSecuritySolution. @@ -1246,7 +1233,7 @@ type AtaExternalSecuritySolution struct { // Location - Location where the resource is stored Location *string `json:"location,omitempty"` // Kind - Possible values include: 'KindExternalSecuritySolution', 'KindCEF', 'KindATA', 'KindAAD' - Kind KindEnum1 `json:"kind,omitempty"` + Kind KindEnum `json:"kind,omitempty"` } // MarshalJSON is the custom marshaler for AtaExternalSecuritySolution. @@ -1640,7 +1627,7 @@ type CefExternalSecuritySolution struct { // Location - Location where the resource is stored Location *string `json:"location,omitempty"` // Kind - Possible values include: 'KindExternalSecuritySolution', 'KindCEF', 'KindATA', 'KindAAD' - Kind KindEnum1 `json:"kind,omitempty"` + Kind KindEnum `json:"kind,omitempty"` } // MarshalJSON is the custom marshaler for CefExternalSecuritySolution. @@ -2378,23 +2365,25 @@ type ContactProperties struct { type DataExportSetting struct { // DataExportSettingProperties - Data export setting data *DataExportSettingProperties `json:"properties,omitempty"` + // Kind - the kind of the settings string (DataExportSetting). 
Possible values include: 'SettingKindDataExportSetting', 'SettingKindAlertSuppressionSetting' + Kind SettingKind `json:"kind,omitempty"` // ID - Resource Id ID *string `json:"id,omitempty"` // Name - Resource name Name *string `json:"name,omitempty"` // Type - Resource type Type *string `json:"type,omitempty"` - // Kind - Possible values include: 'KindSetting', 'KindDataExportSetting' - Kind KindEnum `json:"kind,omitempty"` } // MarshalJSON is the custom marshaler for DataExportSetting. func (desVar DataExportSetting) MarshalJSON() ([]byte, error) { - desVar.Kind = KindDataExportSetting objectMap := make(map[string]interface{}) if desVar.DataExportSettingProperties != nil { objectMap["properties"] = desVar.DataExportSettingProperties } + if desVar.Kind != "" { + objectMap["kind"] = desVar.Kind + } if desVar.ID != nil { objectMap["id"] = desVar.ID } @@ -2404,27 +2393,9 @@ func (desVar DataExportSetting) MarshalJSON() ([]byte, error) { if desVar.Type != nil { objectMap["type"] = desVar.Type } - if desVar.Kind != "" { - objectMap["kind"] = desVar.Kind - } return json.Marshal(objectMap) } -// AsDataExportSetting is the BasicSetting implementation for DataExportSetting. -func (desVar DataExportSetting) AsDataExportSetting() (*DataExportSetting, bool) { - return &desVar, true -} - -// AsSetting is the BasicSetting implementation for DataExportSetting. -func (desVar DataExportSetting) AsSetting() (*Setting, bool) { - return nil, false -} - -// AsBasicSetting is the BasicSetting implementation for DataExportSetting. -func (desVar DataExportSetting) AsBasicSetting() (BasicSetting, bool) { - return &desVar, true -} - // UnmarshalJSON is the custom unmarshaler for DataExportSetting struct. 
func (desVar *DataExportSetting) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage @@ -2443,6 +2414,15 @@ func (desVar *DataExportSetting) UnmarshalJSON(body []byte) error { } desVar.DataExportSettingProperties = &dataExportSettingProperties } + case "kind": + if v != nil { + var kind SettingKind + err = json.Unmarshal(*v, &kind) + if err != nil { + return err + } + desVar.Kind = kind + } case "id": if v != nil { var ID string @@ -2470,15 +2450,6 @@ func (desVar *DataExportSetting) UnmarshalJSON(body []byte) error { } desVar.Type = &typeVar } - case "kind": - if v != nil { - var kind KindEnum - err = json.Unmarshal(*v, &kind) - if err != nil { - return err - } - desVar.Kind = kind - } } } @@ -2766,7 +2737,7 @@ type ExternalSecuritySolution struct { // Location - Location where the resource is stored Location *string `json:"location,omitempty"` // Kind - Possible values include: 'KindExternalSecuritySolution', 'KindCEF', 'KindATA', 'KindAAD' - Kind KindEnum1 `json:"kind,omitempty"` + Kind KindEnum `json:"kind,omitempty"` } func unmarshalBasicExternalSecuritySolution(body []byte) (BasicExternalSecuritySolution, error) { @@ -4199,160 +4170,40 @@ type SensitivityLabel struct { Enabled *bool `json:"enabled,omitempty"` } -// BasicSetting represents a security setting in Azure Security Center. -type BasicSetting interface { - AsDataExportSetting() (*DataExportSetting, bool) - AsSetting() (*Setting, bool) -} - // Setting represents a security setting in Azure Security Center. type Setting struct { autorest.Response `json:"-"` + // Kind - the kind of the settings string (DataExportSetting). 
Possible values include: 'SettingKindDataExportSetting', 'SettingKindAlertSuppressionSetting' + Kind SettingKind `json:"kind,omitempty"` // ID - Resource Id ID *string `json:"id,omitempty"` // Name - Resource name Name *string `json:"name,omitempty"` // Type - Resource type Type *string `json:"type,omitempty"` - // Kind - Possible values include: 'KindSetting', 'KindDataExportSetting' - Kind KindEnum `json:"kind,omitempty"` } -func unmarshalBasicSetting(body []byte) (BasicSetting, error) { - var m map[string]interface{} - err := json.Unmarshal(body, &m) - if err != nil { - return nil, err - } - - switch m["kind"] { - case string(KindDataExportSetting): - var desVar DataExportSetting - err := json.Unmarshal(body, &desVar) - return desVar, err - default: - var s Setting - err := json.Unmarshal(body, &s) - return s, err - } -} -func unmarshalBasicSettingArray(body []byte) ([]BasicSetting, error) { - var rawMessages []*json.RawMessage - err := json.Unmarshal(body, &rawMessages) - if err != nil { - return nil, err - } - - sArray := make([]BasicSetting, len(rawMessages)) - - for index, rawMessage := range rawMessages { - s, err := unmarshalBasicSetting(*rawMessage) - if err != nil { - return nil, err - } - sArray[index] = s - } - return sArray, nil -} - -// MarshalJSON is the custom marshaler for Setting. -func (s Setting) MarshalJSON() ([]byte, error) { - s.Kind = KindSetting - objectMap := make(map[string]interface{}) - if s.ID != nil { - objectMap["id"] = s.ID - } - if s.Name != nil { - objectMap["name"] = s.Name - } - if s.Type != nil { - objectMap["type"] = s.Type - } - if s.Kind != "" { - objectMap["kind"] = s.Kind - } - return json.Marshal(objectMap) -} - -// AsDataExportSetting is the BasicSetting implementation for Setting. -func (s Setting) AsDataExportSetting() (*DataExportSetting, bool) { - return nil, false -} - -// AsSetting is the BasicSetting implementation for Setting. 
-func (s Setting) AsSetting() (*Setting, bool) { - return &s, true -} - -// AsBasicSetting is the BasicSetting implementation for Setting. -func (s Setting) AsBasicSetting() (BasicSetting, bool) { - return &s, true -} - -// SettingKind1 the kind of the security setting -type SettingKind1 struct { - // Kind - the kind of the settings string. Possible values include: 'SettingKindDataExportSetting' +// SettingResource the kind of the security setting +type SettingResource struct { + // Kind - the kind of the settings string (DataExportSetting). Possible values include: 'SettingKindDataExportSetting', 'SettingKindAlertSuppressionSetting' Kind SettingKind `json:"kind,omitempty"` -} - -// SettingModel ... -type SettingModel struct { - autorest.Response `json:"-"` - Value BasicSetting `json:"value,omitempty"` -} - -// UnmarshalJSON is the custom unmarshaler for SettingModel struct. -func (sm *SettingModel) UnmarshalJSON(body []byte) error { - s, err := unmarshalBasicSetting(body) - if err != nil { - return err - } - sm.Value = s - - return nil + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` } // SettingsList subscription settings list. type SettingsList struct { autorest.Response `json:"-"` // Value - The settings list. - Value *[]BasicSetting `json:"value,omitempty"` + Value *[]Setting `json:"value,omitempty"` // NextLink - The URI to fetch the next page. NextLink *string `json:"nextLink,omitempty"` } -// UnmarshalJSON is the custom unmarshaler for SettingsList struct. 
-func (sl *SettingsList) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "value": - if v != nil { - value, err := unmarshalBasicSettingArray(*v) - if err != nil { - return err - } - sl.Value = &value - } - case "nextLink": - if v != nil { - var nextLink string - err = json.Unmarshal(*v, &nextLink) - if err != nil { - return err - } - sl.NextLink = &nextLink - } - } - } - - return nil -} - // SettingsListIterator provides access to a complete listing of Setting values. type SettingsListIterator struct { i int @@ -4404,7 +4255,7 @@ func (iter SettingsListIterator) Response() SettingsList { // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. -func (iter SettingsListIterator) Value() BasicSetting { +func (iter SettingsListIterator) Value() Setting { if !iter.page.NotDone() { return Setting{} } @@ -4433,7 +4284,7 @@ func (sl SettingsList) settingsListPreparer(ctx context.Context) (*http.Request, autorest.WithBaseURL(to.String(sl.NextLink))) } -// SettingsListPage contains a page of BasicSetting values. +// SettingsListPage contains a page of Setting values. type SettingsListPage struct { fn func(context.Context, SettingsList) (SettingsList, error) sl SettingsList @@ -4478,7 +4329,7 @@ func (page SettingsListPage) Response() SettingsList { } // Values returns the slice of values for the current page or nil if there are no values. 
-func (page SettingsListPage) Values() []BasicSetting { +func (page SettingsListPage) Values() []Setting { if page.sl.IsEmpty() { return nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/2017-08-01-preview/security/settings.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/2017-08-01-preview/security/settings.go index 3b45ca6bda24..2ea41e078b28 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/2017-08-01-preview/security/settings.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/2017-08-01-preview/security/settings.go @@ -43,8 +43,8 @@ func NewSettingsClientWithBaseURI(baseURI string, subscriptionID string, ascLoca // Get settings of different configurations in security center // Parameters: -// settingName - name of setting -func (client SettingsClient) Get(ctx context.Context, settingName string) (result SettingModel, err error) { +// settingName - name of setting: (MCAS/WDATP) +func (client SettingsClient) Get(ctx context.Context, settingName string) (result Setting, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/SettingsClient.Get") defer func() { @@ -111,7 +111,7 @@ func (client SettingsClient) GetSender(req *http.Request) (*http.Response, error // GetResponder handles the response to the Get request. The method always // closes the http.Response Body. 
-func (client SettingsClient) GetResponder(resp *http.Response) (result SettingModel, err error) { +func (client SettingsClient) GetResponder(resp *http.Response) (result Setting, err error) { err = autorest.Respond( resp, client.ByInspecting(), @@ -240,9 +240,9 @@ func (client SettingsClient) ListComplete(ctx context.Context) (result SettingsL // Update updating settings about different configurations in security center // Parameters: -// settingName - name of setting +// settingName - name of setting: (MCAS/WDATP) // setting - setting object -func (client SettingsClient) Update(ctx context.Context, settingName string, setting BasicSetting) (result SettingModel, err error) { +func (client SettingsClient) Update(ctx context.Context, settingName string, setting Setting) (result Setting, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/SettingsClient.Update") defer func() { @@ -281,7 +281,7 @@ func (client SettingsClient) Update(ctx context.Context, settingName string, set } // UpdatePreparer prepares the Update request. -func (client SettingsClient) UpdatePreparer(ctx context.Context, settingName string, setting BasicSetting) (*http.Request, error) { +func (client SettingsClient) UpdatePreparer(ctx context.Context, settingName string, setting Setting) (*http.Request, error) { pathParameters := map[string]interface{}{ "settingName": autorest.Encode("path", settingName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), @@ -311,7 +311,7 @@ func (client SettingsClient) UpdateSender(req *http.Request) (*http.Response, er // UpdateResponder handles the response to the Update request. The method always // closes the http.Response Body. 
-func (client SettingsClient) UpdateResponder(resp *http.Response) (result SettingModel, err error) { +func (client SettingsClient) UpdateResponder(resp *http.Response) (result Setting, err error) { err = autorest.Respond( resp, client.ByInspecting(), diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/2017-08-01-preview/security/workspacesettings.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/2017-08-01-preview/security/workspacesettings.go index 26cfb662aee3..5bb6ae1ca0e8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/2017-08-01-preview/security/workspacesettings.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/2017-08-01-preview/security/workspacesettings.go @@ -210,7 +210,8 @@ func (client WorkspaceSettingsClient) DeleteResponder(resp *http.Response) (resu return } -// Get settings about where we should store your security data and logs +// Get settings about where we should store your security data and logs. If the result is empty, it means that no +// custom-workspace configuration was set // Parameters: // workspaceSettingName - name of the security setting func (client WorkspaceSettingsClient) Get(ctx context.Context, workspaceSettingName string) (result WorkspaceSetting, err error) { @@ -291,7 +292,8 @@ func (client WorkspaceSettingsClient) GetResponder(resp *http.Response) (result return } -// List settings about where we should store your security data and logs +// List settings about where we should store your security data and logs. 
If the result is empty, it means that no +// custom-workspace configuration was set func (client WorkspaceSettingsClient) List(ctx context.Context) (result WorkspaceSettingListPage, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/WorkspaceSettingsClient.List") diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/sql/mgmt/2015-05-01-preview/sql/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/sql/mgmt/2015-05-01-preview/sql/models.go index 35fc4de49fa5..fdcbd1001de5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/sql/mgmt/2015-05-01-preview/sql/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/sql/mgmt/2015-05-01-preview/sql/models.go @@ -4664,6 +4664,10 @@ type ManagedInstanceProperties struct { DNSZone *string `json:"dnsZone,omitempty"` // DNSZonePartner - The resource id of another managed instance whose DNS zone this managed instance will share after creation. DNSZonePartner *string `json:"dnsZonePartner,omitempty"` + // PublicDataEndpointEnabled - Whether or not the public data endpoint is enabled. + PublicDataEndpointEnabled *bool `json:"publicDataEndpointEnabled,omitempty"` + // ProxyOverride - Proxy override of the managed instance. 
+ ProxyOverride *string `json:"proxyOverride,omitempty"` } // ManagedInstancesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/sql/mgmt/2017-10-01-preview/sql/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/sql/mgmt/2017-10-01-preview/sql/models.go index ac14bbe74d36..e0b98711d263 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/sql/mgmt/2017-10-01-preview/sql/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/sql/mgmt/2017-10-01-preview/sql/models.go @@ -3985,6 +3985,241 @@ type ProxyResource struct { Type *string `json:"type,omitempty"` } +// RecoverableManagedDatabase a recoverable managed database resource. +type RecoverableManagedDatabase struct { + autorest.Response `json:"-"` + // RecoverableManagedDatabaseProperties - Resource properties. + *RecoverableManagedDatabaseProperties `json:"properties,omitempty"` + // ID - Resource ID. + ID *string `json:"id,omitempty"` + // Name - Resource name. + Name *string `json:"name,omitempty"` + // Type - Resource type. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for RecoverableManagedDatabase. +func (rmd RecoverableManagedDatabase) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if rmd.RecoverableManagedDatabaseProperties != nil { + objectMap["properties"] = rmd.RecoverableManagedDatabaseProperties + } + if rmd.ID != nil { + objectMap["id"] = rmd.ID + } + if rmd.Name != nil { + objectMap["name"] = rmd.Name + } + if rmd.Type != nil { + objectMap["type"] = rmd.Type + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for RecoverableManagedDatabase struct. 
+func (rmd *RecoverableManagedDatabase) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var recoverableManagedDatabaseProperties RecoverableManagedDatabaseProperties + err = json.Unmarshal(*v, &recoverableManagedDatabaseProperties) + if err != nil { + return err + } + rmd.RecoverableManagedDatabaseProperties = &recoverableManagedDatabaseProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + rmd.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + rmd.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + rmd.Type = &typeVar + } + } + } + + return nil +} + +// RecoverableManagedDatabaseListResult a list of recoverable managed databases. +type RecoverableManagedDatabaseListResult struct { + autorest.Response `json:"-"` + // Value - Array of results. + Value *[]RecoverableManagedDatabase `json:"value,omitempty"` + // NextLink - Link to retrieve next page of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// RecoverableManagedDatabaseListResultIterator provides access to a complete listing of +// RecoverableManagedDatabase values. +type RecoverableManagedDatabaseListResultIterator struct { + i int + page RecoverableManagedDatabaseListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
+func (iter *RecoverableManagedDatabaseListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecoverableManagedDatabaseListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *RecoverableManagedDatabaseListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter RecoverableManagedDatabaseListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter RecoverableManagedDatabaseListResultIterator) Response() RecoverableManagedDatabaseListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter RecoverableManagedDatabaseListResultIterator) Value() RecoverableManagedDatabase { + if !iter.page.NotDone() { + return RecoverableManagedDatabase{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the RecoverableManagedDatabaseListResultIterator type. 
+func NewRecoverableManagedDatabaseListResultIterator(page RecoverableManagedDatabaseListResultPage) RecoverableManagedDatabaseListResultIterator { + return RecoverableManagedDatabaseListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (rmdlr RecoverableManagedDatabaseListResult) IsEmpty() bool { + return rmdlr.Value == nil || len(*rmdlr.Value) == 0 +} + +// recoverableManagedDatabaseListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (rmdlr RecoverableManagedDatabaseListResult) recoverableManagedDatabaseListResultPreparer(ctx context.Context) (*http.Request, error) { + if rmdlr.NextLink == nil || len(to.String(rmdlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(rmdlr.NextLink))) +} + +// RecoverableManagedDatabaseListResultPage contains a page of RecoverableManagedDatabase values. +type RecoverableManagedDatabaseListResultPage struct { + fn func(context.Context, RecoverableManagedDatabaseListResult) (RecoverableManagedDatabaseListResult, error) + rmdlr RecoverableManagedDatabaseListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *RecoverableManagedDatabaseListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecoverableManagedDatabaseListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.rmdlr) + if err != nil { + return err + } + page.rmdlr = next + return nil +} + +// Next advances to the next page of values. 
If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *RecoverableManagedDatabaseListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page RecoverableManagedDatabaseListResultPage) NotDone() bool { + return !page.rmdlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page RecoverableManagedDatabaseListResultPage) Response() RecoverableManagedDatabaseListResult { + return page.rmdlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page RecoverableManagedDatabaseListResultPage) Values() []RecoverableManagedDatabase { + if page.rmdlr.IsEmpty() { + return nil + } + return *page.rmdlr.Value +} + +// Creates a new instance of the RecoverableManagedDatabaseListResultPage type. +func NewRecoverableManagedDatabaseListResultPage(getNextPage func(context.Context, RecoverableManagedDatabaseListResult) (RecoverableManagedDatabaseListResult, error)) RecoverableManagedDatabaseListResultPage { + return RecoverableManagedDatabaseListResultPage{fn: getNextPage} +} + +// RecoverableManagedDatabaseProperties the recoverable managed database's properties. +type RecoverableManagedDatabaseProperties struct { + // LastAvailableBackupDate - The last available backup date. + LastAvailableBackupDate *string `json:"lastAvailableBackupDate,omitempty"` +} + // Resource ARM resource. type Resource struct { // ID - Resource ID. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/sql/mgmt/2017-10-01-preview/sql/recoverablemanageddatabases.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/sql/mgmt/2017-10-01-preview/sql/recoverablemanageddatabases.go new file mode 100644 index 000000000000..bc860031cfad --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/sql/mgmt/2017-10-01-preview/sql/recoverablemanageddatabases.go @@ -0,0 +1,238 @@ +package sql + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// RecoverableManagedDatabasesClient is the the Azure SQL Database management API provides a RESTful set of web +// services that interact with Azure SQL Database services to manage your databases. The API enables you to create, +// retrieve, update, and delete databases. +type RecoverableManagedDatabasesClient struct { + BaseClient +} + +// NewRecoverableManagedDatabasesClient creates an instance of the RecoverableManagedDatabasesClient client. 
+func NewRecoverableManagedDatabasesClient(subscriptionID string) RecoverableManagedDatabasesClient { + return NewRecoverableManagedDatabasesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewRecoverableManagedDatabasesClientWithBaseURI creates an instance of the RecoverableManagedDatabasesClient client. +func NewRecoverableManagedDatabasesClientWithBaseURI(baseURI string, subscriptionID string) RecoverableManagedDatabasesClient { + return RecoverableManagedDatabasesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets a recoverable managed database. +// Parameters: +// resourceGroupName - the name of the resource group that contains the resource. You can obtain this value +// from the Azure Resource Manager API or the portal. +// managedInstanceName - the name of the managed instance. +func (client RecoverableManagedDatabasesClient) Get(ctx context.Context, resourceGroupName string, managedInstanceName string, recoverableDatabaseName string) (result RecoverableManagedDatabase, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecoverableManagedDatabasesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, managedInstanceName, recoverableDatabaseName) + if err != nil { + err = autorest.NewErrorWithError(err, "sql.RecoverableManagedDatabasesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "sql.RecoverableManagedDatabasesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "sql.RecoverableManagedDatabasesClient", "Get", resp, "Failure responding to request") + } + + return +} + 
+// GetPreparer prepares the Get request. +func (client RecoverableManagedDatabasesClient) GetPreparer(ctx context.Context, resourceGroupName string, managedInstanceName string, recoverableDatabaseName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "managedInstanceName": autorest.Encode("path", managedInstanceName), + "recoverableDatabaseName": autorest.Encode("path", recoverableDatabaseName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-10-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/recoverableDatabases/{recoverableDatabaseName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client RecoverableManagedDatabasesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client RecoverableManagedDatabasesClient) GetResponder(resp *http.Response) (result RecoverableManagedDatabase, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByInstance gets a list of recoverable managed databases. 
+// Parameters: +// resourceGroupName - the name of the resource group that contains the resource. You can obtain this value +// from the Azure Resource Manager API or the portal. +// managedInstanceName - the name of the managed instance. +func (client RecoverableManagedDatabasesClient) ListByInstance(ctx context.Context, resourceGroupName string, managedInstanceName string) (result RecoverableManagedDatabaseListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecoverableManagedDatabasesClient.ListByInstance") + defer func() { + sc := -1 + if result.rmdlr.Response.Response != nil { + sc = result.rmdlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listByInstanceNextResults + req, err := client.ListByInstancePreparer(ctx, resourceGroupName, managedInstanceName) + if err != nil { + err = autorest.NewErrorWithError(err, "sql.RecoverableManagedDatabasesClient", "ListByInstance", nil, "Failure preparing request") + return + } + + resp, err := client.ListByInstanceSender(req) + if err != nil { + result.rmdlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "sql.RecoverableManagedDatabasesClient", "ListByInstance", resp, "Failure sending request") + return + } + + result.rmdlr, err = client.ListByInstanceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "sql.RecoverableManagedDatabasesClient", "ListByInstance", resp, "Failure responding to request") + } + + return +} + +// ListByInstancePreparer prepares the ListByInstance request. 
+func (client RecoverableManagedDatabasesClient) ListByInstancePreparer(ctx context.Context, resourceGroupName string, managedInstanceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "managedInstanceName": autorest.Encode("path", managedInstanceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-10-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/recoverableDatabases", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByInstanceSender sends the ListByInstance request. The method will close the +// http.Response Body if it receives an error. +func (client RecoverableManagedDatabasesClient) ListByInstanceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListByInstanceResponder handles the response to the ListByInstance request. The method always +// closes the http.Response Body. +func (client RecoverableManagedDatabasesClient) ListByInstanceResponder(resp *http.Response) (result RecoverableManagedDatabaseListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByInstanceNextResults retrieves the next set of results, if any. 
+func (client RecoverableManagedDatabasesClient) listByInstanceNextResults(ctx context.Context, lastResults RecoverableManagedDatabaseListResult) (result RecoverableManagedDatabaseListResult, err error) { + req, err := lastResults.recoverableManagedDatabaseListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "sql.RecoverableManagedDatabasesClient", "listByInstanceNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByInstanceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "sql.RecoverableManagedDatabasesClient", "listByInstanceNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByInstanceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "sql.RecoverableManagedDatabasesClient", "listByInstanceNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByInstanceComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client RecoverableManagedDatabasesClient) ListByInstanceComplete(ctx context.Context, resourceGroupName string, managedInstanceName string) (result RecoverableManagedDatabaseListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecoverableManagedDatabasesClient.ListByInstance") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByInstance(ctx, resourceGroupName, managedInstanceName) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2017-07-01/backup/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2017-07-01/backup/models.go index a90283a131af..bd54eeaa71b5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2017-07-01/backup/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2017-07-01/backup/models.go @@ -96,8 +96,8 @@ const ( ContainerTypeAzureBackupServerContainer1 ContainerTypeBasicProtectionContainer = "AzureBackupServerContainer" // ContainerTypeAzureSQLContainer1 ... ContainerTypeAzureSQLContainer1 ContainerTypeBasicProtectionContainer = "AzureSqlContainer" - // ContainerTypeAzureWorkloadBackupRequest ... - ContainerTypeAzureWorkloadBackupRequest ContainerTypeBasicProtectionContainer = "AzureWorkloadBackupRequest" + // ContainerTypeAzureWorkloadContainer ... + ContainerTypeAzureWorkloadContainer ContainerTypeBasicProtectionContainer = "AzureWorkloadContainer" // ContainerTypeDPMContainer1 ... ContainerTypeDPMContainer1 ContainerTypeBasicProtectionContainer = "DPMContainer" // ContainerTypeGenericContainer1 ... @@ -122,7 +122,7 @@ const ( // PossibleContainerTypeBasicProtectionContainerValues returns an array of possible values for the ContainerTypeBasicProtectionContainer const type. 
func PossibleContainerTypeBasicProtectionContainerValues() []ContainerTypeBasicProtectionContainer { - return []ContainerTypeBasicProtectionContainer{ContainerTypeAzureBackupServerContainer1, ContainerTypeAzureSQLContainer1, ContainerTypeAzureWorkloadBackupRequest, ContainerTypeDPMContainer1, ContainerTypeGenericContainer1, ContainerTypeIaaSVMContainer, ContainerTypeMicrosoftClassicComputevirtualMachines, ContainerTypeMicrosoftComputevirtualMachines, ContainerTypeProtectionContainer, ContainerTypeSQLAGWorkLoadContainer1, ContainerTypeStorageContainer1, ContainerTypeVMAppContainer1, ContainerTypeWindows1} + return []ContainerTypeBasicProtectionContainer{ContainerTypeAzureBackupServerContainer1, ContainerTypeAzureSQLContainer1, ContainerTypeAzureWorkloadContainer, ContainerTypeDPMContainer1, ContainerTypeGenericContainer1, ContainerTypeIaaSVMContainer, ContainerTypeMicrosoftClassicComputevirtualMachines, ContainerTypeMicrosoftComputevirtualMachines, ContainerTypeProtectionContainer, ContainerTypeSQLAGWorkLoadContainer1, ContainerTypeStorageContainer1, ContainerTypeVMAppContainer1, ContainerTypeWindows1} } // CopyOptions enumerates the values for copy options. @@ -181,6 +181,8 @@ const ( DataSourceTypeGenericDataSource DataSourceType = "GenericDataSource" // DataSourceTypeInvalid ... DataSourceTypeInvalid DataSourceType = "Invalid" + // DataSourceTypeSAPAseDatabase ... + DataSourceTypeSAPAseDatabase DataSourceType = "SAPAseDatabase" // DataSourceTypeSAPHanaDatabase ... DataSourceTypeSAPHanaDatabase DataSourceType = "SAPHanaDatabase" // DataSourceTypeSharepoint ... @@ -199,7 +201,7 @@ const ( // PossibleDataSourceTypeValues returns an array of possible values for the DataSourceType const type. 
func PossibleDataSourceTypeValues() []DataSourceType { - return []DataSourceType{DataSourceTypeAzureFileShare, DataSourceTypeAzureSQLDb, DataSourceTypeClient, DataSourceTypeExchange, DataSourceTypeFileFolder, DataSourceTypeGenericDataSource, DataSourceTypeInvalid, DataSourceTypeSAPHanaDatabase, DataSourceTypeSharepoint, DataSourceTypeSQLDataBase, DataSourceTypeSQLDB, DataSourceTypeSystemState, DataSourceTypeVM, DataSourceTypeVMwareVM} + return []DataSourceType{DataSourceTypeAzureFileShare, DataSourceTypeAzureSQLDb, DataSourceTypeClient, DataSourceTypeExchange, DataSourceTypeFileFolder, DataSourceTypeGenericDataSource, DataSourceTypeInvalid, DataSourceTypeSAPAseDatabase, DataSourceTypeSAPHanaDatabase, DataSourceTypeSharepoint, DataSourceTypeSQLDataBase, DataSourceTypeSQLDB, DataSourceTypeSystemState, DataSourceTypeVM, DataSourceTypeVMwareVM} } // DayOfWeek enumerates the values for day of week. @@ -488,6 +490,8 @@ const ( ItemTypeGenericDataSource ItemType = "GenericDataSource" // ItemTypeInvalid ... ItemTypeInvalid ItemType = "Invalid" + // ItemTypeSAPAseDatabase ... + ItemTypeSAPAseDatabase ItemType = "SAPAseDatabase" // ItemTypeSAPHanaDatabase ... ItemTypeSAPHanaDatabase ItemType = "SAPHanaDatabase" // ItemTypeSharepoint ... @@ -506,7 +510,7 @@ const ( // PossibleItemTypeValues returns an array of possible values for the ItemType const type. 
func PossibleItemTypeValues() []ItemType { - return []ItemType{ItemTypeAzureFileShare, ItemTypeAzureSQLDb, ItemTypeClient, ItemTypeExchange, ItemTypeFileFolder, ItemTypeGenericDataSource, ItemTypeInvalid, ItemTypeSAPHanaDatabase, ItemTypeSharepoint, ItemTypeSQLDataBase, ItemTypeSQLDB, ItemTypeSystemState, ItemTypeVM, ItemTypeVMwareVM} + return []ItemType{ItemTypeAzureFileShare, ItemTypeAzureSQLDb, ItemTypeClient, ItemTypeExchange, ItemTypeFileFolder, ItemTypeGenericDataSource, ItemTypeInvalid, ItemTypeSAPAseDatabase, ItemTypeSAPHanaDatabase, ItemTypeSharepoint, ItemTypeSQLDataBase, ItemTypeSQLDB, ItemTypeSystemState, ItemTypeVM, ItemTypeVMwareVM} } // JobOperationType enumerates the values for job operation type. @@ -810,6 +814,8 @@ type ObjectTypeBasicRecoveryPoint string const ( // ObjectTypeAzureFileShareRecoveryPoint ... ObjectTypeAzureFileShareRecoveryPoint ObjectTypeBasicRecoveryPoint = "AzureFileShareRecoveryPoint" + // ObjectTypeAzureWorkloadPointInTimeRecoveryPoint ... + ObjectTypeAzureWorkloadPointInTimeRecoveryPoint ObjectTypeBasicRecoveryPoint = "AzureWorkloadPointInTimeRecoveryPoint" // ObjectTypeAzureWorkloadRecoveryPoint ... ObjectTypeAzureWorkloadRecoveryPoint ObjectTypeBasicRecoveryPoint = "AzureWorkloadRecoveryPoint" // ObjectTypeAzureWorkloadSAPHanaPointInTimeRecoveryPoint ... @@ -830,7 +836,7 @@ const ( // PossibleObjectTypeBasicRecoveryPointValues returns an array of possible values for the ObjectTypeBasicRecoveryPoint const type. 
func PossibleObjectTypeBasicRecoveryPointValues() []ObjectTypeBasicRecoveryPoint { - return []ObjectTypeBasicRecoveryPoint{ObjectTypeAzureFileShareRecoveryPoint, ObjectTypeAzureWorkloadRecoveryPoint, ObjectTypeAzureWorkloadSAPHanaPointInTimeRecoveryPoint, ObjectTypeAzureWorkloadSAPHanaRecoveryPoint, ObjectTypeAzureWorkloadSQLPointInTimeRecoveryPoint, ObjectTypeAzureWorkloadSQLRecoveryPoint, ObjectTypeGenericRecoveryPoint, ObjectTypeIaasVMRecoveryPoint, ObjectTypeRecoveryPoint} + return []ObjectTypeBasicRecoveryPoint{ObjectTypeAzureFileShareRecoveryPoint, ObjectTypeAzureWorkloadPointInTimeRecoveryPoint, ObjectTypeAzureWorkloadRecoveryPoint, ObjectTypeAzureWorkloadSAPHanaPointInTimeRecoveryPoint, ObjectTypeAzureWorkloadSAPHanaRecoveryPoint, ObjectTypeAzureWorkloadSQLPointInTimeRecoveryPoint, ObjectTypeAzureWorkloadSQLRecoveryPoint, ObjectTypeGenericRecoveryPoint, ObjectTypeIaasVMRecoveryPoint, ObjectTypeRecoveryPoint} } // ObjectTypeBasicRequest enumerates the values for object type basic request. @@ -858,6 +864,8 @@ type ObjectTypeBasicRestoreRequest string const ( // ObjectTypeAzureFileShareRestoreRequest ... ObjectTypeAzureFileShareRestoreRequest ObjectTypeBasicRestoreRequest = "AzureFileShareRestoreRequest" + // ObjectTypeAzureWorkloadPointInTimeRestoreRequest ... + ObjectTypeAzureWorkloadPointInTimeRestoreRequest ObjectTypeBasicRestoreRequest = "AzureWorkloadPointInTimeRestoreRequest" // ObjectTypeAzureWorkloadRestoreRequest ... ObjectTypeAzureWorkloadRestoreRequest ObjectTypeBasicRestoreRequest = "AzureWorkloadRestoreRequest" // ObjectTypeAzureWorkloadSAPHanaPointInTimeRestoreRequest ... @@ -876,7 +884,7 @@ const ( // PossibleObjectTypeBasicRestoreRequestValues returns an array of possible values for the ObjectTypeBasicRestoreRequest const type. 
func PossibleObjectTypeBasicRestoreRequestValues() []ObjectTypeBasicRestoreRequest { - return []ObjectTypeBasicRestoreRequest{ObjectTypeAzureFileShareRestoreRequest, ObjectTypeAzureWorkloadRestoreRequest, ObjectTypeAzureWorkloadSAPHanaPointInTimeRestoreRequest, ObjectTypeAzureWorkloadSAPHanaRestoreRequest, ObjectTypeAzureWorkloadSQLPointInTimeRestoreRequest, ObjectTypeAzureWorkloadSQLRestoreRequest, ObjectTypeIaasVMRestoreRequest, ObjectTypeRestoreRequest} + return []ObjectTypeBasicRestoreRequest{ObjectTypeAzureFileShareRestoreRequest, ObjectTypeAzureWorkloadPointInTimeRestoreRequest, ObjectTypeAzureWorkloadRestoreRequest, ObjectTypeAzureWorkloadSAPHanaPointInTimeRestoreRequest, ObjectTypeAzureWorkloadSAPHanaRestoreRequest, ObjectTypeAzureWorkloadSQLPointInTimeRestoreRequest, ObjectTypeAzureWorkloadSQLRestoreRequest, ObjectTypeIaasVMRestoreRequest, ObjectTypeRestoreRequest} } // ObjectTypeBasicValidateOperationRequest enumerates the values for object type basic validate operation @@ -918,6 +926,23 @@ func PossibleOperationStatusValuesValues() []OperationStatusValues { return []OperationStatusValues{OperationStatusValuesCanceled, OperationStatusValuesFailed, OperationStatusValuesInProgress, OperationStatusValuesInvalid, OperationStatusValuesSucceeded} } +// OperationType enumerates the values for operation type. +type OperationType string + +const ( + // OperationTypeInvalid ... + OperationTypeInvalid OperationType = "Invalid" + // OperationTypeRegister ... + OperationTypeRegister OperationType = "Register" + // OperationTypeReregister ... + OperationTypeReregister OperationType = "Reregister" +) + +// PossibleOperationTypeValues returns an array of possible values for the OperationType const type. +func PossibleOperationTypeValues() []OperationType { + return []OperationType{OperationTypeInvalid, OperationTypeRegister, OperationTypeReregister} +} + // OverwriteOptions enumerates the values for overwrite options. 
type OverwriteOptions string @@ -987,6 +1012,10 @@ const ( ProtectableItemTypeMicrosoftClassicComputevirtualMachines ProtectableItemType = "Microsoft.ClassicCompute/virtualMachines" // ProtectableItemTypeMicrosoftComputevirtualMachines ... ProtectableItemTypeMicrosoftComputevirtualMachines ProtectableItemType = "Microsoft.Compute/virtualMachines" + // ProtectableItemTypeSAPAseDatabase ... + ProtectableItemTypeSAPAseDatabase ProtectableItemType = "SAPAseDatabase" + // ProtectableItemTypeSAPAseSystem ... + ProtectableItemTypeSAPAseSystem ProtectableItemType = "SAPAseSystem" // ProtectableItemTypeSAPHanaDatabase ... ProtectableItemTypeSAPHanaDatabase ProtectableItemType = "SAPHanaDatabase" // ProtectableItemTypeSAPHanaSystem ... @@ -1003,7 +1032,7 @@ const ( // PossibleProtectableItemTypeValues returns an array of possible values for the ProtectableItemType const type. func PossibleProtectableItemTypeValues() []ProtectableItemType { - return []ProtectableItemType{ProtectableItemTypeAzureFileShare, ProtectableItemTypeAzureVMWorkloadProtectableItem, ProtectableItemTypeIaaSVMProtectableItem, ProtectableItemTypeMicrosoftClassicComputevirtualMachines, ProtectableItemTypeMicrosoftComputevirtualMachines, ProtectableItemTypeSAPHanaDatabase, ProtectableItemTypeSAPHanaSystem, ProtectableItemTypeSQLAvailabilityGroupContainer, ProtectableItemTypeSQLDataBase, ProtectableItemTypeSQLInstance, ProtectableItemTypeWorkloadProtectableItem} + return []ProtectableItemType{ProtectableItemTypeAzureFileShare, ProtectableItemTypeAzureVMWorkloadProtectableItem, ProtectableItemTypeIaaSVMProtectableItem, ProtectableItemTypeMicrosoftClassicComputevirtualMachines, ProtectableItemTypeMicrosoftComputevirtualMachines, ProtectableItemTypeSAPAseDatabase, ProtectableItemTypeSAPAseSystem, ProtectableItemTypeSAPHanaDatabase, ProtectableItemTypeSAPHanaSystem, ProtectableItemTypeSQLAvailabilityGroupContainer, ProtectableItemTypeSQLDataBase, ProtectableItemTypeSQLInstance, 
ProtectableItemTypeWorkloadProtectableItem} } // ProtectedItemHealthStatus enumerates the values for protected item health status. @@ -1060,6 +1089,8 @@ const ( ProtectedItemTypeAzureIaaSVMProtectedItem ProtectedItemType = "AzureIaaSVMProtectedItem" // ProtectedItemTypeAzureVMWorkloadProtectedItem ... ProtectedItemTypeAzureVMWorkloadProtectedItem ProtectedItemType = "AzureVmWorkloadProtectedItem" + // ProtectedItemTypeAzureVMWorkloadSAPAseDatabase ... + ProtectedItemTypeAzureVMWorkloadSAPAseDatabase ProtectedItemType = "AzureVmWorkloadSAPAseDatabase" // ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase ... ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase ProtectedItemType = "AzureVmWorkloadSAPHanaDatabase" // ProtectedItemTypeAzureVMWorkloadSQLDatabase ... @@ -1082,7 +1113,7 @@ const ( // PossibleProtectedItemTypeValues returns an array of possible values for the ProtectedItemType const type. func PossibleProtectedItemTypeValues() []ProtectedItemType { - return []ProtectedItemType{ProtectedItemTypeAzureFileShareProtectedItem, ProtectedItemTypeAzureIaaSVMProtectedItem, ProtectedItemTypeAzureVMWorkloadProtectedItem, ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase, ProtectedItemTypeAzureVMWorkloadSQLDatabase, ProtectedItemTypeDPMProtectedItem, ProtectedItemTypeGenericProtectedItem, ProtectedItemTypeMabFileFolderProtectedItem, ProtectedItemTypeMicrosoftClassicComputevirtualMachines, ProtectedItemTypeMicrosoftComputevirtualMachines, ProtectedItemTypeMicrosoftSqlserversdatabases, ProtectedItemTypeProtectedItem} + return []ProtectedItemType{ProtectedItemTypeAzureFileShareProtectedItem, ProtectedItemTypeAzureIaaSVMProtectedItem, ProtectedItemTypeAzureVMWorkloadProtectedItem, ProtectedItemTypeAzureVMWorkloadSAPAseDatabase, ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase, ProtectedItemTypeAzureVMWorkloadSQLDatabase, ProtectedItemTypeDPMProtectedItem, ProtectedItemTypeGenericProtectedItem, ProtectedItemTypeMabFileFolderProtectedItem, 
ProtectedItemTypeMicrosoftClassicComputevirtualMachines, ProtectedItemTypeMicrosoftComputevirtualMachines, ProtectedItemTypeMicrosoftSqlserversdatabases, ProtectedItemTypeProtectedItem} } // ProtectionIntentItemType enumerates the values for protection intent item type. @@ -1534,6 +1565,10 @@ type WorkloadItemType string const ( // WorkloadItemTypeInvalid ... WorkloadItemTypeInvalid WorkloadItemType = "Invalid" + // WorkloadItemTypeSAPAseDatabase ... + WorkloadItemTypeSAPAseDatabase WorkloadItemType = "SAPAseDatabase" + // WorkloadItemTypeSAPAseSystem ... + WorkloadItemTypeSAPAseSystem WorkloadItemType = "SAPAseSystem" // WorkloadItemTypeSAPHanaDatabase ... WorkloadItemTypeSAPHanaDatabase WorkloadItemType = "SAPHanaDatabase" // WorkloadItemTypeSAPHanaSystem ... @@ -1546,7 +1581,7 @@ const ( // PossibleWorkloadItemTypeValues returns an array of possible values for the WorkloadItemType const type. func PossibleWorkloadItemTypeValues() []WorkloadItemType { - return []WorkloadItemType{WorkloadItemTypeInvalid, WorkloadItemTypeSAPHanaDatabase, WorkloadItemTypeSAPHanaSystem, WorkloadItemTypeSQLDataBase, WorkloadItemTypeSQLInstance} + return []WorkloadItemType{WorkloadItemTypeInvalid, WorkloadItemTypeSAPAseDatabase, WorkloadItemTypeSAPAseSystem, WorkloadItemTypeSAPHanaDatabase, WorkloadItemTypeSAPHanaSystem, WorkloadItemTypeSQLDataBase, WorkloadItemTypeSQLInstance} } // WorkloadItemTypeBasicWorkloadItem enumerates the values for workload item type basic workload item. @@ -1555,6 +1590,10 @@ type WorkloadItemTypeBasicWorkloadItem string const ( // WorkloadItemTypeAzureVMWorkloadItem ... WorkloadItemTypeAzureVMWorkloadItem WorkloadItemTypeBasicWorkloadItem = "AzureVmWorkloadItem" + // WorkloadItemTypeSAPAseDatabase1 ... + WorkloadItemTypeSAPAseDatabase1 WorkloadItemTypeBasicWorkloadItem = "SAPAseDatabase" + // WorkloadItemTypeSAPAseSystem1 ... + WorkloadItemTypeSAPAseSystem1 WorkloadItemTypeBasicWorkloadItem = "SAPAseSystem" // WorkloadItemTypeSAPHanaDatabase1 ... 
WorkloadItemTypeSAPHanaDatabase1 WorkloadItemTypeBasicWorkloadItem = "SAPHanaDatabase" // WorkloadItemTypeSAPHanaSystem1 ... @@ -1569,7 +1608,7 @@ const ( // PossibleWorkloadItemTypeBasicWorkloadItemValues returns an array of possible values for the WorkloadItemTypeBasicWorkloadItem const type. func PossibleWorkloadItemTypeBasicWorkloadItemValues() []WorkloadItemTypeBasicWorkloadItem { - return []WorkloadItemTypeBasicWorkloadItem{WorkloadItemTypeAzureVMWorkloadItem, WorkloadItemTypeSAPHanaDatabase1, WorkloadItemTypeSAPHanaSystem1, WorkloadItemTypeSQLDataBase1, WorkloadItemTypeSQLInstance1, WorkloadItemTypeWorkloadItem} + return []WorkloadItemTypeBasicWorkloadItem{WorkloadItemTypeAzureVMWorkloadItem, WorkloadItemTypeSAPAseDatabase1, WorkloadItemTypeSAPAseSystem1, WorkloadItemTypeSAPHanaDatabase1, WorkloadItemTypeSAPHanaSystem1, WorkloadItemTypeSQLDataBase1, WorkloadItemTypeSQLInstance1, WorkloadItemTypeWorkloadItem} } // WorkloadType enumerates the values for workload type. @@ -1590,6 +1629,8 @@ const ( WorkloadTypeGenericDataSource WorkloadType = "GenericDataSource" // WorkloadTypeInvalid ... WorkloadTypeInvalid WorkloadType = "Invalid" + // WorkloadTypeSAPAseDatabase ... + WorkloadTypeSAPAseDatabase WorkloadType = "SAPAseDatabase" // WorkloadTypeSAPHanaDatabase ... WorkloadTypeSAPHanaDatabase WorkloadType = "SAPHanaDatabase" // WorkloadTypeSharepoint ... @@ -1608,7 +1649,7 @@ const ( // PossibleWorkloadTypeValues returns an array of possible values for the WorkloadType const type. 
func PossibleWorkloadTypeValues() []WorkloadType { - return []WorkloadType{WorkloadTypeAzureFileShare, WorkloadTypeAzureSQLDb, WorkloadTypeClient, WorkloadTypeExchange, WorkloadTypeFileFolder, WorkloadTypeGenericDataSource, WorkloadTypeInvalid, WorkloadTypeSAPHanaDatabase, WorkloadTypeSharepoint, WorkloadTypeSQLDataBase, WorkloadTypeSQLDB, WorkloadTypeSystemState, WorkloadTypeVM, WorkloadTypeVMwareVM} + return []WorkloadType{WorkloadTypeAzureFileShare, WorkloadTypeAzureSQLDb, WorkloadTypeClient, WorkloadTypeExchange, WorkloadTypeFileFolder, WorkloadTypeGenericDataSource, WorkloadTypeInvalid, WorkloadTypeSAPAseDatabase, WorkloadTypeSAPHanaDatabase, WorkloadTypeSharepoint, WorkloadTypeSQLDataBase, WorkloadTypeSQLDB, WorkloadTypeSystemState, WorkloadTypeVM, WorkloadTypeVMwareVM} } // AzureBackupGoalFeatureSupportRequest azure backup goal feature specific request. @@ -1673,7 +1714,7 @@ type AzureBackupServerContainer struct { RegistrationStatus *string `json:"registrationStatus,omitempty"` // HealthStatus - Status of health of the container. 
HealthStatus *string `json:"healthStatus,omitempty"` - // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadBackupRequest', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' + // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadContainer', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' ContainerType ContainerTypeBasicProtectionContainer `json:"containerType,omitempty"` } @@ -1773,6 +1814,11 @@ func (absc AzureBackupServerContainer) AsDpmContainer() (*DpmContainer, bool) { return nil, false } +// AsBasicDpmContainer is the BasicProtectionContainer implementation for AzureBackupServerContainer. +func (absc AzureBackupServerContainer) AsBasicDpmContainer() (BasicDpmContainer, bool) { + return &absc, true +} + // AsGenericContainer is the BasicProtectionContainer implementation for AzureBackupServerContainer. func (absc AzureBackupServerContainer) AsGenericContainer() (*GenericContainer, bool) { return nil, false @@ -1961,7 +2007,7 @@ type AzureFileShareProtectableItem struct { FriendlyName *string `json:"friendlyName,omitempty"` // ProtectionState - State of the back up item. 
Possible values include: 'ProtectionStatusInvalid', 'ProtectionStatusNotProtected', 'ProtectionStatusProtecting', 'ProtectionStatusProtected', 'ProtectionStatusProtectionFailed' ProtectionState ProtectionStatus `json:"protectionState,omitempty"` - // ProtectableItemType - Possible values include: 'ProtectableItemTypeWorkloadProtectableItem', 'ProtectableItemTypeAzureFileShare', 'ProtectableItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectableItemTypeMicrosoftComputevirtualMachines', 'ProtectableItemTypeAzureVMWorkloadProtectableItem', 'ProtectableItemTypeSAPHanaDatabase', 'ProtectableItemTypeSAPHanaSystem', 'ProtectableItemTypeSQLAvailabilityGroupContainer', 'ProtectableItemTypeSQLDataBase', 'ProtectableItemTypeSQLInstance', 'ProtectableItemTypeIaaSVMProtectableItem' + // ProtectableItemType - Possible values include: 'ProtectableItemTypeWorkloadProtectableItem', 'ProtectableItemTypeAzureFileShare', 'ProtectableItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectableItemTypeMicrosoftComputevirtualMachines', 'ProtectableItemTypeAzureVMWorkloadProtectableItem', 'ProtectableItemTypeSAPAseDatabase', 'ProtectableItemTypeSAPAseSystem', 'ProtectableItemTypeSAPHanaDatabase', 'ProtectableItemTypeSAPHanaSystem', 'ProtectableItemTypeSQLAvailabilityGroupContainer', 'ProtectableItemTypeSQLDataBase', 'ProtectableItemTypeSQLInstance', 'ProtectableItemTypeIaaSVMProtectableItem' ProtectableItemType ProtectableItemType `json:"protectableItemType,omitempty"` } @@ -2021,6 +2067,16 @@ func (afspi AzureFileShareProtectableItem) AsBasicAzureVMWorkloadProtectableItem return nil, false } +// AsAzureVMWorkloadSAPAseDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureFileShareProtectableItem. 
+func (afspi AzureFileShareProtectableItem) AsAzureVMWorkloadSAPAseDatabaseProtectableItem() (*AzureVMWorkloadSAPAseDatabaseProtectableItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSAPAseSystemProtectableItem is the BasicWorkloadProtectableItem implementation for AzureFileShareProtectableItem. +func (afspi AzureFileShareProtectableItem) AsAzureVMWorkloadSAPAseSystemProtectableItem() (*AzureVMWorkloadSAPAseSystemProtectableItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureFileShareProtectableItem. func (afspi AzureFileShareProtectableItem) AsAzureVMWorkloadSAPHanaDatabaseProtectableItem() (*AzureVMWorkloadSAPHanaDatabaseProtectableItem, bool) { return nil, false @@ -2084,7 +2140,7 @@ type AzureFileshareProtectedItem struct { ExtendedInfo *AzureFileshareProtectedItemExtendedInfo `json:"extendedInfo,omitempty"` // BackupManagementType - Type of backup management for the backed up item. Possible values include: 'ManagementTypeInvalid', 'ManagementTypeAzureIaasVM', 'ManagementTypeMAB', 'ManagementTypeDPM', 'ManagementTypeAzureBackupServer', 'ManagementTypeAzureSQL', 'ManagementTypeAzureStorage', 'ManagementTypeAzureWorkload', 'ManagementTypeDefaultBackup' BackupManagementType ManagementType `json:"backupManagementType,omitempty"` - // WorkloadType - Type of workload this item represents. Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase' + // WorkloadType - Type of workload this item represents. 
Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase', 'DataSourceTypeSAPAseDatabase' WorkloadType DataSourceType `json:"workloadType,omitempty"` // ContainerName - Unique name of container ContainerName *string `json:"containerName,omitempty"` @@ -2098,7 +2154,7 @@ type AzureFileshareProtectedItem struct { BackupSetName *string `json:"backupSetName,omitempty"` // CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover' CreateMode CreateMode `json:"createMode,omitempty"` - // ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem' + // ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 
'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem' ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"` } @@ -2197,6 +2253,11 @@ func (afpi AzureFileshareProtectedItem) AsBasicAzureVMWorkloadProtectedItem() (B return nil, false } +// AsAzureVMWorkloadSAPAseDatabaseProtectedItem is the BasicProtectedItem implementation for AzureFileshareProtectedItem. +func (afpi AzureFileshareProtectedItem) AsAzureVMWorkloadSAPAseDatabaseProtectedItem() (*AzureVMWorkloadSAPAseDatabaseProtectedItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseProtectedItem is the BasicProtectedItem implementation for AzureFileshareProtectedItem. func (afpi AzureFileshareProtectedItem) AsAzureVMWorkloadSAPHanaDatabaseProtectedItem() (*AzureVMWorkloadSAPHanaDatabaseProtectedItem, bool) { return nil, false @@ -2244,7 +2305,7 @@ type AzureFileshareProtectedItemExtendedInfo struct { // AzureFileShareProtectionPolicy azureStorage backup policy. type AzureFileShareProtectionPolicy struct { - // WorkLoadType - Type of workload for the backup management. Possible values include: 'WorkloadTypeInvalid', 'WorkloadTypeVM', 'WorkloadTypeFileFolder', 'WorkloadTypeAzureSQLDb', 'WorkloadTypeSQLDB', 'WorkloadTypeExchange', 'WorkloadTypeSharepoint', 'WorkloadTypeVMwareVM', 'WorkloadTypeSystemState', 'WorkloadTypeClient', 'WorkloadTypeGenericDataSource', 'WorkloadTypeSQLDataBase', 'WorkloadTypeAzureFileShare', 'WorkloadTypeSAPHanaDatabase' + // WorkLoadType - Type of workload for the backup management. 
Possible values include: 'WorkloadTypeInvalid', 'WorkloadTypeVM', 'WorkloadTypeFileFolder', 'WorkloadTypeAzureSQLDb', 'WorkloadTypeSQLDB', 'WorkloadTypeExchange', 'WorkloadTypeSharepoint', 'WorkloadTypeVMwareVM', 'WorkloadTypeSystemState', 'WorkloadTypeClient', 'WorkloadTypeGenericDataSource', 'WorkloadTypeSQLDataBase', 'WorkloadTypeAzureFileShare', 'WorkloadTypeSAPHanaDatabase', 'WorkloadTypeSAPAseDatabase' WorkLoadType WorkloadType `json:"workLoadType,omitempty"` // SchedulePolicy - Backup schedule specified as part of backup policy. SchedulePolicy BasicSchedulePolicy `json:"schedulePolicy,omitempty"` @@ -2394,7 +2455,7 @@ type AzureFileShareRecoveryPoint struct { RecoveryPointTime *date.Time `json:"recoveryPointTime,omitempty"` // FileShareSnapshotURI - Contains Url to the snapshot of fileshare, if applicable FileShareSnapshotURI *string `json:"fileShareSnapshotUri,omitempty"` - // ObjectType - Possible values include: 'ObjectTypeRecoveryPoint', 'ObjectTypeAzureFileShareRecoveryPoint', 'ObjectTypeAzureWorkloadRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaRecoveryPoint', 'ObjectTypeAzureWorkloadSQLPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSQLRecoveryPoint', 'ObjectTypeGenericRecoveryPoint', 'ObjectTypeIaasVMRecoveryPoint' + // ObjectType - Possible values include: 'ObjectTypeRecoveryPoint', 'ObjectTypeAzureFileShareRecoveryPoint', 'ObjectTypeAzureWorkloadPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaRecoveryPoint', 'ObjectTypeAzureWorkloadSQLPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSQLRecoveryPoint', 'ObjectTypeGenericRecoveryPoint', 'ObjectTypeIaasVMRecoveryPoint' ObjectType ObjectTypeBasicRecoveryPoint `json:"objectType,omitempty"` } @@ -2422,6 +2483,16 @@ func (afsrp AzureFileShareRecoveryPoint) AsAzureFileShareRecoveryPoint() (*Azure return &afsrp, true } +// 
AsAzureWorkloadPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for AzureFileShareRecoveryPoint. +func (afsrp AzureFileShareRecoveryPoint) AsAzureWorkloadPointInTimeRecoveryPoint() (*AzureWorkloadPointInTimeRecoveryPoint, bool) { + return nil, false +} + +// AsBasicAzureWorkloadPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for AzureFileShareRecoveryPoint. +func (afsrp AzureFileShareRecoveryPoint) AsBasicAzureWorkloadPointInTimeRecoveryPoint() (BasicAzureWorkloadPointInTimeRecoveryPoint, bool) { + return nil, false +} + // AsAzureWorkloadRecoveryPoint is the BasicRecoveryPoint implementation for AzureFileShareRecoveryPoint. func (afsrp AzureFileShareRecoveryPoint) AsAzureWorkloadRecoveryPoint() (*AzureWorkloadRecoveryPoint, bool) { return nil, false @@ -2442,11 +2513,6 @@ func (afsrp AzureFileShareRecoveryPoint) AsAzureWorkloadSAPHanaRecoveryPoint() ( return nil, false } -// AsBasicAzureWorkloadSAPHanaRecoveryPoint is the BasicRecoveryPoint implementation for AzureFileShareRecoveryPoint. -func (afsrp AzureFileShareRecoveryPoint) AsBasicAzureWorkloadSAPHanaRecoveryPoint() (BasicAzureWorkloadSAPHanaRecoveryPoint, bool) { - return nil, false -} - // AsAzureWorkloadSQLPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for AzureFileShareRecoveryPoint. 
func (afsrp AzureFileShareRecoveryPoint) AsAzureWorkloadSQLPointInTimeRecoveryPoint() (*AzureWorkloadSQLPointInTimeRecoveryPoint, bool) { return nil, false @@ -2496,7 +2562,7 @@ type AzureFileShareRestoreRequest struct { RestoreFileSpecs *[]RestoreFileSpecs `json:"restoreFileSpecs,omitempty"` // TargetDetails - Target File Share Details TargetDetails *TargetAFSRestoreInfo `json:"targetDetails,omitempty"` - // ObjectType - Possible values include: 'ObjectTypeRestoreRequest', 'ObjectTypeAzureFileShareRestoreRequest', 'ObjectTypeAzureWorkloadRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaRestoreRequest', 'ObjectTypeAzureWorkloadSQLPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSQLRestoreRequest', 'ObjectTypeIaasVMRestoreRequest' + // ObjectType - Possible values include: 'ObjectTypeRestoreRequest', 'ObjectTypeAzureFileShareRestoreRequest', 'ObjectTypeAzureWorkloadPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaRestoreRequest', 'ObjectTypeAzureWorkloadSQLPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSQLRestoreRequest', 'ObjectTypeIaasVMRestoreRequest' ObjectType ObjectTypeBasicRestoreRequest `json:"objectType,omitempty"` } @@ -2533,6 +2599,11 @@ func (afsrr AzureFileShareRestoreRequest) AsAzureFileShareRestoreRequest() (*Azu return &afsrr, true } +// AsAzureWorkloadPointInTimeRestoreRequest is the BasicRestoreRequest implementation for AzureFileShareRestoreRequest. +func (afsrr AzureFileShareRestoreRequest) AsAzureWorkloadPointInTimeRestoreRequest() (*AzureWorkloadPointInTimeRestoreRequest, bool) { + return nil, false +} + // AsAzureWorkloadRestoreRequest is the BasicRestoreRequest implementation for AzureFileShareRestoreRequest. 
func (afsrr AzureFileShareRestoreRequest) AsAzureWorkloadRestoreRequest() (*AzureWorkloadRestoreRequest, bool) { return nil, false @@ -2605,7 +2676,7 @@ type AzureIaaSClassicComputeVMContainer struct { RegistrationStatus *string `json:"registrationStatus,omitempty"` // HealthStatus - Status of health of the container. HealthStatus *string `json:"healthStatus,omitempty"` - // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadBackupRequest', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' + // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadContainer', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' ContainerType ContainerTypeBasicProtectionContainer `json:"containerType,omitempty"` } @@ -2690,6 +2761,11 @@ func (aisccvc AzureIaaSClassicComputeVMContainer) AsDpmContainer() (*DpmContaine return nil, false } +// AsBasicDpmContainer is the BasicProtectionContainer implementation for AzureIaaSClassicComputeVMContainer. +func (aisccvc AzureIaaSClassicComputeVMContainer) AsBasicDpmContainer() (BasicDpmContainer, bool) { + return nil, false +} + // AsGenericContainer is the BasicProtectionContainer implementation for AzureIaaSClassicComputeVMContainer. 
func (aisccvc AzureIaaSClassicComputeVMContainer) AsGenericContainer() (*GenericContainer, bool) { return nil, false @@ -2733,7 +2809,7 @@ type AzureIaaSClassicComputeVMProtectableItem struct { FriendlyName *string `json:"friendlyName,omitempty"` // ProtectionState - State of the back up item. Possible values include: 'ProtectionStatusInvalid', 'ProtectionStatusNotProtected', 'ProtectionStatusProtecting', 'ProtectionStatusProtected', 'ProtectionStatusProtectionFailed' ProtectionState ProtectionStatus `json:"protectionState,omitempty"` - // ProtectableItemType - Possible values include: 'ProtectableItemTypeWorkloadProtectableItem', 'ProtectableItemTypeAzureFileShare', 'ProtectableItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectableItemTypeMicrosoftComputevirtualMachines', 'ProtectableItemTypeAzureVMWorkloadProtectableItem', 'ProtectableItemTypeSAPHanaDatabase', 'ProtectableItemTypeSAPHanaSystem', 'ProtectableItemTypeSQLAvailabilityGroupContainer', 'ProtectableItemTypeSQLDataBase', 'ProtectableItemTypeSQLInstance', 'ProtectableItemTypeIaaSVMProtectableItem' + // ProtectableItemType - Possible values include: 'ProtectableItemTypeWorkloadProtectableItem', 'ProtectableItemTypeAzureFileShare', 'ProtectableItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectableItemTypeMicrosoftComputevirtualMachines', 'ProtectableItemTypeAzureVMWorkloadProtectableItem', 'ProtectableItemTypeSAPAseDatabase', 'ProtectableItemTypeSAPAseSystem', 'ProtectableItemTypeSAPHanaDatabase', 'ProtectableItemTypeSAPHanaSystem', 'ProtectableItemTypeSQLAvailabilityGroupContainer', 'ProtectableItemTypeSQLDataBase', 'ProtectableItemTypeSQLInstance', 'ProtectableItemTypeIaaSVMProtectableItem' ProtectableItemType ProtectableItemType `json:"protectableItemType,omitempty"` } @@ -2787,6 +2863,16 @@ func (aisccvpi AzureIaaSClassicComputeVMProtectableItem) AsBasicAzureVMWorkloadP return nil, false } +// AsAzureVMWorkloadSAPAseDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation 
for AzureIaaSClassicComputeVMProtectableItem. +func (aisccvpi AzureIaaSClassicComputeVMProtectableItem) AsAzureVMWorkloadSAPAseDatabaseProtectableItem() (*AzureVMWorkloadSAPAseDatabaseProtectableItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSAPAseSystemProtectableItem is the BasicWorkloadProtectableItem implementation for AzureIaaSClassicComputeVMProtectableItem. +func (aisccvpi AzureIaaSClassicComputeVMProtectableItem) AsAzureVMWorkloadSAPAseSystemProtectableItem() (*AzureVMWorkloadSAPAseSystemProtectableItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureIaaSClassicComputeVMProtectableItem. func (aisccvpi AzureIaaSClassicComputeVMProtectableItem) AsAzureVMWorkloadSAPHanaDatabaseProtectableItem() (*AzureVMWorkloadSAPHanaDatabaseProtectableItem, bool) { return nil, false @@ -2857,7 +2943,7 @@ type AzureIaaSClassicComputeVMProtectedItem struct { ExtendedInfo *AzureIaaSVMProtectedItemExtendedInfo `json:"extendedInfo,omitempty"` // BackupManagementType - Type of backup management for the backed up item. Possible values include: 'ManagementTypeInvalid', 'ManagementTypeAzureIaasVM', 'ManagementTypeMAB', 'ManagementTypeDPM', 'ManagementTypeAzureBackupServer', 'ManagementTypeAzureSQL', 'ManagementTypeAzureStorage', 'ManagementTypeAzureWorkload', 'ManagementTypeDefaultBackup' BackupManagementType ManagementType `json:"backupManagementType,omitempty"` - // WorkloadType - Type of workload this item represents. 
Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase' + // WorkloadType - Type of workload this item represents. Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase', 'DataSourceTypeSAPAseDatabase' WorkloadType DataSourceType `json:"workloadType,omitempty"` // ContainerName - Unique name of container ContainerName *string `json:"containerName,omitempty"` @@ -2871,7 +2957,7 @@ type AzureIaaSClassicComputeVMProtectedItem struct { BackupSetName *string `json:"backupSetName,omitempty"` // CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. 
Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover' CreateMode CreateMode `json:"createMode,omitempty"` - // ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem' + // ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem' ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"` } @@ -2979,6 +3065,11 @@ func (aisccvpi AzureIaaSClassicComputeVMProtectedItem) AsBasicAzureVMWorkloadPro return nil, false } +// AsAzureVMWorkloadSAPAseDatabaseProtectedItem is the BasicProtectedItem implementation for AzureIaaSClassicComputeVMProtectedItem. 
+func (aisccvpi AzureIaaSClassicComputeVMProtectedItem) AsAzureVMWorkloadSAPAseDatabaseProtectedItem() (*AzureVMWorkloadSAPAseDatabaseProtectedItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseProtectedItem is the BasicProtectedItem implementation for AzureIaaSClassicComputeVMProtectedItem. func (aisccvpi AzureIaaSClassicComputeVMProtectedItem) AsAzureVMWorkloadSAPHanaDatabaseProtectedItem() (*AzureVMWorkloadSAPHanaDatabaseProtectedItem, bool) { return nil, false @@ -3031,7 +3122,7 @@ type AzureIaaSComputeVMContainer struct { RegistrationStatus *string `json:"registrationStatus,omitempty"` // HealthStatus - Status of health of the container. HealthStatus *string `json:"healthStatus,omitempty"` - // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadBackupRequest', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' + // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadContainer', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' ContainerType ContainerTypeBasicProtectionContainer `json:"containerType,omitempty"` } @@ -3116,6 +3207,11 @@ func (aiscvc AzureIaaSComputeVMContainer) AsDpmContainer() (*DpmContainer, bool) return nil, false } 
+// AsBasicDpmContainer is the BasicProtectionContainer implementation for AzureIaaSComputeVMContainer. +func (aiscvc AzureIaaSComputeVMContainer) AsBasicDpmContainer() (BasicDpmContainer, bool) { + return nil, false +} + // AsGenericContainer is the BasicProtectionContainer implementation for AzureIaaSComputeVMContainer. func (aiscvc AzureIaaSComputeVMContainer) AsGenericContainer() (*GenericContainer, bool) { return nil, false @@ -3159,7 +3255,7 @@ type AzureIaaSComputeVMProtectableItem struct { FriendlyName *string `json:"friendlyName,omitempty"` // ProtectionState - State of the back up item. Possible values include: 'ProtectionStatusInvalid', 'ProtectionStatusNotProtected', 'ProtectionStatusProtecting', 'ProtectionStatusProtected', 'ProtectionStatusProtectionFailed' ProtectionState ProtectionStatus `json:"protectionState,omitempty"` - // ProtectableItemType - Possible values include: 'ProtectableItemTypeWorkloadProtectableItem', 'ProtectableItemTypeAzureFileShare', 'ProtectableItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectableItemTypeMicrosoftComputevirtualMachines', 'ProtectableItemTypeAzureVMWorkloadProtectableItem', 'ProtectableItemTypeSAPHanaDatabase', 'ProtectableItemTypeSAPHanaSystem', 'ProtectableItemTypeSQLAvailabilityGroupContainer', 'ProtectableItemTypeSQLDataBase', 'ProtectableItemTypeSQLInstance', 'ProtectableItemTypeIaaSVMProtectableItem' + // ProtectableItemType - Possible values include: 'ProtectableItemTypeWorkloadProtectableItem', 'ProtectableItemTypeAzureFileShare', 'ProtectableItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectableItemTypeMicrosoftComputevirtualMachines', 'ProtectableItemTypeAzureVMWorkloadProtectableItem', 'ProtectableItemTypeSAPAseDatabase', 'ProtectableItemTypeSAPAseSystem', 'ProtectableItemTypeSAPHanaDatabase', 'ProtectableItemTypeSAPHanaSystem', 'ProtectableItemTypeSQLAvailabilityGroupContainer', 'ProtectableItemTypeSQLDataBase', 'ProtectableItemTypeSQLInstance', 
'ProtectableItemTypeIaaSVMProtectableItem' ProtectableItemType ProtectableItemType `json:"protectableItemType,omitempty"` } @@ -3213,6 +3309,16 @@ func (aiscvpi AzureIaaSComputeVMProtectableItem) AsBasicAzureVMWorkloadProtectab return nil, false } +// AsAzureVMWorkloadSAPAseDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureIaaSComputeVMProtectableItem. +func (aiscvpi AzureIaaSComputeVMProtectableItem) AsAzureVMWorkloadSAPAseDatabaseProtectableItem() (*AzureVMWorkloadSAPAseDatabaseProtectableItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSAPAseSystemProtectableItem is the BasicWorkloadProtectableItem implementation for AzureIaaSComputeVMProtectableItem. +func (aiscvpi AzureIaaSComputeVMProtectableItem) AsAzureVMWorkloadSAPAseSystemProtectableItem() (*AzureVMWorkloadSAPAseSystemProtectableItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureIaaSComputeVMProtectableItem. func (aiscvpi AzureIaaSComputeVMProtectableItem) AsAzureVMWorkloadSAPHanaDatabaseProtectableItem() (*AzureVMWorkloadSAPHanaDatabaseProtectableItem, bool) { return nil, false @@ -3283,7 +3389,7 @@ type AzureIaaSComputeVMProtectedItem struct { ExtendedInfo *AzureIaaSVMProtectedItemExtendedInfo `json:"extendedInfo,omitempty"` // BackupManagementType - Type of backup management for the backed up item. Possible values include: 'ManagementTypeInvalid', 'ManagementTypeAzureIaasVM', 'ManagementTypeMAB', 'ManagementTypeDPM', 'ManagementTypeAzureBackupServer', 'ManagementTypeAzureSQL', 'ManagementTypeAzureStorage', 'ManagementTypeAzureWorkload', 'ManagementTypeDefaultBackup' BackupManagementType ManagementType `json:"backupManagementType,omitempty"` - // WorkloadType - Type of workload this item represents. 
Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase' + // WorkloadType - Type of workload this item represents. Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase', 'DataSourceTypeSAPAseDatabase' WorkloadType DataSourceType `json:"workloadType,omitempty"` // ContainerName - Unique name of container ContainerName *string `json:"containerName,omitempty"` @@ -3297,7 +3403,7 @@ type AzureIaaSComputeVMProtectedItem struct { BackupSetName *string `json:"backupSetName,omitempty"` // CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. 
Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover' CreateMode CreateMode `json:"createMode,omitempty"` - // ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem' + // ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem' ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"` } @@ -3405,6 +3511,11 @@ func (aiscvpi AzureIaaSComputeVMProtectedItem) AsBasicAzureVMWorkloadProtectedIt return nil, false } +// AsAzureVMWorkloadSAPAseDatabaseProtectedItem is the BasicProtectedItem implementation for AzureIaaSComputeVMProtectedItem. 
+func (aiscvpi AzureIaaSComputeVMProtectedItem) AsAzureVMWorkloadSAPAseDatabaseProtectedItem() (*AzureVMWorkloadSAPAseDatabaseProtectedItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseProtectedItem is the BasicProtectedItem implementation for AzureIaaSComputeVMProtectedItem. func (aiscvpi AzureIaaSComputeVMProtectedItem) AsAzureVMWorkloadSAPHanaDatabaseProtectedItem() (*AzureVMWorkloadSAPHanaDatabaseProtectedItem, bool) { return nil, false @@ -3632,7 +3743,7 @@ type AzureIaaSVMJobTaskDetails struct { // ProgressPercentage - Progress of the task. ProgressPercentage *float64 `json:"progressPercentage,omitempty"` // TaskExecutionDetails - Details about execution of the task. - // eg: number of bytes transferred etc + // eg: number of bytes transferred etc TaskExecutionDetails *string `json:"taskExecutionDetails,omitempty"` } @@ -3667,7 +3778,7 @@ type AzureIaaSVMProtectedItem struct { ExtendedInfo *AzureIaaSVMProtectedItemExtendedInfo `json:"extendedInfo,omitempty"` // BackupManagementType - Type of backup management for the backed up item. Possible values include: 'ManagementTypeInvalid', 'ManagementTypeAzureIaasVM', 'ManagementTypeMAB', 'ManagementTypeDPM', 'ManagementTypeAzureBackupServer', 'ManagementTypeAzureSQL', 'ManagementTypeAzureStorage', 'ManagementTypeAzureWorkload', 'ManagementTypeDefaultBackup' BackupManagementType ManagementType `json:"backupManagementType,omitempty"` - // WorkloadType - Type of workload this item represents. Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase' + // WorkloadType - Type of workload this item represents. 
Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase', 'DataSourceTypeSAPAseDatabase' WorkloadType DataSourceType `json:"workloadType,omitempty"` // ContainerName - Unique name of container ContainerName *string `json:"containerName,omitempty"` @@ -3681,7 +3792,7 @@ type AzureIaaSVMProtectedItem struct { BackupSetName *string `json:"backupSetName,omitempty"` // CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover' CreateMode CreateMode `json:"createMode,omitempty"` - // ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem' + // ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 
'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem' ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"` } @@ -3830,6 +3941,11 @@ func (aispi AzureIaaSVMProtectedItem) AsBasicAzureVMWorkloadProtectedItem() (Bas return nil, false } +// AsAzureVMWorkloadSAPAseDatabaseProtectedItem is the BasicProtectedItem implementation for AzureIaaSVMProtectedItem. +func (aispi AzureIaaSVMProtectedItem) AsAzureVMWorkloadSAPAseDatabaseProtectedItem() (*AzureVMWorkloadSAPAseDatabaseProtectedItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseProtectedItem is the BasicProtectedItem implementation for AzureIaaSVMProtectedItem. func (aispi AzureIaaSVMProtectedItem) AsAzureVMWorkloadSAPHanaDatabaseProtectedItem() (*AzureVMWorkloadSAPHanaDatabaseProtectedItem, bool) { return nil, false @@ -4243,8 +4359,10 @@ type AzureSQLAGWorkloadContainerProtectionContainer struct { LastUpdatedTime *date.Time `json:"lastUpdatedTime,omitempty"` // ExtendedInfo - Additional details of a workload container. ExtendedInfo *AzureWorkloadContainerExtendedInfo `json:"extendedInfo,omitempty"` - // WorkloadType - Workload type for which registration was sent. Possible values include: 'WorkloadTypeInvalid', 'WorkloadTypeVM', 'WorkloadTypeFileFolder', 'WorkloadTypeAzureSQLDb', 'WorkloadTypeSQLDB', 'WorkloadTypeExchange', 'WorkloadTypeSharepoint', 'WorkloadTypeVMwareVM', 'WorkloadTypeSystemState', 'WorkloadTypeClient', 'WorkloadTypeGenericDataSource', 'WorkloadTypeSQLDataBase', 'WorkloadTypeAzureFileShare', 'WorkloadTypeSAPHanaDatabase' + // WorkloadType - Workload type for which registration was sent. 
Possible values include: 'WorkloadTypeInvalid', 'WorkloadTypeVM', 'WorkloadTypeFileFolder', 'WorkloadTypeAzureSQLDb', 'WorkloadTypeSQLDB', 'WorkloadTypeExchange', 'WorkloadTypeSharepoint', 'WorkloadTypeVMwareVM', 'WorkloadTypeSystemState', 'WorkloadTypeClient', 'WorkloadTypeGenericDataSource', 'WorkloadTypeSQLDataBase', 'WorkloadTypeAzureFileShare', 'WorkloadTypeSAPHanaDatabase', 'WorkloadTypeSAPAseDatabase' WorkloadType WorkloadType `json:"workloadType,omitempty"` + // OperationType - Re-Do Operation. Possible values include: 'OperationTypeInvalid', 'OperationTypeRegister', 'OperationTypeReregister' + OperationType OperationType `json:"operationType,omitempty"` // FriendlyName - Friendly name of the container. FriendlyName *string `json:"friendlyName,omitempty"` // BackupManagementType - Type of backup management for the container. Possible values include: 'ManagementTypeInvalid', 'ManagementTypeAzureIaasVM', 'ManagementTypeMAB', 'ManagementTypeDPM', 'ManagementTypeAzureBackupServer', 'ManagementTypeAzureSQL', 'ManagementTypeAzureStorage', 'ManagementTypeAzureWorkload', 'ManagementTypeDefaultBackup' @@ -4253,7 +4371,7 @@ type AzureSQLAGWorkloadContainerProtectionContainer struct { RegistrationStatus *string `json:"registrationStatus,omitempty"` // HealthStatus - Status of health of the container. 
HealthStatus *string `json:"healthStatus,omitempty"` - // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadBackupRequest', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' + // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadContainer', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' ContainerType ContainerTypeBasicProtectionContainer `json:"containerType,omitempty"` } @@ -4273,6 +4391,9 @@ func (aswcpc AzureSQLAGWorkloadContainerProtectionContainer) MarshalJSON() ([]by if aswcpc.WorkloadType != "" { objectMap["workloadType"] = aswcpc.WorkloadType } + if aswcpc.OperationType != "" { + objectMap["operationType"] = aswcpc.OperationType + } if aswcpc.FriendlyName != nil { objectMap["friendlyName"] = aswcpc.FriendlyName } @@ -4341,6 +4462,11 @@ func (aswcpc AzureSQLAGWorkloadContainerProtectionContainer) AsDpmContainer() (* return nil, false } +// AsBasicDpmContainer is the BasicProtectionContainer implementation for AzureSQLAGWorkloadContainerProtectionContainer. 
+func (aswcpc AzureSQLAGWorkloadContainerProtectionContainer) AsBasicDpmContainer() (BasicDpmContainer, bool) { + return nil, false +} + // AsGenericContainer is the BasicProtectionContainer implementation for AzureSQLAGWorkloadContainerProtectionContainer. func (aswcpc AzureSQLAGWorkloadContainerProtectionContainer) AsGenericContainer() (*GenericContainer, bool) { return nil, false @@ -4381,7 +4507,7 @@ type AzureSQLContainer struct { RegistrationStatus *string `json:"registrationStatus,omitempty"` // HealthStatus - Status of health of the container. HealthStatus *string `json:"healthStatus,omitempty"` - // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadBackupRequest', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' + // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadContainer', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' ContainerType ContainerTypeBasicProtectionContainer `json:"containerType,omitempty"` } @@ -4457,6 +4583,11 @@ func (asc AzureSQLContainer) AsDpmContainer() (*DpmContainer, bool) { return nil, false } +// AsBasicDpmContainer is the BasicProtectionContainer implementation for AzureSQLContainer. 
+func (asc AzureSQLContainer) AsBasicDpmContainer() (BasicDpmContainer, bool) { + return nil, false +} + // AsGenericContainer is the BasicProtectionContainer implementation for AzureSQLContainer. func (asc AzureSQLContainer) AsGenericContainer() (*GenericContainer, bool) { return nil, false @@ -4497,7 +4628,7 @@ type AzureSQLProtectedItem struct { ExtendedInfo *AzureSQLProtectedItemExtendedInfo `json:"extendedInfo,omitempty"` // BackupManagementType - Type of backup management for the backed up item. Possible values include: 'ManagementTypeInvalid', 'ManagementTypeAzureIaasVM', 'ManagementTypeMAB', 'ManagementTypeDPM', 'ManagementTypeAzureBackupServer', 'ManagementTypeAzureSQL', 'ManagementTypeAzureStorage', 'ManagementTypeAzureWorkload', 'ManagementTypeDefaultBackup' BackupManagementType ManagementType `json:"backupManagementType,omitempty"` - // WorkloadType - Type of workload this item represents. Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase' + // WorkloadType - Type of workload this item represents. 
Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase', 'DataSourceTypeSAPAseDatabase' WorkloadType DataSourceType `json:"workloadType,omitempty"` // ContainerName - Unique name of container ContainerName *string `json:"containerName,omitempty"` @@ -4511,7 +4642,7 @@ type AzureSQLProtectedItem struct { BackupSetName *string `json:"backupSetName,omitempty"` // CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover' CreateMode CreateMode `json:"createMode,omitempty"` - // ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem' + // ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 
'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem' ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"` } @@ -4598,6 +4729,11 @@ func (aspi AzureSQLProtectedItem) AsBasicAzureVMWorkloadProtectedItem() (BasicAz return nil, false } +// AsAzureVMWorkloadSAPAseDatabaseProtectedItem is the BasicProtectedItem implementation for AzureSQLProtectedItem. +func (aspi AzureSQLProtectedItem) AsAzureVMWorkloadSAPAseDatabaseProtectedItem() (*AzureVMWorkloadSAPAseDatabaseProtectedItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseProtectedItem is the BasicProtectedItem implementation for AzureSQLProtectedItem. func (aspi AzureSQLProtectedItem) AsAzureVMWorkloadSAPHanaDatabaseProtectedItem() (*AzureVMWorkloadSAPHanaDatabaseProtectedItem, bool) { return nil, false @@ -4766,7 +4902,7 @@ type AzureStorageContainer struct { RegistrationStatus *string `json:"registrationStatus,omitempty"` // HealthStatus - Status of health of the container. 
HealthStatus *string `json:"healthStatus,omitempty"` - // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadBackupRequest', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' + // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadContainer', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' ContainerType ContainerTypeBasicProtectionContainer `json:"containerType,omitempty"` } @@ -4854,6 +4990,11 @@ func (asc AzureStorageContainer) AsDpmContainer() (*DpmContainer, bool) { return nil, false } +// AsBasicDpmContainer is the BasicProtectionContainer implementation for AzureStorageContainer. +func (asc AzureStorageContainer) AsBasicDpmContainer() (BasicDpmContainer, bool) { + return nil, false +} + // AsGenericContainer is the BasicProtectionContainer implementation for AzureStorageContainer. func (asc AzureStorageContainer) AsGenericContainer() (*GenericContainer, bool) { return nil, false @@ -5163,8 +5304,10 @@ type AzureVMAppContainerProtectionContainer struct { LastUpdatedTime *date.Time `json:"lastUpdatedTime,omitempty"` // ExtendedInfo - Additional details of a workload container. 
ExtendedInfo *AzureWorkloadContainerExtendedInfo `json:"extendedInfo,omitempty"` - // WorkloadType - Workload type for which registration was sent. Possible values include: 'WorkloadTypeInvalid', 'WorkloadTypeVM', 'WorkloadTypeFileFolder', 'WorkloadTypeAzureSQLDb', 'WorkloadTypeSQLDB', 'WorkloadTypeExchange', 'WorkloadTypeSharepoint', 'WorkloadTypeVMwareVM', 'WorkloadTypeSystemState', 'WorkloadTypeClient', 'WorkloadTypeGenericDataSource', 'WorkloadTypeSQLDataBase', 'WorkloadTypeAzureFileShare', 'WorkloadTypeSAPHanaDatabase' + // WorkloadType - Workload type for which registration was sent. Possible values include: 'WorkloadTypeInvalid', 'WorkloadTypeVM', 'WorkloadTypeFileFolder', 'WorkloadTypeAzureSQLDb', 'WorkloadTypeSQLDB', 'WorkloadTypeExchange', 'WorkloadTypeSharepoint', 'WorkloadTypeVMwareVM', 'WorkloadTypeSystemState', 'WorkloadTypeClient', 'WorkloadTypeGenericDataSource', 'WorkloadTypeSQLDataBase', 'WorkloadTypeAzureFileShare', 'WorkloadTypeSAPHanaDatabase', 'WorkloadTypeSAPAseDatabase' WorkloadType WorkloadType `json:"workloadType,omitempty"` + // OperationType - Re-Do Operation. Possible values include: 'OperationTypeInvalid', 'OperationTypeRegister', 'OperationTypeReregister' + OperationType OperationType `json:"operationType,omitempty"` // FriendlyName - Friendly name of the container. FriendlyName *string `json:"friendlyName,omitempty"` // BackupManagementType - Type of backup management for the container. Possible values include: 'ManagementTypeInvalid', 'ManagementTypeAzureIaasVM', 'ManagementTypeMAB', 'ManagementTypeDPM', 'ManagementTypeAzureBackupServer', 'ManagementTypeAzureSQL', 'ManagementTypeAzureStorage', 'ManagementTypeAzureWorkload', 'ManagementTypeDefaultBackup' @@ -5173,7 +5316,7 @@ type AzureVMAppContainerProtectionContainer struct { RegistrationStatus *string `json:"registrationStatus,omitempty"` // HealthStatus - Status of health of the container. 
HealthStatus *string `json:"healthStatus,omitempty"` - // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadBackupRequest', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' + // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadContainer', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' ContainerType ContainerTypeBasicProtectionContainer `json:"containerType,omitempty"` } @@ -5193,6 +5336,9 @@ func (avacpc AzureVMAppContainerProtectionContainer) MarshalJSON() ([]byte, erro if avacpc.WorkloadType != "" { objectMap["workloadType"] = avacpc.WorkloadType } + if avacpc.OperationType != "" { + objectMap["operationType"] = avacpc.OperationType + } if avacpc.FriendlyName != nil { objectMap["friendlyName"] = avacpc.FriendlyName } @@ -5261,6 +5407,11 @@ func (avacpc AzureVMAppContainerProtectionContainer) AsDpmContainer() (*DpmConta return nil, false } +// AsBasicDpmContainer is the BasicProtectionContainer implementation for AzureVMAppContainerProtectionContainer. 
+func (avacpc AzureVMAppContainerProtectionContainer) AsBasicDpmContainer() (BasicDpmContainer, bool) { + return nil, false +} + // AsGenericContainer is the BasicProtectionContainer implementation for AzureVMAppContainerProtectionContainer. func (avacpc AzureVMAppContainerProtectionContainer) AsGenericContainer() (*GenericContainer, bool) { return nil, false @@ -5346,6 +5497,8 @@ type AzureVMResourceFeatureSupportResponse struct { // BasicAzureVMWorkloadItem azure VM workload-specific workload item. type BasicAzureVMWorkloadItem interface { + AsAzureVMWorkloadSAPAseDatabaseWorkloadItem() (*AzureVMWorkloadSAPAseDatabaseWorkloadItem, bool) + AsAzureVMWorkloadSAPAseSystemWorkloadItem() (*AzureVMWorkloadSAPAseSystemWorkloadItem, bool) AsAzureVMWorkloadSAPHanaDatabaseWorkloadItem() (*AzureVMWorkloadSAPHanaDatabaseWorkloadItem, bool) AsAzureVMWorkloadSAPHanaSystemWorkloadItem() (*AzureVMWorkloadSAPHanaSystemWorkloadItem, bool) AsAzureVMWorkloadSQLDatabaseWorkloadItem() (*AzureVMWorkloadSQLDatabaseWorkloadItem, bool) @@ -5373,7 +5526,7 @@ type AzureVMWorkloadItem struct { FriendlyName *string `json:"friendlyName,omitempty"` // ProtectionState - State of the back up item. 
Possible values include: 'ProtectionStatusInvalid', 'ProtectionStatusNotProtected', 'ProtectionStatusProtecting', 'ProtectionStatusProtected', 'ProtectionStatusProtectionFailed' ProtectionState ProtectionStatus `json:"protectionState,omitempty"` - // WorkloadItemType - Possible values include: 'WorkloadItemTypeWorkloadItem', 'WorkloadItemTypeAzureVMWorkloadItem', 'WorkloadItemTypeSAPHanaDatabase1', 'WorkloadItemTypeSAPHanaSystem1', 'WorkloadItemTypeSQLDataBase1', 'WorkloadItemTypeSQLInstance1' + // WorkloadItemType - Possible values include: 'WorkloadItemTypeWorkloadItem', 'WorkloadItemTypeAzureVMWorkloadItem', 'WorkloadItemTypeSAPAseDatabase1', 'WorkloadItemTypeSAPAseSystem1', 'WorkloadItemTypeSAPHanaDatabase1', 'WorkloadItemTypeSAPHanaSystem1', 'WorkloadItemTypeSQLDataBase1', 'WorkloadItemTypeSQLInstance1' WorkloadItemType WorkloadItemTypeBasicWorkloadItem `json:"workloadItemType,omitempty"` } @@ -5385,6 +5538,14 @@ func unmarshalBasicAzureVMWorkloadItem(body []byte) (BasicAzureVMWorkloadItem, e } switch m["workloadItemType"] { + case string(WorkloadItemTypeSAPAseDatabase1): + var avwsadwi AzureVMWorkloadSAPAseDatabaseWorkloadItem + err := json.Unmarshal(body, &avwsadwi) + return avwsadwi, err + case string(WorkloadItemTypeSAPAseSystem1): + var avwsaswi AzureVMWorkloadSAPAseSystemWorkloadItem + err := json.Unmarshal(body, &avwsaswi) + return avwsaswi, err case string(WorkloadItemTypeSAPHanaDatabase1): var avwshdwi AzureVMWorkloadSAPHanaDatabaseWorkloadItem err := json.Unmarshal(body, &avwshdwi) @@ -5473,6 +5634,16 @@ func (avwi AzureVMWorkloadItem) AsBasicAzureVMWorkloadItem() (BasicAzureVMWorklo return &avwi, true } +// AsAzureVMWorkloadSAPAseDatabaseWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadItem. 
+func (avwi AzureVMWorkloadItem) AsAzureVMWorkloadSAPAseDatabaseWorkloadItem() (*AzureVMWorkloadSAPAseDatabaseWorkloadItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSAPAseSystemWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadItem. +func (avwi AzureVMWorkloadItem) AsAzureVMWorkloadSAPAseSystemWorkloadItem() (*AzureVMWorkloadSAPAseSystemWorkloadItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadItem. func (avwi AzureVMWorkloadItem) AsAzureVMWorkloadSAPHanaDatabaseWorkloadItem() (*AzureVMWorkloadSAPHanaDatabaseWorkloadItem, bool) { return nil, false @@ -5505,6 +5676,8 @@ func (avwi AzureVMWorkloadItem) AsBasicWorkloadItem() (BasicWorkloadItem, bool) // BasicAzureVMWorkloadProtectableItem azure VM workload-specific protectable item. type BasicAzureVMWorkloadProtectableItem interface { + AsAzureVMWorkloadSAPAseDatabaseProtectableItem() (*AzureVMWorkloadSAPAseDatabaseProtectableItem, bool) + AsAzureVMWorkloadSAPAseSystemProtectableItem() (*AzureVMWorkloadSAPAseSystemProtectableItem, bool) AsAzureVMWorkloadSAPHanaDatabaseProtectableItem() (*AzureVMWorkloadSAPHanaDatabaseProtectableItem, bool) AsAzureVMWorkloadSAPHanaSystemProtectableItem() (*AzureVMWorkloadSAPHanaSystemProtectableItem, bool) AsAzureVMWorkloadSQLAvailabilityGroupProtectableItem() (*AzureVMWorkloadSQLAvailabilityGroupProtectableItem, bool) @@ -5518,7 +5691,7 @@ type AzureVMWorkloadProtectableItem struct { // ParentName - Name for instance or AG ParentName *string `json:"parentName,omitempty"` // ParentUniqueName - Parent Unique Name is added to provide the service formatted URI Name of the Parent - // Only Applicable for data bases where the parent would be either Instance or a SQL AG. + // Only Applicable for data bases where the parent would be either Instance or a SQL AG. 
ParentUniqueName *string `json:"parentUniqueName,omitempty"` // ServerName - Host/Cluster Name for instance or AG ServerName *string `json:"serverName,omitempty"` @@ -5540,7 +5713,7 @@ type AzureVMWorkloadProtectableItem struct { FriendlyName *string `json:"friendlyName,omitempty"` // ProtectionState - State of the back up item. Possible values include: 'ProtectionStatusInvalid', 'ProtectionStatusNotProtected', 'ProtectionStatusProtecting', 'ProtectionStatusProtected', 'ProtectionStatusProtectionFailed' ProtectionState ProtectionStatus `json:"protectionState,omitempty"` - // ProtectableItemType - Possible values include: 'ProtectableItemTypeWorkloadProtectableItem', 'ProtectableItemTypeAzureFileShare', 'ProtectableItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectableItemTypeMicrosoftComputevirtualMachines', 'ProtectableItemTypeAzureVMWorkloadProtectableItem', 'ProtectableItemTypeSAPHanaDatabase', 'ProtectableItemTypeSAPHanaSystem', 'ProtectableItemTypeSQLAvailabilityGroupContainer', 'ProtectableItemTypeSQLDataBase', 'ProtectableItemTypeSQLInstance', 'ProtectableItemTypeIaaSVMProtectableItem' + // ProtectableItemType - Possible values include: 'ProtectableItemTypeWorkloadProtectableItem', 'ProtectableItemTypeAzureFileShare', 'ProtectableItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectableItemTypeMicrosoftComputevirtualMachines', 'ProtectableItemTypeAzureVMWorkloadProtectableItem', 'ProtectableItemTypeSAPAseDatabase', 'ProtectableItemTypeSAPAseSystem', 'ProtectableItemTypeSAPHanaDatabase', 'ProtectableItemTypeSAPHanaSystem', 'ProtectableItemTypeSQLAvailabilityGroupContainer', 'ProtectableItemTypeSQLDataBase', 'ProtectableItemTypeSQLInstance', 'ProtectableItemTypeIaaSVMProtectableItem' ProtectableItemType ProtectableItemType `json:"protectableItemType,omitempty"` } @@ -5552,6 +5725,14 @@ func unmarshalBasicAzureVMWorkloadProtectableItem(body []byte) (BasicAzureVMWork } switch m["protectableItemType"] { + case string(ProtectableItemTypeSAPAseDatabase): 
+ var avwsadpi AzureVMWorkloadSAPAseDatabaseProtectableItem + err := json.Unmarshal(body, &avwsadpi) + return avwsadpi, err + case string(ProtectableItemTypeSAPAseSystem): + var avwsaspi AzureVMWorkloadSAPAseSystemProtectableItem + err := json.Unmarshal(body, &avwsaspi) + return avwsaspi, err case string(ProtectableItemTypeSAPHanaDatabase): var avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem err := json.Unmarshal(body, &avwshdpi) @@ -5668,6 +5849,16 @@ func (avwpi AzureVMWorkloadProtectableItem) AsBasicAzureVMWorkloadProtectableIte return &avwpi, true } +// AsAzureVMWorkloadSAPAseDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadProtectableItem. +func (avwpi AzureVMWorkloadProtectableItem) AsAzureVMWorkloadSAPAseDatabaseProtectableItem() (*AzureVMWorkloadSAPAseDatabaseProtectableItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSAPAseSystemProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadProtectableItem. +func (avwpi AzureVMWorkloadProtectableItem) AsAzureVMWorkloadSAPAseSystemProtectableItem() (*AzureVMWorkloadSAPAseSystemProtectableItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadProtectableItem. func (avwpi AzureVMWorkloadProtectableItem) AsAzureVMWorkloadSAPHanaDatabaseProtectableItem() (*AzureVMWorkloadSAPHanaDatabaseProtectableItem, bool) { return nil, false @@ -5715,7 +5906,9 @@ func (avwpi AzureVMWorkloadProtectableItem) AsBasicWorkloadProtectableItem() (Ba // BasicAzureVMWorkloadProtectedItem azure VM workload-specific protected item. 
type BasicAzureVMWorkloadProtectedItem interface { + AsAzureVMWorkloadSAPAseDatabaseProtectedItem() (*AzureVMWorkloadSAPAseDatabaseProtectedItem, bool) AsAzureVMWorkloadSAPHanaDatabaseProtectedItem() (*AzureVMWorkloadSAPHanaDatabaseProtectedItem, bool) + AsAzureVMWorkloadSQLDatabaseProtectedItem() (*AzureVMWorkloadSQLDatabaseProtectedItem, bool) AsAzureVMWorkloadProtectedItem() (*AzureVMWorkloadProtectedItem, bool) } @@ -5747,7 +5940,7 @@ type AzureVMWorkloadProtectedItem struct { ExtendedInfo *AzureVMWorkloadProtectedItemExtendedInfo `json:"extendedInfo,omitempty"` // BackupManagementType - Type of backup management for the backed up item. Possible values include: 'ManagementTypeInvalid', 'ManagementTypeAzureIaasVM', 'ManagementTypeMAB', 'ManagementTypeDPM', 'ManagementTypeAzureBackupServer', 'ManagementTypeAzureSQL', 'ManagementTypeAzureStorage', 'ManagementTypeAzureWorkload', 'ManagementTypeDefaultBackup' BackupManagementType ManagementType `json:"backupManagementType,omitempty"` - // WorkloadType - Type of workload this item represents. Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase' + // WorkloadType - Type of workload this item represents. 
Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase', 'DataSourceTypeSAPAseDatabase' WorkloadType DataSourceType `json:"workloadType,omitempty"` // ContainerName - Unique name of container ContainerName *string `json:"containerName,omitempty"` @@ -5761,7 +5954,7 @@ type AzureVMWorkloadProtectedItem struct { BackupSetName *string `json:"backupSetName,omitempty"` // CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover' CreateMode CreateMode `json:"createMode,omitempty"` - // ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem' + // ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 
'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem' ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"` } @@ -5773,10 +5966,18 @@ func unmarshalBasicAzureVMWorkloadProtectedItem(body []byte) (BasicAzureVMWorklo } switch m["protectedItemType"] { + case string(ProtectedItemTypeAzureVMWorkloadSAPAseDatabase): + var avwsadpi AzureVMWorkloadSAPAseDatabaseProtectedItem + err := json.Unmarshal(body, &avwsadpi) + return avwsadpi, err case string(ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase): var avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem err := json.Unmarshal(body, &avwshdpi) return avwshdpi, err + case string(ProtectedItemTypeAzureVMWorkloadSQLDatabase): + var avwsdpi AzureVMWorkloadSQLDatabaseProtectedItem + err := json.Unmarshal(body, &avwsdpi) + return avwsdpi, err default: var avwpi AzureVMWorkloadProtectedItem err := json.Unmarshal(body, &avwpi) @@ -5912,6 +6113,11 @@ func (avwpi AzureVMWorkloadProtectedItem) AsBasicAzureVMWorkloadProtectedItem() return &avwpi, true } +// AsAzureVMWorkloadSAPAseDatabaseProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadProtectedItem. +func (avwpi AzureVMWorkloadProtectedItem) AsAzureVMWorkloadSAPAseDatabaseProtectedItem() (*AzureVMWorkloadSAPAseDatabaseProtectedItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadProtectedItem. func (avwpi AzureVMWorkloadProtectedItem) AsAzureVMWorkloadSAPHanaDatabaseProtectedItem() (*AzureVMWorkloadSAPHanaDatabaseProtectedItem, bool) { return nil, false @@ -5960,7 +6166,7 @@ type AzureVMWorkloadProtectedItemExtendedInfo struct { // AzureVMWorkloadProtectionPolicy azure VM (Mercury) workload-specific backup policy. 
type AzureVMWorkloadProtectionPolicy struct { - // WorkLoadType - Type of workload for the backup management. Possible values include: 'WorkloadTypeInvalid', 'WorkloadTypeVM', 'WorkloadTypeFileFolder', 'WorkloadTypeAzureSQLDb', 'WorkloadTypeSQLDB', 'WorkloadTypeExchange', 'WorkloadTypeSharepoint', 'WorkloadTypeVMwareVM', 'WorkloadTypeSystemState', 'WorkloadTypeClient', 'WorkloadTypeGenericDataSource', 'WorkloadTypeSQLDataBase', 'WorkloadTypeAzureFileShare', 'WorkloadTypeSAPHanaDatabase' + // WorkLoadType - Type of workload for the backup management. Possible values include: 'WorkloadTypeInvalid', 'WorkloadTypeVM', 'WorkloadTypeFileFolder', 'WorkloadTypeAzureSQLDb', 'WorkloadTypeSQLDB', 'WorkloadTypeExchange', 'WorkloadTypeSharepoint', 'WorkloadTypeVMwareVM', 'WorkloadTypeSystemState', 'WorkloadTypeClient', 'WorkloadTypeGenericDataSource', 'WorkloadTypeSQLDataBase', 'WorkloadTypeAzureFileShare', 'WorkloadTypeSAPHanaDatabase', 'WorkloadTypeSAPAseDatabase' WorkLoadType WorkloadType `json:"workLoadType,omitempty"` // Settings - Common settings for the backup management Settings *Settings `json:"settings,omitempty"` @@ -6034,13 +6240,13 @@ func (avwpp AzureVMWorkloadProtectionPolicy) AsBasicProtectionPolicy() (BasicPro return &avwpp, true } -// AzureVMWorkloadSAPHanaDatabaseProtectableItem azure VM workload-specific protectable item representing -// SAP Hana Database. -type AzureVMWorkloadSAPHanaDatabaseProtectableItem struct { +// AzureVMWorkloadSAPAseDatabaseProtectableItem azure VM workload-specific protectable item representing +// SAP ASE Database. +type AzureVMWorkloadSAPAseDatabaseProtectableItem struct { // ParentName - Name for instance or AG ParentName *string `json:"parentName,omitempty"` // ParentUniqueName - Parent Unique Name is added to provide the service formatted URI Name of the Parent - // Only Applicable for data bases where the parent would be either Instance or a SQL AG. 
+ // Only Applicable for data bases where the parent would be either Instance or a SQL AG. ParentUniqueName *string `json:"parentUniqueName,omitempty"` // ServerName - Host/Cluster Name for instance or AG ServerName *string `json:"serverName,omitempty"` @@ -6062,129 +6268,139 @@ type AzureVMWorkloadSAPHanaDatabaseProtectableItem struct { FriendlyName *string `json:"friendlyName,omitempty"` // ProtectionState - State of the back up item. Possible values include: 'ProtectionStatusInvalid', 'ProtectionStatusNotProtected', 'ProtectionStatusProtecting', 'ProtectionStatusProtected', 'ProtectionStatusProtectionFailed' ProtectionState ProtectionStatus `json:"protectionState,omitempty"` - // ProtectableItemType - Possible values include: 'ProtectableItemTypeWorkloadProtectableItem', 'ProtectableItemTypeAzureFileShare', 'ProtectableItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectableItemTypeMicrosoftComputevirtualMachines', 'ProtectableItemTypeAzureVMWorkloadProtectableItem', 'ProtectableItemTypeSAPHanaDatabase', 'ProtectableItemTypeSAPHanaSystem', 'ProtectableItemTypeSQLAvailabilityGroupContainer', 'ProtectableItemTypeSQLDataBase', 'ProtectableItemTypeSQLInstance', 'ProtectableItemTypeIaaSVMProtectableItem' + // ProtectableItemType - Possible values include: 'ProtectableItemTypeWorkloadProtectableItem', 'ProtectableItemTypeAzureFileShare', 'ProtectableItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectableItemTypeMicrosoftComputevirtualMachines', 'ProtectableItemTypeAzureVMWorkloadProtectableItem', 'ProtectableItemTypeSAPAseDatabase', 'ProtectableItemTypeSAPAseSystem', 'ProtectableItemTypeSAPHanaDatabase', 'ProtectableItemTypeSAPHanaSystem', 'ProtectableItemTypeSQLAvailabilityGroupContainer', 'ProtectableItemTypeSQLDataBase', 'ProtectableItemTypeSQLInstance', 'ProtectableItemTypeIaaSVMProtectableItem' ProtectableItemType ProtectableItemType `json:"protectableItemType,omitempty"` } -// MarshalJSON is the custom marshaler for 
AzureVMWorkloadSAPHanaDatabaseProtectableItem. -func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) MarshalJSON() ([]byte, error) { - avwshdpi.ProtectableItemType = ProtectableItemTypeSAPHanaDatabase +// MarshalJSON is the custom marshaler for AzureVMWorkloadSAPAseDatabaseProtectableItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectableItem) MarshalJSON() ([]byte, error) { + avwsadpi.ProtectableItemType = ProtectableItemTypeSAPAseDatabase objectMap := make(map[string]interface{}) - if avwshdpi.ParentName != nil { - objectMap["parentName"] = avwshdpi.ParentName + if avwsadpi.ParentName != nil { + objectMap["parentName"] = avwsadpi.ParentName } - if avwshdpi.ParentUniqueName != nil { - objectMap["parentUniqueName"] = avwshdpi.ParentUniqueName + if avwsadpi.ParentUniqueName != nil { + objectMap["parentUniqueName"] = avwsadpi.ParentUniqueName } - if avwshdpi.ServerName != nil { - objectMap["serverName"] = avwshdpi.ServerName + if avwsadpi.ServerName != nil { + objectMap["serverName"] = avwsadpi.ServerName } - if avwshdpi.IsAutoProtectable != nil { - objectMap["isAutoProtectable"] = avwshdpi.IsAutoProtectable + if avwsadpi.IsAutoProtectable != nil { + objectMap["isAutoProtectable"] = avwsadpi.IsAutoProtectable } - if avwshdpi.IsAutoProtected != nil { - objectMap["isAutoProtected"] = avwshdpi.IsAutoProtected + if avwsadpi.IsAutoProtected != nil { + objectMap["isAutoProtected"] = avwsadpi.IsAutoProtected } - if avwshdpi.Subinquireditemcount != nil { - objectMap["subinquireditemcount"] = avwshdpi.Subinquireditemcount + if avwsadpi.Subinquireditemcount != nil { + objectMap["subinquireditemcount"] = avwsadpi.Subinquireditemcount } - if avwshdpi.Subprotectableitemcount != nil { - objectMap["subprotectableitemcount"] = avwshdpi.Subprotectableitemcount + if avwsadpi.Subprotectableitemcount != nil { + objectMap["subprotectableitemcount"] = avwsadpi.Subprotectableitemcount } - if avwshdpi.Prebackupvalidation != nil { - objectMap["prebackupvalidation"] = 
avwshdpi.Prebackupvalidation + if avwsadpi.Prebackupvalidation != nil { + objectMap["prebackupvalidation"] = avwsadpi.Prebackupvalidation } - if avwshdpi.BackupManagementType != nil { - objectMap["backupManagementType"] = avwshdpi.BackupManagementType + if avwsadpi.BackupManagementType != nil { + objectMap["backupManagementType"] = avwsadpi.BackupManagementType } - if avwshdpi.WorkloadType != nil { - objectMap["workloadType"] = avwshdpi.WorkloadType + if avwsadpi.WorkloadType != nil { + objectMap["workloadType"] = avwsadpi.WorkloadType } - if avwshdpi.FriendlyName != nil { - objectMap["friendlyName"] = avwshdpi.FriendlyName + if avwsadpi.FriendlyName != nil { + objectMap["friendlyName"] = avwsadpi.FriendlyName } - if avwshdpi.ProtectionState != "" { - objectMap["protectionState"] = avwshdpi.ProtectionState + if avwsadpi.ProtectionState != "" { + objectMap["protectionState"] = avwsadpi.ProtectionState } - if avwshdpi.ProtectableItemType != "" { - objectMap["protectableItemType"] = avwshdpi.ProtectableItemType + if avwsadpi.ProtectableItemType != "" { + objectMap["protectableItemType"] = avwsadpi.ProtectableItemType } return json.Marshal(objectMap) } -// AsAzureFileShareProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. -func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsAzureFileShareProtectableItem() (*AzureFileShareProtectableItem, bool) { +// AsAzureFileShareProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseDatabaseProtectableItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectableItem) AsAzureFileShareProtectableItem() (*AzureFileShareProtectableItem, bool) { return nil, false } -// AsAzureIaaSClassicComputeVMProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. 
-func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsAzureIaaSClassicComputeVMProtectableItem() (*AzureIaaSClassicComputeVMProtectableItem, bool) { +// AsAzureIaaSClassicComputeVMProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseDatabaseProtectableItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectableItem) AsAzureIaaSClassicComputeVMProtectableItem() (*AzureIaaSClassicComputeVMProtectableItem, bool) { return nil, false } -// AsAzureIaaSComputeVMProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. -func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsAzureIaaSComputeVMProtectableItem() (*AzureIaaSComputeVMProtectableItem, bool) { +// AsAzureIaaSComputeVMProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseDatabaseProtectableItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectableItem) AsAzureIaaSComputeVMProtectableItem() (*AzureIaaSComputeVMProtectableItem, bool) { return nil, false } -// AsAzureVMWorkloadProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. -func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsAzureVMWorkloadProtectableItem() (*AzureVMWorkloadProtectableItem, bool) { +// AsAzureVMWorkloadProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseDatabaseProtectableItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectableItem) AsAzureVMWorkloadProtectableItem() (*AzureVMWorkloadProtectableItem, bool) { return nil, false } -// AsBasicAzureVMWorkloadProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. 
-func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsBasicAzureVMWorkloadProtectableItem() (BasicAzureVMWorkloadProtectableItem, bool) { - return &avwshdpi, true +// AsBasicAzureVMWorkloadProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseDatabaseProtectableItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectableItem) AsBasicAzureVMWorkloadProtectableItem() (BasicAzureVMWorkloadProtectableItem, bool) { + return &avwsadpi, true } -// AsAzureVMWorkloadSAPHanaDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. -func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsAzureVMWorkloadSAPHanaDatabaseProtectableItem() (*AzureVMWorkloadSAPHanaDatabaseProtectableItem, bool) { - return &avwshdpi, true +// AsAzureVMWorkloadSAPAseDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseDatabaseProtectableItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectableItem) AsAzureVMWorkloadSAPAseDatabaseProtectableItem() (*AzureVMWorkloadSAPAseDatabaseProtectableItem, bool) { + return &avwsadpi, true } -// AsAzureVMWorkloadSAPHanaSystemProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. -func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsAzureVMWorkloadSAPHanaSystemProtectableItem() (*AzureVMWorkloadSAPHanaSystemProtectableItem, bool) { +// AsAzureVMWorkloadSAPAseSystemProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseDatabaseProtectableItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectableItem) AsAzureVMWorkloadSAPAseSystemProtectableItem() (*AzureVMWorkloadSAPAseSystemProtectableItem, bool) { return nil, false } -// AsAzureVMWorkloadSQLAvailabilityGroupProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. 
-func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsAzureVMWorkloadSQLAvailabilityGroupProtectableItem() (*AzureVMWorkloadSQLAvailabilityGroupProtectableItem, bool) { +// AsAzureVMWorkloadSAPHanaDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseDatabaseProtectableItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectableItem) AsAzureVMWorkloadSAPHanaDatabaseProtectableItem() (*AzureVMWorkloadSAPHanaDatabaseProtectableItem, bool) { return nil, false } -// AsAzureVMWorkloadSQLDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. -func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsAzureVMWorkloadSQLDatabaseProtectableItem() (*AzureVMWorkloadSQLDatabaseProtectableItem, bool) { +// AsAzureVMWorkloadSAPHanaSystemProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseDatabaseProtectableItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectableItem) AsAzureVMWorkloadSAPHanaSystemProtectableItem() (*AzureVMWorkloadSAPHanaSystemProtectableItem, bool) { return nil, false } -// AsAzureVMWorkloadSQLInstanceProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. -func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsAzureVMWorkloadSQLInstanceProtectableItem() (*AzureVMWorkloadSQLInstanceProtectableItem, bool) { +// AsAzureVMWorkloadSQLAvailabilityGroupProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseDatabaseProtectableItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectableItem) AsAzureVMWorkloadSQLAvailabilityGroupProtectableItem() (*AzureVMWorkloadSQLAvailabilityGroupProtectableItem, bool) { return nil, false } -// AsIaaSVMProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. 
-func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsIaaSVMProtectableItem() (*IaaSVMProtectableItem, bool) { +// AsAzureVMWorkloadSQLDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseDatabaseProtectableItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectableItem) AsAzureVMWorkloadSQLDatabaseProtectableItem() (*AzureVMWorkloadSQLDatabaseProtectableItem, bool) { return nil, false } -// AsBasicIaaSVMProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. -func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsBasicIaaSVMProtectableItem() (BasicIaaSVMProtectableItem, bool) { +// AsAzureVMWorkloadSQLInstanceProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseDatabaseProtectableItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectableItem) AsAzureVMWorkloadSQLInstanceProtectableItem() (*AzureVMWorkloadSQLInstanceProtectableItem, bool) { return nil, false } -// AsWorkloadProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. -func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsWorkloadProtectableItem() (*WorkloadProtectableItem, bool) { +// AsIaaSVMProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseDatabaseProtectableItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectableItem) AsIaaSVMProtectableItem() (*IaaSVMProtectableItem, bool) { return nil, false } -// AsBasicWorkloadProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. 
-func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsBasicWorkloadProtectableItem() (BasicWorkloadProtectableItem, bool) { - return &avwshdpi, true +// AsBasicIaaSVMProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseDatabaseProtectableItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectableItem) AsBasicIaaSVMProtectableItem() (BasicIaaSVMProtectableItem, bool) { + return nil, false } -// AzureVMWorkloadSAPHanaDatabaseProtectedItem azure VM workload-specific protected item representing SAP -// Hana Database. -type AzureVMWorkloadSAPHanaDatabaseProtectedItem struct { +// AsWorkloadProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseDatabaseProtectableItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectableItem) AsWorkloadProtectableItem() (*WorkloadProtectableItem, bool) { + return nil, false +} + +// AsBasicWorkloadProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseDatabaseProtectableItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectableItem) AsBasicWorkloadProtectableItem() (BasicWorkloadProtectableItem, bool) { + return &avwsadpi, true +} + +// AzureVMWorkloadSAPAseDatabaseProtectedItem azure VM workload-specific protected item representing SAP +// ASE Database. +type AzureVMWorkloadSAPAseDatabaseProtectedItem struct { // FriendlyName - Friendly name of the DB represented by this backup item. FriendlyName *string `json:"friendlyName,omitempty"` // ServerName - Host/Cluster Name for instance or AG @@ -6211,7 +6427,7 @@ type AzureVMWorkloadSAPHanaDatabaseProtectedItem struct { ExtendedInfo *AzureVMWorkloadProtectedItemExtendedInfo `json:"extendedInfo,omitempty"` // BackupManagementType - Type of backup management for the backed up item. 
Possible values include: 'ManagementTypeInvalid', 'ManagementTypeAzureIaasVM', 'ManagementTypeMAB', 'ManagementTypeDPM', 'ManagementTypeAzureBackupServer', 'ManagementTypeAzureSQL', 'ManagementTypeAzureStorage', 'ManagementTypeAzureWorkload', 'ManagementTypeDefaultBackup' BackupManagementType ManagementType `json:"backupManagementType,omitempty"` - // WorkloadType - Type of workload this item represents. Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase' + // WorkloadType - Type of workload this item represents. Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase', 'DataSourceTypeSAPAseDatabase' WorkloadType DataSourceType `json:"workloadType,omitempty"` // ContainerName - Unique name of container ContainerName *string `json:"containerName,omitempty"` @@ -6225,158 +6441,163 @@ type AzureVMWorkloadSAPHanaDatabaseProtectedItem struct { BackupSetName *string `json:"backupSetName,omitempty"` // CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. 
Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover' CreateMode CreateMode `json:"createMode,omitempty"` - // ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem' + // ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem' ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"` } -// MarshalJSON is the custom marshaler for AzureVMWorkloadSAPHanaDatabaseProtectedItem. -func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) MarshalJSON() ([]byte, error) { - avwshdpi.ProtectedItemType = ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase +// MarshalJSON is the custom marshaler for AzureVMWorkloadSAPAseDatabaseProtectedItem. 
+func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectedItem) MarshalJSON() ([]byte, error) { + avwsadpi.ProtectedItemType = ProtectedItemTypeAzureVMWorkloadSAPAseDatabase objectMap := make(map[string]interface{}) - if avwshdpi.FriendlyName != nil { - objectMap["friendlyName"] = avwshdpi.FriendlyName + if avwsadpi.FriendlyName != nil { + objectMap["friendlyName"] = avwsadpi.FriendlyName } - if avwshdpi.ServerName != nil { - objectMap["serverName"] = avwshdpi.ServerName + if avwsadpi.ServerName != nil { + objectMap["serverName"] = avwsadpi.ServerName } - if avwshdpi.ParentName != nil { - objectMap["parentName"] = avwshdpi.ParentName + if avwsadpi.ParentName != nil { + objectMap["parentName"] = avwsadpi.ParentName } - if avwshdpi.ParentType != nil { - objectMap["parentType"] = avwshdpi.ParentType + if avwsadpi.ParentType != nil { + objectMap["parentType"] = avwsadpi.ParentType } - if avwshdpi.ProtectionStatus != nil { - objectMap["protectionStatus"] = avwshdpi.ProtectionStatus + if avwsadpi.ProtectionStatus != nil { + objectMap["protectionStatus"] = avwsadpi.ProtectionStatus } - if avwshdpi.ProtectionState != "" { - objectMap["protectionState"] = avwshdpi.ProtectionState + if avwsadpi.ProtectionState != "" { + objectMap["protectionState"] = avwsadpi.ProtectionState } - if avwshdpi.LastBackupStatus != "" { - objectMap["lastBackupStatus"] = avwshdpi.LastBackupStatus + if avwsadpi.LastBackupStatus != "" { + objectMap["lastBackupStatus"] = avwsadpi.LastBackupStatus } - if avwshdpi.LastBackupTime != nil { - objectMap["lastBackupTime"] = avwshdpi.LastBackupTime + if avwsadpi.LastBackupTime != nil { + objectMap["lastBackupTime"] = avwsadpi.LastBackupTime } - if avwshdpi.LastBackupErrorDetail != nil { - objectMap["lastBackupErrorDetail"] = avwshdpi.LastBackupErrorDetail + if avwsadpi.LastBackupErrorDetail != nil { + objectMap["lastBackupErrorDetail"] = avwsadpi.LastBackupErrorDetail } - if avwshdpi.ProtectedItemDataSourceID != nil { - objectMap["protectedItemDataSourceId"] = 
avwshdpi.ProtectedItemDataSourceID + if avwsadpi.ProtectedItemDataSourceID != nil { + objectMap["protectedItemDataSourceId"] = avwsadpi.ProtectedItemDataSourceID } - if avwshdpi.ProtectedItemHealthStatus != "" { - objectMap["protectedItemHealthStatus"] = avwshdpi.ProtectedItemHealthStatus + if avwsadpi.ProtectedItemHealthStatus != "" { + objectMap["protectedItemHealthStatus"] = avwsadpi.ProtectedItemHealthStatus } - if avwshdpi.ExtendedInfo != nil { - objectMap["extendedInfo"] = avwshdpi.ExtendedInfo + if avwsadpi.ExtendedInfo != nil { + objectMap["extendedInfo"] = avwsadpi.ExtendedInfo } - if avwshdpi.BackupManagementType != "" { - objectMap["backupManagementType"] = avwshdpi.BackupManagementType + if avwsadpi.BackupManagementType != "" { + objectMap["backupManagementType"] = avwsadpi.BackupManagementType } - if avwshdpi.WorkloadType != "" { - objectMap["workloadType"] = avwshdpi.WorkloadType + if avwsadpi.WorkloadType != "" { + objectMap["workloadType"] = avwsadpi.WorkloadType } - if avwshdpi.ContainerName != nil { - objectMap["containerName"] = avwshdpi.ContainerName + if avwsadpi.ContainerName != nil { + objectMap["containerName"] = avwsadpi.ContainerName } - if avwshdpi.SourceResourceID != nil { - objectMap["sourceResourceId"] = avwshdpi.SourceResourceID + if avwsadpi.SourceResourceID != nil { + objectMap["sourceResourceId"] = avwsadpi.SourceResourceID } - if avwshdpi.PolicyID != nil { - objectMap["policyId"] = avwshdpi.PolicyID + if avwsadpi.PolicyID != nil { + objectMap["policyId"] = avwsadpi.PolicyID } - if avwshdpi.LastRecoveryPoint != nil { - objectMap["lastRecoveryPoint"] = avwshdpi.LastRecoveryPoint + if avwsadpi.LastRecoveryPoint != nil { + objectMap["lastRecoveryPoint"] = avwsadpi.LastRecoveryPoint } - if avwshdpi.BackupSetName != nil { - objectMap["backupSetName"] = avwshdpi.BackupSetName + if avwsadpi.BackupSetName != nil { + objectMap["backupSetName"] = avwsadpi.BackupSetName } - if avwshdpi.CreateMode != "" { - objectMap["createMode"] = 
avwshdpi.CreateMode + if avwsadpi.CreateMode != "" { + objectMap["createMode"] = avwsadpi.CreateMode } - if avwshdpi.ProtectedItemType != "" { - objectMap["protectedItemType"] = avwshdpi.ProtectedItemType + if avwsadpi.ProtectedItemType != "" { + objectMap["protectedItemType"] = avwsadpi.ProtectedItemType } return json.Marshal(objectMap) } -// AsAzureFileshareProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. -func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsAzureFileshareProtectedItem() (*AzureFileshareProtectedItem, bool) { +// AsAzureFileshareProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPAseDatabaseProtectedItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectedItem) AsAzureFileshareProtectedItem() (*AzureFileshareProtectedItem, bool) { return nil, false } -// AsAzureIaaSClassicComputeVMProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. -func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsAzureIaaSClassicComputeVMProtectedItem() (*AzureIaaSClassicComputeVMProtectedItem, bool) { +// AsAzureIaaSClassicComputeVMProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPAseDatabaseProtectedItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectedItem) AsAzureIaaSClassicComputeVMProtectedItem() (*AzureIaaSClassicComputeVMProtectedItem, bool) { return nil, false } -// AsAzureIaaSComputeVMProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. -func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsAzureIaaSComputeVMProtectedItem() (*AzureIaaSComputeVMProtectedItem, bool) { +// AsAzureIaaSComputeVMProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPAseDatabaseProtectedItem. 
+func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectedItem) AsAzureIaaSComputeVMProtectedItem() (*AzureIaaSComputeVMProtectedItem, bool) { return nil, false } -// AsAzureIaaSVMProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. -func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsAzureIaaSVMProtectedItem() (*AzureIaaSVMProtectedItem, bool) { +// AsAzureIaaSVMProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPAseDatabaseProtectedItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectedItem) AsAzureIaaSVMProtectedItem() (*AzureIaaSVMProtectedItem, bool) { return nil, false } -// AsBasicAzureIaaSVMProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. -func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsBasicAzureIaaSVMProtectedItem() (BasicAzureIaaSVMProtectedItem, bool) { +// AsBasicAzureIaaSVMProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPAseDatabaseProtectedItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectedItem) AsBasicAzureIaaSVMProtectedItem() (BasicAzureIaaSVMProtectedItem, bool) { return nil, false } -// AsAzureSQLProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. -func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsAzureSQLProtectedItem() (*AzureSQLProtectedItem, bool) { +// AsAzureSQLProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPAseDatabaseProtectedItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectedItem) AsAzureSQLProtectedItem() (*AzureSQLProtectedItem, bool) { return nil, false } -// AsAzureVMWorkloadProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. 
-func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsAzureVMWorkloadProtectedItem() (*AzureVMWorkloadProtectedItem, bool) { +// AsAzureVMWorkloadProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPAseDatabaseProtectedItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectedItem) AsAzureVMWorkloadProtectedItem() (*AzureVMWorkloadProtectedItem, bool) { return nil, false } -// AsBasicAzureVMWorkloadProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. -func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsBasicAzureVMWorkloadProtectedItem() (BasicAzureVMWorkloadProtectedItem, bool) { - return &avwshdpi, true +// AsBasicAzureVMWorkloadProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPAseDatabaseProtectedItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectedItem) AsBasicAzureVMWorkloadProtectedItem() (BasicAzureVMWorkloadProtectedItem, bool) { + return &avwsadpi, true } -// AsAzureVMWorkloadSAPHanaDatabaseProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. -func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsAzureVMWorkloadSAPHanaDatabaseProtectedItem() (*AzureVMWorkloadSAPHanaDatabaseProtectedItem, bool) { - return &avwshdpi, true +// AsAzureVMWorkloadSAPAseDatabaseProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPAseDatabaseProtectedItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectedItem) AsAzureVMWorkloadSAPAseDatabaseProtectedItem() (*AzureVMWorkloadSAPAseDatabaseProtectedItem, bool) { + return &avwsadpi, true } -// AsAzureVMWorkloadSQLDatabaseProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. 
-func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsAzureVMWorkloadSQLDatabaseProtectedItem() (*AzureVMWorkloadSQLDatabaseProtectedItem, bool) { +// AsAzureVMWorkloadSAPHanaDatabaseProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPAseDatabaseProtectedItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectedItem) AsAzureVMWorkloadSAPHanaDatabaseProtectedItem() (*AzureVMWorkloadSAPHanaDatabaseProtectedItem, bool) { return nil, false } -// AsDPMProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. -func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsDPMProtectedItem() (*DPMProtectedItem, bool) { +// AsAzureVMWorkloadSQLDatabaseProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPAseDatabaseProtectedItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectedItem) AsAzureVMWorkloadSQLDatabaseProtectedItem() (*AzureVMWorkloadSQLDatabaseProtectedItem, bool) { return nil, false } -// AsGenericProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. -func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsGenericProtectedItem() (*GenericProtectedItem, bool) { +// AsDPMProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPAseDatabaseProtectedItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectedItem) AsDPMProtectedItem() (*DPMProtectedItem, bool) { return nil, false } -// AsMabFileFolderProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. -func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsMabFileFolderProtectedItem() (*MabFileFolderProtectedItem, bool) { +// AsGenericProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPAseDatabaseProtectedItem. 
+func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectedItem) AsGenericProtectedItem() (*GenericProtectedItem, bool) { return nil, false } -// AsProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. -func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsProtectedItem() (*ProtectedItem, bool) { +// AsMabFileFolderProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPAseDatabaseProtectedItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectedItem) AsMabFileFolderProtectedItem() (*MabFileFolderProtectedItem, bool) { return nil, false } -// AsBasicProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. -func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsBasicProtectedItem() (BasicProtectedItem, bool) { - return &avwshdpi, true +// AsProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPAseDatabaseProtectedItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectedItem) AsProtectedItem() (*ProtectedItem, bool) { + return nil, false } -// AzureVMWorkloadSAPHanaDatabaseWorkloadItem azure VM workload-specific workload item representing SAP -// Hana Database. -type AzureVMWorkloadSAPHanaDatabaseWorkloadItem struct { +// AsBasicProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPAseDatabaseProtectedItem. +func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectedItem) AsBasicProtectedItem() (BasicProtectedItem, bool) { + return &avwsadpi, true +} + +// AzureVMWorkloadSAPAseDatabaseWorkloadItem azure VM workload-specific workload item representing SAP ASE +// Database. 
+type AzureVMWorkloadSAPAseDatabaseWorkloadItem struct { // ParentName - Name for instance or AG ParentName *string `json:"parentName,omitempty"` // ServerName - Host/Cluster Name for instance or AG @@ -6395,21 +6616,758 @@ type AzureVMWorkloadSAPHanaDatabaseWorkloadItem struct { FriendlyName *string `json:"friendlyName,omitempty"` // ProtectionState - State of the back up item. Possible values include: 'ProtectionStatusInvalid', 'ProtectionStatusNotProtected', 'ProtectionStatusProtecting', 'ProtectionStatusProtected', 'ProtectionStatusProtectionFailed' ProtectionState ProtectionStatus `json:"protectionState,omitempty"` - // WorkloadItemType - Possible values include: 'WorkloadItemTypeWorkloadItem', 'WorkloadItemTypeAzureVMWorkloadItem', 'WorkloadItemTypeSAPHanaDatabase1', 'WorkloadItemTypeSAPHanaSystem1', 'WorkloadItemTypeSQLDataBase1', 'WorkloadItemTypeSQLInstance1' + // WorkloadItemType - Possible values include: 'WorkloadItemTypeWorkloadItem', 'WorkloadItemTypeAzureVMWorkloadItem', 'WorkloadItemTypeSAPAseDatabase1', 'WorkloadItemTypeSAPAseSystem1', 'WorkloadItemTypeSAPHanaDatabase1', 'WorkloadItemTypeSAPHanaSystem1', 'WorkloadItemTypeSQLDataBase1', 'WorkloadItemTypeSQLInstance1' WorkloadItemType WorkloadItemTypeBasicWorkloadItem `json:"workloadItemType,omitempty"` } -// MarshalJSON is the custom marshaler for AzureVMWorkloadSAPHanaDatabaseWorkloadItem. -func (avwshdwi AzureVMWorkloadSAPHanaDatabaseWorkloadItem) MarshalJSON() ([]byte, error) { - avwshdwi.WorkloadItemType = WorkloadItemTypeSAPHanaDatabase1 +// MarshalJSON is the custom marshaler for AzureVMWorkloadSAPAseDatabaseWorkloadItem. 
+func (avwsadwi AzureVMWorkloadSAPAseDatabaseWorkloadItem) MarshalJSON() ([]byte, error) { + avwsadwi.WorkloadItemType = WorkloadItemTypeSAPAseDatabase1 objectMap := make(map[string]interface{}) - if avwshdwi.ParentName != nil { - objectMap["parentName"] = avwshdwi.ParentName + if avwsadwi.ParentName != nil { + objectMap["parentName"] = avwsadwi.ParentName } - if avwshdwi.ServerName != nil { - objectMap["serverName"] = avwshdwi.ServerName + if avwsadwi.ServerName != nil { + objectMap["serverName"] = avwsadwi.ServerName } - if avwshdwi.IsAutoProtectable != nil { + if avwsadwi.IsAutoProtectable != nil { + objectMap["isAutoProtectable"] = avwsadwi.IsAutoProtectable + } + if avwsadwi.Subinquireditemcount != nil { + objectMap["subinquireditemcount"] = avwsadwi.Subinquireditemcount + } + if avwsadwi.SubWorkloadItemCount != nil { + objectMap["subWorkloadItemCount"] = avwsadwi.SubWorkloadItemCount + } + if avwsadwi.BackupManagementType != nil { + objectMap["backupManagementType"] = avwsadwi.BackupManagementType + } + if avwsadwi.WorkloadType != nil { + objectMap["workloadType"] = avwsadwi.WorkloadType + } + if avwsadwi.FriendlyName != nil { + objectMap["friendlyName"] = avwsadwi.FriendlyName + } + if avwsadwi.ProtectionState != "" { + objectMap["protectionState"] = avwsadwi.ProtectionState + } + if avwsadwi.WorkloadItemType != "" { + objectMap["workloadItemType"] = avwsadwi.WorkloadItemType + } + return json.Marshal(objectMap) +} + +// AsAzureVMWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPAseDatabaseWorkloadItem. +func (avwsadwi AzureVMWorkloadSAPAseDatabaseWorkloadItem) AsAzureVMWorkloadItem() (*AzureVMWorkloadItem, bool) { + return nil, false +} + +// AsBasicAzureVMWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPAseDatabaseWorkloadItem. 
+func (avwsadwi AzureVMWorkloadSAPAseDatabaseWorkloadItem) AsBasicAzureVMWorkloadItem() (BasicAzureVMWorkloadItem, bool) { + return &avwsadwi, true +} + +// AsAzureVMWorkloadSAPAseDatabaseWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPAseDatabaseWorkloadItem. +func (avwsadwi AzureVMWorkloadSAPAseDatabaseWorkloadItem) AsAzureVMWorkloadSAPAseDatabaseWorkloadItem() (*AzureVMWorkloadSAPAseDatabaseWorkloadItem, bool) { + return &avwsadwi, true +} + +// AsAzureVMWorkloadSAPAseSystemWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPAseDatabaseWorkloadItem. +func (avwsadwi AzureVMWorkloadSAPAseDatabaseWorkloadItem) AsAzureVMWorkloadSAPAseSystemWorkloadItem() (*AzureVMWorkloadSAPAseSystemWorkloadItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSAPHanaDatabaseWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPAseDatabaseWorkloadItem. +func (avwsadwi AzureVMWorkloadSAPAseDatabaseWorkloadItem) AsAzureVMWorkloadSAPHanaDatabaseWorkloadItem() (*AzureVMWorkloadSAPHanaDatabaseWorkloadItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSAPHanaSystemWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPAseDatabaseWorkloadItem. +func (avwsadwi AzureVMWorkloadSAPAseDatabaseWorkloadItem) AsAzureVMWorkloadSAPHanaSystemWorkloadItem() (*AzureVMWorkloadSAPHanaSystemWorkloadItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSQLDatabaseWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPAseDatabaseWorkloadItem. +func (avwsadwi AzureVMWorkloadSAPAseDatabaseWorkloadItem) AsAzureVMWorkloadSQLDatabaseWorkloadItem() (*AzureVMWorkloadSQLDatabaseWorkloadItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSQLInstanceWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPAseDatabaseWorkloadItem. 
+func (avwsadwi AzureVMWorkloadSAPAseDatabaseWorkloadItem) AsAzureVMWorkloadSQLInstanceWorkloadItem() (*AzureVMWorkloadSQLInstanceWorkloadItem, bool) { + return nil, false +} + +// AsWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPAseDatabaseWorkloadItem. +func (avwsadwi AzureVMWorkloadSAPAseDatabaseWorkloadItem) AsWorkloadItem() (*WorkloadItem, bool) { + return nil, false +} + +// AsBasicWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPAseDatabaseWorkloadItem. +func (avwsadwi AzureVMWorkloadSAPAseDatabaseWorkloadItem) AsBasicWorkloadItem() (BasicWorkloadItem, bool) { + return &avwsadwi, true +} + +// AzureVMWorkloadSAPAseSystemProtectableItem azure VM workload-specific protectable item representing SAP +// ASE System. +type AzureVMWorkloadSAPAseSystemProtectableItem struct { + // ParentName - Name for instance or AG + ParentName *string `json:"parentName,omitempty"` + // ParentUniqueName - Parent Unique Name is added to provide the service formatted URI Name of the Parent + // Only Applicable for data bases where the parent would be either Instance or a SQL AG. 
+ ParentUniqueName *string `json:"parentUniqueName,omitempty"` + // ServerName - Host/Cluster Name for instance or AG + ServerName *string `json:"serverName,omitempty"` + // IsAutoProtectable - Indicates if protectable item is auto-protectable + IsAutoProtectable *bool `json:"isAutoProtectable,omitempty"` + // IsAutoProtected - Indicates if protectable item is auto-protected + IsAutoProtected *bool `json:"isAutoProtected,omitempty"` + // Subinquireditemcount - For instance or AG, indicates number of DBs present + Subinquireditemcount *int32 `json:"subinquireditemcount,omitempty"` + // Subprotectableitemcount - For instance or AG, indicates number of DBs to be protected + Subprotectableitemcount *int32 `json:"subprotectableitemcount,omitempty"` + // Prebackupvalidation - Pre-backup validation for protectable objects + Prebackupvalidation *PreBackupValidation `json:"prebackupvalidation,omitempty"` + // BackupManagementType - Type of backup management to backup an item. + BackupManagementType *string `json:"backupManagementType,omitempty"` + // WorkloadType - Type of workload for the backup management + WorkloadType *string `json:"workloadType,omitempty"` + // FriendlyName - Friendly name of the backup item. + FriendlyName *string `json:"friendlyName,omitempty"` + // ProtectionState - State of the back up item. 
Possible values include: 'ProtectionStatusInvalid', 'ProtectionStatusNotProtected', 'ProtectionStatusProtecting', 'ProtectionStatusProtected', 'ProtectionStatusProtectionFailed' + ProtectionState ProtectionStatus `json:"protectionState,omitempty"` + // ProtectableItemType - Possible values include: 'ProtectableItemTypeWorkloadProtectableItem', 'ProtectableItemTypeAzureFileShare', 'ProtectableItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectableItemTypeMicrosoftComputevirtualMachines', 'ProtectableItemTypeAzureVMWorkloadProtectableItem', 'ProtectableItemTypeSAPAseDatabase', 'ProtectableItemTypeSAPAseSystem', 'ProtectableItemTypeSAPHanaDatabase', 'ProtectableItemTypeSAPHanaSystem', 'ProtectableItemTypeSQLAvailabilityGroupContainer', 'ProtectableItemTypeSQLDataBase', 'ProtectableItemTypeSQLInstance', 'ProtectableItemTypeIaaSVMProtectableItem' + ProtectableItemType ProtectableItemType `json:"protectableItemType,omitempty"` +} + +// MarshalJSON is the custom marshaler for AzureVMWorkloadSAPAseSystemProtectableItem. 
+func (avwsaspi AzureVMWorkloadSAPAseSystemProtectableItem) MarshalJSON() ([]byte, error) { + avwsaspi.ProtectableItemType = ProtectableItemTypeSAPAseSystem + objectMap := make(map[string]interface{}) + if avwsaspi.ParentName != nil { + objectMap["parentName"] = avwsaspi.ParentName + } + if avwsaspi.ParentUniqueName != nil { + objectMap["parentUniqueName"] = avwsaspi.ParentUniqueName + } + if avwsaspi.ServerName != nil { + objectMap["serverName"] = avwsaspi.ServerName + } + if avwsaspi.IsAutoProtectable != nil { + objectMap["isAutoProtectable"] = avwsaspi.IsAutoProtectable + } + if avwsaspi.IsAutoProtected != nil { + objectMap["isAutoProtected"] = avwsaspi.IsAutoProtected + } + if avwsaspi.Subinquireditemcount != nil { + objectMap["subinquireditemcount"] = avwsaspi.Subinquireditemcount + } + if avwsaspi.Subprotectableitemcount != nil { + objectMap["subprotectableitemcount"] = avwsaspi.Subprotectableitemcount + } + if avwsaspi.Prebackupvalidation != nil { + objectMap["prebackupvalidation"] = avwsaspi.Prebackupvalidation + } + if avwsaspi.BackupManagementType != nil { + objectMap["backupManagementType"] = avwsaspi.BackupManagementType + } + if avwsaspi.WorkloadType != nil { + objectMap["workloadType"] = avwsaspi.WorkloadType + } + if avwsaspi.FriendlyName != nil { + objectMap["friendlyName"] = avwsaspi.FriendlyName + } + if avwsaspi.ProtectionState != "" { + objectMap["protectionState"] = avwsaspi.ProtectionState + } + if avwsaspi.ProtectableItemType != "" { + objectMap["protectableItemType"] = avwsaspi.ProtectableItemType + } + return json.Marshal(objectMap) +} + +// AsAzureFileShareProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseSystemProtectableItem. 
+func (avwsaspi AzureVMWorkloadSAPAseSystemProtectableItem) AsAzureFileShareProtectableItem() (*AzureFileShareProtectableItem, bool) { + return nil, false +} + +// AsAzureIaaSClassicComputeVMProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseSystemProtectableItem. +func (avwsaspi AzureVMWorkloadSAPAseSystemProtectableItem) AsAzureIaaSClassicComputeVMProtectableItem() (*AzureIaaSClassicComputeVMProtectableItem, bool) { + return nil, false +} + +// AsAzureIaaSComputeVMProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseSystemProtectableItem. +func (avwsaspi AzureVMWorkloadSAPAseSystemProtectableItem) AsAzureIaaSComputeVMProtectableItem() (*AzureIaaSComputeVMProtectableItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseSystemProtectableItem. +func (avwsaspi AzureVMWorkloadSAPAseSystemProtectableItem) AsAzureVMWorkloadProtectableItem() (*AzureVMWorkloadProtectableItem, bool) { + return nil, false +} + +// AsBasicAzureVMWorkloadProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseSystemProtectableItem. +func (avwsaspi AzureVMWorkloadSAPAseSystemProtectableItem) AsBasicAzureVMWorkloadProtectableItem() (BasicAzureVMWorkloadProtectableItem, bool) { + return &avwsaspi, true +} + +// AsAzureVMWorkloadSAPAseDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseSystemProtectableItem. +func (avwsaspi AzureVMWorkloadSAPAseSystemProtectableItem) AsAzureVMWorkloadSAPAseDatabaseProtectableItem() (*AzureVMWorkloadSAPAseDatabaseProtectableItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSAPAseSystemProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseSystemProtectableItem. 
+func (avwsaspi AzureVMWorkloadSAPAseSystemProtectableItem) AsAzureVMWorkloadSAPAseSystemProtectableItem() (*AzureVMWorkloadSAPAseSystemProtectableItem, bool) { + return &avwsaspi, true +} + +// AsAzureVMWorkloadSAPHanaDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseSystemProtectableItem. +func (avwsaspi AzureVMWorkloadSAPAseSystemProtectableItem) AsAzureVMWorkloadSAPHanaDatabaseProtectableItem() (*AzureVMWorkloadSAPHanaDatabaseProtectableItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSAPHanaSystemProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseSystemProtectableItem. +func (avwsaspi AzureVMWorkloadSAPAseSystemProtectableItem) AsAzureVMWorkloadSAPHanaSystemProtectableItem() (*AzureVMWorkloadSAPHanaSystemProtectableItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSQLAvailabilityGroupProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseSystemProtectableItem. +func (avwsaspi AzureVMWorkloadSAPAseSystemProtectableItem) AsAzureVMWorkloadSQLAvailabilityGroupProtectableItem() (*AzureVMWorkloadSQLAvailabilityGroupProtectableItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSQLDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseSystemProtectableItem. +func (avwsaspi AzureVMWorkloadSAPAseSystemProtectableItem) AsAzureVMWorkloadSQLDatabaseProtectableItem() (*AzureVMWorkloadSQLDatabaseProtectableItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSQLInstanceProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseSystemProtectableItem. 
+func (avwsaspi AzureVMWorkloadSAPAseSystemProtectableItem) AsAzureVMWorkloadSQLInstanceProtectableItem() (*AzureVMWorkloadSQLInstanceProtectableItem, bool) { + return nil, false +} + +// AsIaaSVMProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseSystemProtectableItem. +func (avwsaspi AzureVMWorkloadSAPAseSystemProtectableItem) AsIaaSVMProtectableItem() (*IaaSVMProtectableItem, bool) { + return nil, false +} + +// AsBasicIaaSVMProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseSystemProtectableItem. +func (avwsaspi AzureVMWorkloadSAPAseSystemProtectableItem) AsBasicIaaSVMProtectableItem() (BasicIaaSVMProtectableItem, bool) { + return nil, false +} + +// AsWorkloadProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseSystemProtectableItem. +func (avwsaspi AzureVMWorkloadSAPAseSystemProtectableItem) AsWorkloadProtectableItem() (*WorkloadProtectableItem, bool) { + return nil, false +} + +// AsBasicWorkloadProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPAseSystemProtectableItem. +func (avwsaspi AzureVMWorkloadSAPAseSystemProtectableItem) AsBasicWorkloadProtectableItem() (BasicWorkloadProtectableItem, bool) { + return &avwsaspi, true +} + +// AzureVMWorkloadSAPAseSystemWorkloadItem azure VM workload-specific workload item representing SAP ASE +// System. 
+type AzureVMWorkloadSAPAseSystemWorkloadItem struct { + // ParentName - Name for instance or AG + ParentName *string `json:"parentName,omitempty"` + // ServerName - Host/Cluster Name for instance or AG + ServerName *string `json:"serverName,omitempty"` + // IsAutoProtectable - Indicates if workload item is auto-protectable + IsAutoProtectable *bool `json:"isAutoProtectable,omitempty"` + // Subinquireditemcount - For instance or AG, indicates number of DBs present + Subinquireditemcount *int32 `json:"subinquireditemcount,omitempty"` + // SubWorkloadItemCount - For instance or AG, indicates number of DBs to be protected + SubWorkloadItemCount *int32 `json:"subWorkloadItemCount,omitempty"` + // BackupManagementType - Type of backup management to backup an item. + BackupManagementType *string `json:"backupManagementType,omitempty"` + // WorkloadType - Type of workload for the backup management + WorkloadType *string `json:"workloadType,omitempty"` + // FriendlyName - Friendly name of the backup item. + FriendlyName *string `json:"friendlyName,omitempty"` + // ProtectionState - State of the back up item. Possible values include: 'ProtectionStatusInvalid', 'ProtectionStatusNotProtected', 'ProtectionStatusProtecting', 'ProtectionStatusProtected', 'ProtectionStatusProtectionFailed' + ProtectionState ProtectionStatus `json:"protectionState,omitempty"` + // WorkloadItemType - Possible values include: 'WorkloadItemTypeWorkloadItem', 'WorkloadItemTypeAzureVMWorkloadItem', 'WorkloadItemTypeSAPAseDatabase1', 'WorkloadItemTypeSAPAseSystem1', 'WorkloadItemTypeSAPHanaDatabase1', 'WorkloadItemTypeSAPHanaSystem1', 'WorkloadItemTypeSQLDataBase1', 'WorkloadItemTypeSQLInstance1' + WorkloadItemType WorkloadItemTypeBasicWorkloadItem `json:"workloadItemType,omitempty"` +} + +// MarshalJSON is the custom marshaler for AzureVMWorkloadSAPAseSystemWorkloadItem. 
+func (avwsaswi AzureVMWorkloadSAPAseSystemWorkloadItem) MarshalJSON() ([]byte, error) { + avwsaswi.WorkloadItemType = WorkloadItemTypeSAPAseSystem1 + objectMap := make(map[string]interface{}) + if avwsaswi.ParentName != nil { + objectMap["parentName"] = avwsaswi.ParentName + } + if avwsaswi.ServerName != nil { + objectMap["serverName"] = avwsaswi.ServerName + } + if avwsaswi.IsAutoProtectable != nil { + objectMap["isAutoProtectable"] = avwsaswi.IsAutoProtectable + } + if avwsaswi.Subinquireditemcount != nil { + objectMap["subinquireditemcount"] = avwsaswi.Subinquireditemcount + } + if avwsaswi.SubWorkloadItemCount != nil { + objectMap["subWorkloadItemCount"] = avwsaswi.SubWorkloadItemCount + } + if avwsaswi.BackupManagementType != nil { + objectMap["backupManagementType"] = avwsaswi.BackupManagementType + } + if avwsaswi.WorkloadType != nil { + objectMap["workloadType"] = avwsaswi.WorkloadType + } + if avwsaswi.FriendlyName != nil { + objectMap["friendlyName"] = avwsaswi.FriendlyName + } + if avwsaswi.ProtectionState != "" { + objectMap["protectionState"] = avwsaswi.ProtectionState + } + if avwsaswi.WorkloadItemType != "" { + objectMap["workloadItemType"] = avwsaswi.WorkloadItemType + } + return json.Marshal(objectMap) +} + +// AsAzureVMWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPAseSystemWorkloadItem. +func (avwsaswi AzureVMWorkloadSAPAseSystemWorkloadItem) AsAzureVMWorkloadItem() (*AzureVMWorkloadItem, bool) { + return nil, false +} + +// AsBasicAzureVMWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPAseSystemWorkloadItem. +func (avwsaswi AzureVMWorkloadSAPAseSystemWorkloadItem) AsBasicAzureVMWorkloadItem() (BasicAzureVMWorkloadItem, bool) { + return &avwsaswi, true +} + +// AsAzureVMWorkloadSAPAseDatabaseWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPAseSystemWorkloadItem. 
+func (avwsaswi AzureVMWorkloadSAPAseSystemWorkloadItem) AsAzureVMWorkloadSAPAseDatabaseWorkloadItem() (*AzureVMWorkloadSAPAseDatabaseWorkloadItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSAPAseSystemWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPAseSystemWorkloadItem. +func (avwsaswi AzureVMWorkloadSAPAseSystemWorkloadItem) AsAzureVMWorkloadSAPAseSystemWorkloadItem() (*AzureVMWorkloadSAPAseSystemWorkloadItem, bool) { + return &avwsaswi, true +} + +// AsAzureVMWorkloadSAPHanaDatabaseWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPAseSystemWorkloadItem. +func (avwsaswi AzureVMWorkloadSAPAseSystemWorkloadItem) AsAzureVMWorkloadSAPHanaDatabaseWorkloadItem() (*AzureVMWorkloadSAPHanaDatabaseWorkloadItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSAPHanaSystemWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPAseSystemWorkloadItem. +func (avwsaswi AzureVMWorkloadSAPAseSystemWorkloadItem) AsAzureVMWorkloadSAPHanaSystemWorkloadItem() (*AzureVMWorkloadSAPHanaSystemWorkloadItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSQLDatabaseWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPAseSystemWorkloadItem. +func (avwsaswi AzureVMWorkloadSAPAseSystemWorkloadItem) AsAzureVMWorkloadSQLDatabaseWorkloadItem() (*AzureVMWorkloadSQLDatabaseWorkloadItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSQLInstanceWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPAseSystemWorkloadItem. +func (avwsaswi AzureVMWorkloadSAPAseSystemWorkloadItem) AsAzureVMWorkloadSQLInstanceWorkloadItem() (*AzureVMWorkloadSQLInstanceWorkloadItem, bool) { + return nil, false +} + +// AsWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPAseSystemWorkloadItem. 
+func (avwsaswi AzureVMWorkloadSAPAseSystemWorkloadItem) AsWorkloadItem() (*WorkloadItem, bool) { + return nil, false +} + +// AsBasicWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPAseSystemWorkloadItem. +func (avwsaswi AzureVMWorkloadSAPAseSystemWorkloadItem) AsBasicWorkloadItem() (BasicWorkloadItem, bool) { + return &avwsaswi, true +} + +// AzureVMWorkloadSAPHanaDatabaseProtectableItem azure VM workload-specific protectable item representing +// SAP HANA Database. +type AzureVMWorkloadSAPHanaDatabaseProtectableItem struct { + // ParentName - Name for instance or AG + ParentName *string `json:"parentName,omitempty"` + // ParentUniqueName - Parent Unique Name is added to provide the service formatted URI Name of the Parent + // Only Applicable for data bases where the parent would be either Instance or a SQL AG. + ParentUniqueName *string `json:"parentUniqueName,omitempty"` + // ServerName - Host/Cluster Name for instance or AG + ServerName *string `json:"serverName,omitempty"` + // IsAutoProtectable - Indicates if protectable item is auto-protectable + IsAutoProtectable *bool `json:"isAutoProtectable,omitempty"` + // IsAutoProtected - Indicates if protectable item is auto-protected + IsAutoProtected *bool `json:"isAutoProtected,omitempty"` + // Subinquireditemcount - For instance or AG, indicates number of DBs present + Subinquireditemcount *int32 `json:"subinquireditemcount,omitempty"` + // Subprotectableitemcount - For instance or AG, indicates number of DBs to be protected + Subprotectableitemcount *int32 `json:"subprotectableitemcount,omitempty"` + // Prebackupvalidation - Pre-backup validation for protectable objects + Prebackupvalidation *PreBackupValidation `json:"prebackupvalidation,omitempty"` + // BackupManagementType - Type of backup management to backup an item. 
+ BackupManagementType *string `json:"backupManagementType,omitempty"` + // WorkloadType - Type of workload for the backup management + WorkloadType *string `json:"workloadType,omitempty"` + // FriendlyName - Friendly name of the backup item. + FriendlyName *string `json:"friendlyName,omitempty"` + // ProtectionState - State of the back up item. Possible values include: 'ProtectionStatusInvalid', 'ProtectionStatusNotProtected', 'ProtectionStatusProtecting', 'ProtectionStatusProtected', 'ProtectionStatusProtectionFailed' + ProtectionState ProtectionStatus `json:"protectionState,omitempty"` + // ProtectableItemType - Possible values include: 'ProtectableItemTypeWorkloadProtectableItem', 'ProtectableItemTypeAzureFileShare', 'ProtectableItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectableItemTypeMicrosoftComputevirtualMachines', 'ProtectableItemTypeAzureVMWorkloadProtectableItem', 'ProtectableItemTypeSAPAseDatabase', 'ProtectableItemTypeSAPAseSystem', 'ProtectableItemTypeSAPHanaDatabase', 'ProtectableItemTypeSAPHanaSystem', 'ProtectableItemTypeSQLAvailabilityGroupContainer', 'ProtectableItemTypeSQLDataBase', 'ProtectableItemTypeSQLInstance', 'ProtectableItemTypeIaaSVMProtectableItem' + ProtectableItemType ProtectableItemType `json:"protectableItemType,omitempty"` +} + +// MarshalJSON is the custom marshaler for AzureVMWorkloadSAPHanaDatabaseProtectableItem. 
+func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) MarshalJSON() ([]byte, error) { + avwshdpi.ProtectableItemType = ProtectableItemTypeSAPHanaDatabase + objectMap := make(map[string]interface{}) + if avwshdpi.ParentName != nil { + objectMap["parentName"] = avwshdpi.ParentName + } + if avwshdpi.ParentUniqueName != nil { + objectMap["parentUniqueName"] = avwshdpi.ParentUniqueName + } + if avwshdpi.ServerName != nil { + objectMap["serverName"] = avwshdpi.ServerName + } + if avwshdpi.IsAutoProtectable != nil { + objectMap["isAutoProtectable"] = avwshdpi.IsAutoProtectable + } + if avwshdpi.IsAutoProtected != nil { + objectMap["isAutoProtected"] = avwshdpi.IsAutoProtected + } + if avwshdpi.Subinquireditemcount != nil { + objectMap["subinquireditemcount"] = avwshdpi.Subinquireditemcount + } + if avwshdpi.Subprotectableitemcount != nil { + objectMap["subprotectableitemcount"] = avwshdpi.Subprotectableitemcount + } + if avwshdpi.Prebackupvalidation != nil { + objectMap["prebackupvalidation"] = avwshdpi.Prebackupvalidation + } + if avwshdpi.BackupManagementType != nil { + objectMap["backupManagementType"] = avwshdpi.BackupManagementType + } + if avwshdpi.WorkloadType != nil { + objectMap["workloadType"] = avwshdpi.WorkloadType + } + if avwshdpi.FriendlyName != nil { + objectMap["friendlyName"] = avwshdpi.FriendlyName + } + if avwshdpi.ProtectionState != "" { + objectMap["protectionState"] = avwshdpi.ProtectionState + } + if avwshdpi.ProtectableItemType != "" { + objectMap["protectableItemType"] = avwshdpi.ProtectableItemType + } + return json.Marshal(objectMap) +} + +// AsAzureFileShareProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. 
+func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsAzureFileShareProtectableItem() (*AzureFileShareProtectableItem, bool) { + return nil, false +} + +// AsAzureIaaSClassicComputeVMProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsAzureIaaSClassicComputeVMProtectableItem() (*AzureIaaSClassicComputeVMProtectableItem, bool) { + return nil, false +} + +// AsAzureIaaSComputeVMProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsAzureIaaSComputeVMProtectableItem() (*AzureIaaSComputeVMProtectableItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsAzureVMWorkloadProtectableItem() (*AzureVMWorkloadProtectableItem, bool) { + return nil, false +} + +// AsBasicAzureVMWorkloadProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsBasicAzureVMWorkloadProtectableItem() (BasicAzureVMWorkloadProtectableItem, bool) { + return &avwshdpi, true +} + +// AsAzureVMWorkloadSAPAseDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsAzureVMWorkloadSAPAseDatabaseProtectableItem() (*AzureVMWorkloadSAPAseDatabaseProtectableItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSAPAseSystemProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. 
+func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsAzureVMWorkloadSAPAseSystemProtectableItem() (*AzureVMWorkloadSAPAseSystemProtectableItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSAPHanaDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsAzureVMWorkloadSAPHanaDatabaseProtectableItem() (*AzureVMWorkloadSAPHanaDatabaseProtectableItem, bool) { + return &avwshdpi, true +} + +// AsAzureVMWorkloadSAPHanaSystemProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsAzureVMWorkloadSAPHanaSystemProtectableItem() (*AzureVMWorkloadSAPHanaSystemProtectableItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSQLAvailabilityGroupProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsAzureVMWorkloadSQLAvailabilityGroupProtectableItem() (*AzureVMWorkloadSQLAvailabilityGroupProtectableItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSQLDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsAzureVMWorkloadSQLDatabaseProtectableItem() (*AzureVMWorkloadSQLDatabaseProtectableItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSQLInstanceProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. 
+func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsAzureVMWorkloadSQLInstanceProtectableItem() (*AzureVMWorkloadSQLInstanceProtectableItem, bool) { + return nil, false +} + +// AsIaaSVMProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsIaaSVMProtectableItem() (*IaaSVMProtectableItem, bool) { + return nil, false +} + +// AsBasicIaaSVMProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsBasicIaaSVMProtectableItem() (BasicIaaSVMProtectableItem, bool) { + return nil, false +} + +// AsWorkloadProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsWorkloadProtectableItem() (*WorkloadProtectableItem, bool) { + return nil, false +} + +// AsBasicWorkloadProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectableItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem) AsBasicWorkloadProtectableItem() (BasicWorkloadProtectableItem, bool) { + return &avwshdpi, true +} + +// AzureVMWorkloadSAPHanaDatabaseProtectedItem azure VM workload-specific protected item representing SAP +// HANA Database. +type AzureVMWorkloadSAPHanaDatabaseProtectedItem struct { + // FriendlyName - Friendly name of the DB represented by this backup item. + FriendlyName *string `json:"friendlyName,omitempty"` + // ServerName - Host/Cluster Name for instance or AG + ServerName *string `json:"serverName,omitempty"` + // ParentName - Parent name of the DB such as Instance or Availability Group. 
+ ParentName *string `json:"parentName,omitempty"` + // ParentType - Parent type of protected item, example: for a DB, standalone server or distributed + ParentType *string `json:"parentType,omitempty"` + // ProtectionStatus - Backup status of this backup item. + ProtectionStatus *string `json:"protectionStatus,omitempty"` + // ProtectionState - Backup state of this backup item. Possible values include: 'ProtectionStateInvalid', 'ProtectionStateIRPending', 'ProtectionStateProtected', 'ProtectionStateProtectionError', 'ProtectionStateProtectionStopped', 'ProtectionStateProtectionPaused' + ProtectionState ProtectionState `json:"protectionState,omitempty"` + // LastBackupStatus - Last backup operation status. Possible values: Healthy, Unhealthy. Possible values include: 'LastBackupStatusInvalid', 'LastBackupStatusHealthy', 'LastBackupStatusUnhealthy', 'LastBackupStatusIRPending' + LastBackupStatus LastBackupStatus `json:"lastBackupStatus,omitempty"` + // LastBackupTime - Timestamp of the last backup operation on this backup item. + LastBackupTime *date.Time `json:"lastBackupTime,omitempty"` + // LastBackupErrorDetail - Error details in last backup + LastBackupErrorDetail *ErrorDetail `json:"lastBackupErrorDetail,omitempty"` + // ProtectedItemDataSourceID - Data ID of the protected item. + ProtectedItemDataSourceID *string `json:"protectedItemDataSourceId,omitempty"` + // ProtectedItemHealthStatus - Health status of the backup item, evaluated based on last heartbeat received. Possible values include: 'ProtectedItemHealthStatusInvalid', 'ProtectedItemHealthStatusHealthy', 'ProtectedItemHealthStatusUnhealthy', 'ProtectedItemHealthStatusNotReachable', 'ProtectedItemHealthStatusIRPending' + ProtectedItemHealthStatus ProtectedItemHealthStatus `json:"protectedItemHealthStatus,omitempty"` + // ExtendedInfo - Additional information for this backup item. 
+ ExtendedInfo *AzureVMWorkloadProtectedItemExtendedInfo `json:"extendedInfo,omitempty"` + // BackupManagementType - Type of backup management for the backed up item. Possible values include: 'ManagementTypeInvalid', 'ManagementTypeAzureIaasVM', 'ManagementTypeMAB', 'ManagementTypeDPM', 'ManagementTypeAzureBackupServer', 'ManagementTypeAzureSQL', 'ManagementTypeAzureStorage', 'ManagementTypeAzureWorkload', 'ManagementTypeDefaultBackup' + BackupManagementType ManagementType `json:"backupManagementType,omitempty"` + // WorkloadType - Type of workload this item represents. Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase', 'DataSourceTypeSAPAseDatabase' + WorkloadType DataSourceType `json:"workloadType,omitempty"` + // ContainerName - Unique name of container + ContainerName *string `json:"containerName,omitempty"` + // SourceResourceID - ARM ID of the resource to be backed up. + SourceResourceID *string `json:"sourceResourceId,omitempty"` + // PolicyID - ID of the backup policy with which this item is backed up. + PolicyID *string `json:"policyId,omitempty"` + // LastRecoveryPoint - Timestamp when the last (latest) backup copy was created for this backup item. + LastRecoveryPoint *date.Time `json:"lastRecoveryPoint,omitempty"` + // BackupSetName - Name of the backup set the backup item belongs to + BackupSetName *string `json:"backupSetName,omitempty"` + // CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. 
Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover' + CreateMode CreateMode `json:"createMode,omitempty"` + // ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem' + ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"` +} + +// MarshalJSON is the custom marshaler for AzureVMWorkloadSAPHanaDatabaseProtectedItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) MarshalJSON() ([]byte, error) { + avwshdpi.ProtectedItemType = ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase + objectMap := make(map[string]interface{}) + if avwshdpi.FriendlyName != nil { + objectMap["friendlyName"] = avwshdpi.FriendlyName + } + if avwshdpi.ServerName != nil { + objectMap["serverName"] = avwshdpi.ServerName + } + if avwshdpi.ParentName != nil { + objectMap["parentName"] = avwshdpi.ParentName + } + if avwshdpi.ParentType != nil { + objectMap["parentType"] = avwshdpi.ParentType + } + if avwshdpi.ProtectionStatus != nil { + objectMap["protectionStatus"] = avwshdpi.ProtectionStatus + } + if avwshdpi.ProtectionState != "" { + objectMap["protectionState"] = avwshdpi.ProtectionState + } + if avwshdpi.LastBackupStatus != "" { + objectMap["lastBackupStatus"] = avwshdpi.LastBackupStatus + } + if avwshdpi.LastBackupTime != nil { + objectMap["lastBackupTime"] = avwshdpi.LastBackupTime + } + if avwshdpi.LastBackupErrorDetail != nil { + 
objectMap["lastBackupErrorDetail"] = avwshdpi.LastBackupErrorDetail + } + if avwshdpi.ProtectedItemDataSourceID != nil { + objectMap["protectedItemDataSourceId"] = avwshdpi.ProtectedItemDataSourceID + } + if avwshdpi.ProtectedItemHealthStatus != "" { + objectMap["protectedItemHealthStatus"] = avwshdpi.ProtectedItemHealthStatus + } + if avwshdpi.ExtendedInfo != nil { + objectMap["extendedInfo"] = avwshdpi.ExtendedInfo + } + if avwshdpi.BackupManagementType != "" { + objectMap["backupManagementType"] = avwshdpi.BackupManagementType + } + if avwshdpi.WorkloadType != "" { + objectMap["workloadType"] = avwshdpi.WorkloadType + } + if avwshdpi.ContainerName != nil { + objectMap["containerName"] = avwshdpi.ContainerName + } + if avwshdpi.SourceResourceID != nil { + objectMap["sourceResourceId"] = avwshdpi.SourceResourceID + } + if avwshdpi.PolicyID != nil { + objectMap["policyId"] = avwshdpi.PolicyID + } + if avwshdpi.LastRecoveryPoint != nil { + objectMap["lastRecoveryPoint"] = avwshdpi.LastRecoveryPoint + } + if avwshdpi.BackupSetName != nil { + objectMap["backupSetName"] = avwshdpi.BackupSetName + } + if avwshdpi.CreateMode != "" { + objectMap["createMode"] = avwshdpi.CreateMode + } + if avwshdpi.ProtectedItemType != "" { + objectMap["protectedItemType"] = avwshdpi.ProtectedItemType + } + return json.Marshal(objectMap) +} + +// AsAzureFileshareProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsAzureFileshareProtectedItem() (*AzureFileshareProtectedItem, bool) { + return nil, false +} + +// AsAzureIaaSClassicComputeVMProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. 
+func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsAzureIaaSClassicComputeVMProtectedItem() (*AzureIaaSClassicComputeVMProtectedItem, bool) { + return nil, false +} + +// AsAzureIaaSComputeVMProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsAzureIaaSComputeVMProtectedItem() (*AzureIaaSComputeVMProtectedItem, bool) { + return nil, false +} + +// AsAzureIaaSVMProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsAzureIaaSVMProtectedItem() (*AzureIaaSVMProtectedItem, bool) { + return nil, false +} + +// AsBasicAzureIaaSVMProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsBasicAzureIaaSVMProtectedItem() (BasicAzureIaaSVMProtectedItem, bool) { + return nil, false +} + +// AsAzureSQLProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsAzureSQLProtectedItem() (*AzureSQLProtectedItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsAzureVMWorkloadProtectedItem() (*AzureVMWorkloadProtectedItem, bool) { + return nil, false +} + +// AsBasicAzureVMWorkloadProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. 
+func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsBasicAzureVMWorkloadProtectedItem() (BasicAzureVMWorkloadProtectedItem, bool) { + return &avwshdpi, true +} + +// AsAzureVMWorkloadSAPAseDatabaseProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsAzureVMWorkloadSAPAseDatabaseProtectedItem() (*AzureVMWorkloadSAPAseDatabaseProtectedItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSAPHanaDatabaseProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsAzureVMWorkloadSAPHanaDatabaseProtectedItem() (*AzureVMWorkloadSAPHanaDatabaseProtectedItem, bool) { + return &avwshdpi, true +} + +// AsAzureVMWorkloadSQLDatabaseProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsAzureVMWorkloadSQLDatabaseProtectedItem() (*AzureVMWorkloadSQLDatabaseProtectedItem, bool) { + return nil, false +} + +// AsDPMProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsDPMProtectedItem() (*DPMProtectedItem, bool) { + return nil, false +} + +// AsGenericProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsGenericProtectedItem() (*GenericProtectedItem, bool) { + return nil, false +} + +// AsMabFileFolderProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. 
+func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsMabFileFolderProtectedItem() (*MabFileFolderProtectedItem, bool) { + return nil, false +} + +// AsProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsProtectedItem() (*ProtectedItem, bool) { + return nil, false +} + +// AsBasicProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSAPHanaDatabaseProtectedItem. +func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) AsBasicProtectedItem() (BasicProtectedItem, bool) { + return &avwshdpi, true +} + +// AzureVMWorkloadSAPHanaDatabaseWorkloadItem azure VM workload-specific workload item representing SAP +// HANA Database. +type AzureVMWorkloadSAPHanaDatabaseWorkloadItem struct { + // ParentName - Name for instance or AG + ParentName *string `json:"parentName,omitempty"` + // ServerName - Host/Cluster Name for instance or AG + ServerName *string `json:"serverName,omitempty"` + // IsAutoProtectable - Indicates if workload item is auto-protectable + IsAutoProtectable *bool `json:"isAutoProtectable,omitempty"` + // Subinquireditemcount - For instance or AG, indicates number of DBs present + Subinquireditemcount *int32 `json:"subinquireditemcount,omitempty"` + // SubWorkloadItemCount - For instance or AG, indicates number of DBs to be protected + SubWorkloadItemCount *int32 `json:"subWorkloadItemCount,omitempty"` + // BackupManagementType - Type of backup management to backup an item. + BackupManagementType *string `json:"backupManagementType,omitempty"` + // WorkloadType - Type of workload for the backup management + WorkloadType *string `json:"workloadType,omitempty"` + // FriendlyName - Friendly name of the backup item. + FriendlyName *string `json:"friendlyName,omitempty"` + // ProtectionState - State of the back up item. 
Possible values include: 'ProtectionStatusInvalid', 'ProtectionStatusNotProtected', 'ProtectionStatusProtecting', 'ProtectionStatusProtected', 'ProtectionStatusProtectionFailed' + ProtectionState ProtectionStatus `json:"protectionState,omitempty"` + // WorkloadItemType - Possible values include: 'WorkloadItemTypeWorkloadItem', 'WorkloadItemTypeAzureVMWorkloadItem', 'WorkloadItemTypeSAPAseDatabase1', 'WorkloadItemTypeSAPAseSystem1', 'WorkloadItemTypeSAPHanaDatabase1', 'WorkloadItemTypeSAPHanaSystem1', 'WorkloadItemTypeSQLDataBase1', 'WorkloadItemTypeSQLInstance1' + WorkloadItemType WorkloadItemTypeBasicWorkloadItem `json:"workloadItemType,omitempty"` +} + +// MarshalJSON is the custom marshaler for AzureVMWorkloadSAPHanaDatabaseWorkloadItem. +func (avwshdwi AzureVMWorkloadSAPHanaDatabaseWorkloadItem) MarshalJSON() ([]byte, error) { + avwshdwi.WorkloadItemType = WorkloadItemTypeSAPHanaDatabase1 + objectMap := make(map[string]interface{}) + if avwshdwi.ParentName != nil { + objectMap["parentName"] = avwshdwi.ParentName + } + if avwshdwi.ServerName != nil { + objectMap["serverName"] = avwshdwi.ServerName + } + if avwshdwi.IsAutoProtectable != nil { objectMap["isAutoProtectable"] = avwshdwi.IsAutoProtectable } if avwshdwi.Subinquireditemcount != nil { @@ -6446,6 +7404,16 @@ func (avwshdwi AzureVMWorkloadSAPHanaDatabaseWorkloadItem) AsBasicAzureVMWorkloa return &avwshdwi, true } +// AsAzureVMWorkloadSAPAseDatabaseWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPHanaDatabaseWorkloadItem. +func (avwshdwi AzureVMWorkloadSAPHanaDatabaseWorkloadItem) AsAzureVMWorkloadSAPAseDatabaseWorkloadItem() (*AzureVMWorkloadSAPAseDatabaseWorkloadItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSAPAseSystemWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPHanaDatabaseWorkloadItem. 
+func (avwshdwi AzureVMWorkloadSAPHanaDatabaseWorkloadItem) AsAzureVMWorkloadSAPAseSystemWorkloadItem() (*AzureVMWorkloadSAPAseSystemWorkloadItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPHanaDatabaseWorkloadItem. func (avwshdwi AzureVMWorkloadSAPHanaDatabaseWorkloadItem) AsAzureVMWorkloadSAPHanaDatabaseWorkloadItem() (*AzureVMWorkloadSAPHanaDatabaseWorkloadItem, bool) { return &avwshdwi, true @@ -6477,12 +7445,12 @@ func (avwshdwi AzureVMWorkloadSAPHanaDatabaseWorkloadItem) AsBasicWorkloadItem() } // AzureVMWorkloadSAPHanaSystemProtectableItem azure VM workload-specific protectable item representing SAP -// Hana System. +// HANA System. type AzureVMWorkloadSAPHanaSystemProtectableItem struct { // ParentName - Name for instance or AG ParentName *string `json:"parentName,omitempty"` // ParentUniqueName - Parent Unique Name is added to provide the service formatted URI Name of the Parent - // Only Applicable for data bases where the parent would be either Instance or a SQL AG. + // Only Applicable for data bases where the parent would be either Instance or a SQL AG. ParentUniqueName *string `json:"parentUniqueName,omitempty"` // ServerName - Host/Cluster Name for instance or AG ServerName *string `json:"serverName,omitempty"` @@ -6504,7 +7472,7 @@ type AzureVMWorkloadSAPHanaSystemProtectableItem struct { FriendlyName *string `json:"friendlyName,omitempty"` // ProtectionState - State of the back up item. 
Possible values include: 'ProtectionStatusInvalid', 'ProtectionStatusNotProtected', 'ProtectionStatusProtecting', 'ProtectionStatusProtected', 'ProtectionStatusProtectionFailed' ProtectionState ProtectionStatus `json:"protectionState,omitempty"` - // ProtectableItemType - Possible values include: 'ProtectableItemTypeWorkloadProtectableItem', 'ProtectableItemTypeAzureFileShare', 'ProtectableItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectableItemTypeMicrosoftComputevirtualMachines', 'ProtectableItemTypeAzureVMWorkloadProtectableItem', 'ProtectableItemTypeSAPHanaDatabase', 'ProtectableItemTypeSAPHanaSystem', 'ProtectableItemTypeSQLAvailabilityGroupContainer', 'ProtectableItemTypeSQLDataBase', 'ProtectableItemTypeSQLInstance', 'ProtectableItemTypeIaaSVMProtectableItem' + // ProtectableItemType - Possible values include: 'ProtectableItemTypeWorkloadProtectableItem', 'ProtectableItemTypeAzureFileShare', 'ProtectableItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectableItemTypeMicrosoftComputevirtualMachines', 'ProtectableItemTypeAzureVMWorkloadProtectableItem', 'ProtectableItemTypeSAPAseDatabase', 'ProtectableItemTypeSAPAseSystem', 'ProtectableItemTypeSAPHanaDatabase', 'ProtectableItemTypeSAPHanaSystem', 'ProtectableItemTypeSQLAvailabilityGroupContainer', 'ProtectableItemTypeSQLDataBase', 'ProtectableItemTypeSQLInstance', 'ProtectableItemTypeIaaSVMProtectableItem' ProtectableItemType ProtectableItemType `json:"protectableItemType,omitempty"` } @@ -6579,6 +7547,16 @@ func (avwshspi AzureVMWorkloadSAPHanaSystemProtectableItem) AsBasicAzureVMWorklo return &avwshspi, true } +// AsAzureVMWorkloadSAPAseDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaSystemProtectableItem. 
+func (avwshspi AzureVMWorkloadSAPHanaSystemProtectableItem) AsAzureVMWorkloadSAPAseDatabaseProtectableItem() (*AzureVMWorkloadSAPAseDatabaseProtectableItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSAPAseSystemProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaSystemProtectableItem. +func (avwshspi AzureVMWorkloadSAPHanaSystemProtectableItem) AsAzureVMWorkloadSAPAseSystemProtectableItem() (*AzureVMWorkloadSAPAseSystemProtectableItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSAPHanaSystemProtectableItem. func (avwshspi AzureVMWorkloadSAPHanaSystemProtectableItem) AsAzureVMWorkloadSAPHanaDatabaseProtectableItem() (*AzureVMWorkloadSAPHanaDatabaseProtectableItem, bool) { return nil, false @@ -6624,7 +7602,7 @@ func (avwshspi AzureVMWorkloadSAPHanaSystemProtectableItem) AsBasicWorkloadProte return &avwshspi, true } -// AzureVMWorkloadSAPHanaSystemWorkloadItem azure VM workload-specific workload item representing SAP Hana +// AzureVMWorkloadSAPHanaSystemWorkloadItem azure VM workload-specific workload item representing SAP HANA // System. type AzureVMWorkloadSAPHanaSystemWorkloadItem struct { // ParentName - Name for instance or AG @@ -6645,7 +7623,7 @@ type AzureVMWorkloadSAPHanaSystemWorkloadItem struct { FriendlyName *string `json:"friendlyName,omitempty"` // ProtectionState - State of the back up item. 
Possible values include: 'ProtectionStatusInvalid', 'ProtectionStatusNotProtected', 'ProtectionStatusProtecting', 'ProtectionStatusProtected', 'ProtectionStatusProtectionFailed' ProtectionState ProtectionStatus `json:"protectionState,omitempty"` - // WorkloadItemType - Possible values include: 'WorkloadItemTypeWorkloadItem', 'WorkloadItemTypeAzureVMWorkloadItem', 'WorkloadItemTypeSAPHanaDatabase1', 'WorkloadItemTypeSAPHanaSystem1', 'WorkloadItemTypeSQLDataBase1', 'WorkloadItemTypeSQLInstance1' + // WorkloadItemType - Possible values include: 'WorkloadItemTypeWorkloadItem', 'WorkloadItemTypeAzureVMWorkloadItem', 'WorkloadItemTypeSAPAseDatabase1', 'WorkloadItemTypeSAPAseSystem1', 'WorkloadItemTypeSAPHanaDatabase1', 'WorkloadItemTypeSAPHanaSystem1', 'WorkloadItemTypeSQLDataBase1', 'WorkloadItemTypeSQLInstance1' WorkloadItemType WorkloadItemTypeBasicWorkloadItem `json:"workloadItemType,omitempty"` } @@ -6696,6 +7674,16 @@ func (avwshswi AzureVMWorkloadSAPHanaSystemWorkloadItem) AsBasicAzureVMWorkloadI return &avwshswi, true } +// AsAzureVMWorkloadSAPAseDatabaseWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPHanaSystemWorkloadItem. +func (avwshswi AzureVMWorkloadSAPHanaSystemWorkloadItem) AsAzureVMWorkloadSAPAseDatabaseWorkloadItem() (*AzureVMWorkloadSAPAseDatabaseWorkloadItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSAPAseSystemWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPHanaSystemWorkloadItem. +func (avwshswi AzureVMWorkloadSAPHanaSystemWorkloadItem) AsAzureVMWorkloadSAPAseSystemWorkloadItem() (*AzureVMWorkloadSAPAseSystemWorkloadItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSAPHanaSystemWorkloadItem. 
func (avwshswi AzureVMWorkloadSAPHanaSystemWorkloadItem) AsAzureVMWorkloadSAPHanaDatabaseWorkloadItem() (*AzureVMWorkloadSAPHanaDatabaseWorkloadItem, bool) { return nil, false @@ -6732,7 +7720,7 @@ type AzureVMWorkloadSQLAvailabilityGroupProtectableItem struct { // ParentName - Name for instance or AG ParentName *string `json:"parentName,omitempty"` // ParentUniqueName - Parent Unique Name is added to provide the service formatted URI Name of the Parent - // Only Applicable for data bases where the parent would be either Instance or a SQL AG. + // Only Applicable for data bases where the parent would be either Instance or a SQL AG. ParentUniqueName *string `json:"parentUniqueName,omitempty"` // ServerName - Host/Cluster Name for instance or AG ServerName *string `json:"serverName,omitempty"` @@ -6754,7 +7742,7 @@ type AzureVMWorkloadSQLAvailabilityGroupProtectableItem struct { FriendlyName *string `json:"friendlyName,omitempty"` // ProtectionState - State of the back up item. Possible values include: 'ProtectionStatusInvalid', 'ProtectionStatusNotProtected', 'ProtectionStatusProtecting', 'ProtectionStatusProtected', 'ProtectionStatusProtectionFailed' ProtectionState ProtectionStatus `json:"protectionState,omitempty"` - // ProtectableItemType - Possible values include: 'ProtectableItemTypeWorkloadProtectableItem', 'ProtectableItemTypeAzureFileShare', 'ProtectableItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectableItemTypeMicrosoftComputevirtualMachines', 'ProtectableItemTypeAzureVMWorkloadProtectableItem', 'ProtectableItemTypeSAPHanaDatabase', 'ProtectableItemTypeSAPHanaSystem', 'ProtectableItemTypeSQLAvailabilityGroupContainer', 'ProtectableItemTypeSQLDataBase', 'ProtectableItemTypeSQLInstance', 'ProtectableItemTypeIaaSVMProtectableItem' + // ProtectableItemType - Possible values include: 'ProtectableItemTypeWorkloadProtectableItem', 'ProtectableItemTypeAzureFileShare', 'ProtectableItemTypeMicrosoftClassicComputevirtualMachines', 
'ProtectableItemTypeMicrosoftComputevirtualMachines', 'ProtectableItemTypeAzureVMWorkloadProtectableItem', 'ProtectableItemTypeSAPAseDatabase', 'ProtectableItemTypeSAPAseSystem', 'ProtectableItemTypeSAPHanaDatabase', 'ProtectableItemTypeSAPHanaSystem', 'ProtectableItemTypeSQLAvailabilityGroupContainer', 'ProtectableItemTypeSQLDataBase', 'ProtectableItemTypeSQLInstance', 'ProtectableItemTypeIaaSVMProtectableItem' ProtectableItemType ProtectableItemType `json:"protectableItemType,omitempty"` } @@ -6829,6 +7817,16 @@ func (avwsagpi AzureVMWorkloadSQLAvailabilityGroupProtectableItem) AsBasicAzureV return &avwsagpi, true } +// AsAzureVMWorkloadSAPAseDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSQLAvailabilityGroupProtectableItem. +func (avwsagpi AzureVMWorkloadSQLAvailabilityGroupProtectableItem) AsAzureVMWorkloadSAPAseDatabaseProtectableItem() (*AzureVMWorkloadSAPAseDatabaseProtectableItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSAPAseSystemProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSQLAvailabilityGroupProtectableItem. +func (avwsagpi AzureVMWorkloadSQLAvailabilityGroupProtectableItem) AsAzureVMWorkloadSAPAseSystemProtectableItem() (*AzureVMWorkloadSAPAseSystemProtectableItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSQLAvailabilityGroupProtectableItem. 
func (avwsagpi AzureVMWorkloadSQLAvailabilityGroupProtectableItem) AsAzureVMWorkloadSAPHanaDatabaseProtectableItem() (*AzureVMWorkloadSAPHanaDatabaseProtectableItem, bool) { return nil, false @@ -6880,7 +7878,7 @@ type AzureVMWorkloadSQLDatabaseProtectableItem struct { // ParentName - Name for instance or AG ParentName *string `json:"parentName,omitempty"` // ParentUniqueName - Parent Unique Name is added to provide the service formatted URI Name of the Parent - // Only Applicable for data bases where the parent would be either Instance or a SQL AG. + // Only Applicable for data bases where the parent would be either Instance or a SQL AG. ParentUniqueName *string `json:"parentUniqueName,omitempty"` // ServerName - Host/Cluster Name for instance or AG ServerName *string `json:"serverName,omitempty"` @@ -6902,7 +7900,7 @@ type AzureVMWorkloadSQLDatabaseProtectableItem struct { FriendlyName *string `json:"friendlyName,omitempty"` // ProtectionState - State of the back up item. Possible values include: 'ProtectionStatusInvalid', 'ProtectionStatusNotProtected', 'ProtectionStatusProtecting', 'ProtectionStatusProtected', 'ProtectionStatusProtectionFailed' ProtectionState ProtectionStatus `json:"protectionState,omitempty"` - // ProtectableItemType - Possible values include: 'ProtectableItemTypeWorkloadProtectableItem', 'ProtectableItemTypeAzureFileShare', 'ProtectableItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectableItemTypeMicrosoftComputevirtualMachines', 'ProtectableItemTypeAzureVMWorkloadProtectableItem', 'ProtectableItemTypeSAPHanaDatabase', 'ProtectableItemTypeSAPHanaSystem', 'ProtectableItemTypeSQLAvailabilityGroupContainer', 'ProtectableItemTypeSQLDataBase', 'ProtectableItemTypeSQLInstance', 'ProtectableItemTypeIaaSVMProtectableItem' + // ProtectableItemType - Possible values include: 'ProtectableItemTypeWorkloadProtectableItem', 'ProtectableItemTypeAzureFileShare', 'ProtectableItemTypeMicrosoftClassicComputevirtualMachines', 
'ProtectableItemTypeMicrosoftComputevirtualMachines', 'ProtectableItemTypeAzureVMWorkloadProtectableItem', 'ProtectableItemTypeSAPAseDatabase', 'ProtectableItemTypeSAPAseSystem', 'ProtectableItemTypeSAPHanaDatabase', 'ProtectableItemTypeSAPHanaSystem', 'ProtectableItemTypeSQLAvailabilityGroupContainer', 'ProtectableItemTypeSQLDataBase', 'ProtectableItemTypeSQLInstance', 'ProtectableItemTypeIaaSVMProtectableItem' ProtectableItemType ProtectableItemType `json:"protectableItemType,omitempty"` } @@ -6977,6 +7975,16 @@ func (avwsdpi AzureVMWorkloadSQLDatabaseProtectableItem) AsBasicAzureVMWorkloadP return &avwsdpi, true } +// AsAzureVMWorkloadSAPAseDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSQLDatabaseProtectableItem. +func (avwsdpi AzureVMWorkloadSQLDatabaseProtectableItem) AsAzureVMWorkloadSAPAseDatabaseProtectableItem() (*AzureVMWorkloadSAPAseDatabaseProtectableItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSAPAseSystemProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSQLDatabaseProtectableItem. +func (avwsdpi AzureVMWorkloadSQLDatabaseProtectableItem) AsAzureVMWorkloadSAPAseSystemProtectableItem() (*AzureVMWorkloadSAPAseSystemProtectableItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSQLDatabaseProtectableItem. func (avwsdpi AzureVMWorkloadSQLDatabaseProtectableItem) AsAzureVMWorkloadSAPHanaDatabaseProtectableItem() (*AzureVMWorkloadSAPHanaDatabaseProtectableItem, bool) { return nil, false @@ -7031,7 +8039,7 @@ type AzureVMWorkloadSQLDatabaseProtectedItem struct { ServerName *string `json:"serverName,omitempty"` // ParentName - Parent name of the DB such as Instance or Availability Group. 
ParentName *string `json:"parentName,omitempty"` - // ParentType - Parent type of DB, SQLAG or StandAlone + // ParentType - Parent type of protected item, example: for a DB, standalone server or distributed ParentType *string `json:"parentType,omitempty"` // ProtectionStatus - Backup status of this backup item. ProtectionStatus *string `json:"protectionStatus,omitempty"` @@ -7051,7 +8059,7 @@ type AzureVMWorkloadSQLDatabaseProtectedItem struct { ExtendedInfo *AzureVMWorkloadProtectedItemExtendedInfo `json:"extendedInfo,omitempty"` // BackupManagementType - Type of backup management for the backed up item. Possible values include: 'ManagementTypeInvalid', 'ManagementTypeAzureIaasVM', 'ManagementTypeMAB', 'ManagementTypeDPM', 'ManagementTypeAzureBackupServer', 'ManagementTypeAzureSQL', 'ManagementTypeAzureStorage', 'ManagementTypeAzureWorkload', 'ManagementTypeDefaultBackup' BackupManagementType ManagementType `json:"backupManagementType,omitempty"` - // WorkloadType - Type of workload this item represents. Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase' + // WorkloadType - Type of workload this item represents. 
Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase', 'DataSourceTypeSAPAseDatabase' WorkloadType DataSourceType `json:"workloadType,omitempty"` // ContainerName - Unique name of container ContainerName *string `json:"containerName,omitempty"` @@ -7065,7 +8073,7 @@ type AzureVMWorkloadSQLDatabaseProtectedItem struct { BackupSetName *string `json:"backupSetName,omitempty"` // CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover' CreateMode CreateMode `json:"createMode,omitempty"` - // ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem' + // ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 
'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem' ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"` } @@ -7176,6 +8184,11 @@ func (avwsdpi AzureVMWorkloadSQLDatabaseProtectedItem) AsAzureVMWorkloadProtecte // AsBasicAzureVMWorkloadProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSQLDatabaseProtectedItem. func (avwsdpi AzureVMWorkloadSQLDatabaseProtectedItem) AsBasicAzureVMWorkloadProtectedItem() (BasicAzureVMWorkloadProtectedItem, bool) { + return &avwsdpi, true +} + +// AsAzureVMWorkloadSAPAseDatabaseProtectedItem is the BasicProtectedItem implementation for AzureVMWorkloadSQLDatabaseProtectedItem. +func (avwsdpi AzureVMWorkloadSQLDatabaseProtectedItem) AsAzureVMWorkloadSAPAseDatabaseProtectedItem() (*AzureVMWorkloadSAPAseDatabaseProtectedItem, bool) { return nil, false } @@ -7235,7 +8248,7 @@ type AzureVMWorkloadSQLDatabaseWorkloadItem struct { FriendlyName *string `json:"friendlyName,omitempty"` // ProtectionState - State of the back up item. 
Possible values include: 'ProtectionStatusInvalid', 'ProtectionStatusNotProtected', 'ProtectionStatusProtecting', 'ProtectionStatusProtected', 'ProtectionStatusProtectionFailed' ProtectionState ProtectionStatus `json:"protectionState,omitempty"` - // WorkloadItemType - Possible values include: 'WorkloadItemTypeWorkloadItem', 'WorkloadItemTypeAzureVMWorkloadItem', 'WorkloadItemTypeSAPHanaDatabase1', 'WorkloadItemTypeSAPHanaSystem1', 'WorkloadItemTypeSQLDataBase1', 'WorkloadItemTypeSQLInstance1' + // WorkloadItemType - Possible values include: 'WorkloadItemTypeWorkloadItem', 'WorkloadItemTypeAzureVMWorkloadItem', 'WorkloadItemTypeSAPAseDatabase1', 'WorkloadItemTypeSAPAseSystem1', 'WorkloadItemTypeSAPHanaDatabase1', 'WorkloadItemTypeSAPHanaSystem1', 'WorkloadItemTypeSQLDataBase1', 'WorkloadItemTypeSQLInstance1' WorkloadItemType WorkloadItemTypeBasicWorkloadItem `json:"workloadItemType,omitempty"` } @@ -7286,6 +8299,16 @@ func (avwsdwi AzureVMWorkloadSQLDatabaseWorkloadItem) AsBasicAzureVMWorkloadItem return &avwsdwi, true } +// AsAzureVMWorkloadSAPAseDatabaseWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSQLDatabaseWorkloadItem. +func (avwsdwi AzureVMWorkloadSQLDatabaseWorkloadItem) AsAzureVMWorkloadSAPAseDatabaseWorkloadItem() (*AzureVMWorkloadSAPAseDatabaseWorkloadItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSAPAseSystemWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSQLDatabaseWorkloadItem. +func (avwsdwi AzureVMWorkloadSQLDatabaseWorkloadItem) AsAzureVMWorkloadSAPAseSystemWorkloadItem() (*AzureVMWorkloadSAPAseSystemWorkloadItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSQLDatabaseWorkloadItem. 
func (avwsdwi AzureVMWorkloadSQLDatabaseWorkloadItem) AsAzureVMWorkloadSAPHanaDatabaseWorkloadItem() (*AzureVMWorkloadSAPHanaDatabaseWorkloadItem, bool) { return nil, false @@ -7322,7 +8345,7 @@ type AzureVMWorkloadSQLInstanceProtectableItem struct { // ParentName - Name for instance or AG ParentName *string `json:"parentName,omitempty"` // ParentUniqueName - Parent Unique Name is added to provide the service formatted URI Name of the Parent - // Only Applicable for data bases where the parent would be either Instance or a SQL AG. + // Only Applicable for data bases where the parent would be either Instance or a SQL AG. ParentUniqueName *string `json:"parentUniqueName,omitempty"` // ServerName - Host/Cluster Name for instance or AG ServerName *string `json:"serverName,omitempty"` @@ -7344,7 +8367,7 @@ type AzureVMWorkloadSQLInstanceProtectableItem struct { FriendlyName *string `json:"friendlyName,omitempty"` // ProtectionState - State of the back up item. Possible values include: 'ProtectionStatusInvalid', 'ProtectionStatusNotProtected', 'ProtectionStatusProtecting', 'ProtectionStatusProtected', 'ProtectionStatusProtectionFailed' ProtectionState ProtectionStatus `json:"protectionState,omitempty"` - // ProtectableItemType - Possible values include: 'ProtectableItemTypeWorkloadProtectableItem', 'ProtectableItemTypeAzureFileShare', 'ProtectableItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectableItemTypeMicrosoftComputevirtualMachines', 'ProtectableItemTypeAzureVMWorkloadProtectableItem', 'ProtectableItemTypeSAPHanaDatabase', 'ProtectableItemTypeSAPHanaSystem', 'ProtectableItemTypeSQLAvailabilityGroupContainer', 'ProtectableItemTypeSQLDataBase', 'ProtectableItemTypeSQLInstance', 'ProtectableItemTypeIaaSVMProtectableItem' + // ProtectableItemType - Possible values include: 'ProtectableItemTypeWorkloadProtectableItem', 'ProtectableItemTypeAzureFileShare', 'ProtectableItemTypeMicrosoftClassicComputevirtualMachines', 
'ProtectableItemTypeMicrosoftComputevirtualMachines', 'ProtectableItemTypeAzureVMWorkloadProtectableItem', 'ProtectableItemTypeSAPAseDatabase', 'ProtectableItemTypeSAPAseSystem', 'ProtectableItemTypeSAPHanaDatabase', 'ProtectableItemTypeSAPHanaSystem', 'ProtectableItemTypeSQLAvailabilityGroupContainer', 'ProtectableItemTypeSQLDataBase', 'ProtectableItemTypeSQLInstance', 'ProtectableItemTypeIaaSVMProtectableItem' ProtectableItemType ProtectableItemType `json:"protectableItemType,omitempty"` } @@ -7419,6 +8442,16 @@ func (avwsipi AzureVMWorkloadSQLInstanceProtectableItem) AsBasicAzureVMWorkloadP return &avwsipi, true } +// AsAzureVMWorkloadSAPAseDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSQLInstanceProtectableItem. +func (avwsipi AzureVMWorkloadSQLInstanceProtectableItem) AsAzureVMWorkloadSAPAseDatabaseProtectableItem() (*AzureVMWorkloadSAPAseDatabaseProtectableItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSAPAseSystemProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSQLInstanceProtectableItem. +func (avwsipi AzureVMWorkloadSQLInstanceProtectableItem) AsAzureVMWorkloadSAPAseSystemProtectableItem() (*AzureVMWorkloadSAPAseSystemProtectableItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for AzureVMWorkloadSQLInstanceProtectableItem. func (avwsipi AzureVMWorkloadSQLInstanceProtectableItem) AsAzureVMWorkloadSAPHanaDatabaseProtectableItem() (*AzureVMWorkloadSAPHanaDatabaseProtectableItem, bool) { return nil, false @@ -7487,7 +8520,7 @@ type AzureVMWorkloadSQLInstanceWorkloadItem struct { FriendlyName *string `json:"friendlyName,omitempty"` // ProtectionState - State of the back up item. 
Possible values include: 'ProtectionStatusInvalid', 'ProtectionStatusNotProtected', 'ProtectionStatusProtecting', 'ProtectionStatusProtected', 'ProtectionStatusProtectionFailed' ProtectionState ProtectionStatus `json:"protectionState,omitempty"` - // WorkloadItemType - Possible values include: 'WorkloadItemTypeWorkloadItem', 'WorkloadItemTypeAzureVMWorkloadItem', 'WorkloadItemTypeSAPHanaDatabase1', 'WorkloadItemTypeSAPHanaSystem1', 'WorkloadItemTypeSQLDataBase1', 'WorkloadItemTypeSQLInstance1' + // WorkloadItemType - Possible values include: 'WorkloadItemTypeWorkloadItem', 'WorkloadItemTypeAzureVMWorkloadItem', 'WorkloadItemTypeSAPAseDatabase1', 'WorkloadItemTypeSAPAseSystem1', 'WorkloadItemTypeSAPHanaDatabase1', 'WorkloadItemTypeSAPHanaSystem1', 'WorkloadItemTypeSQLDataBase1', 'WorkloadItemTypeSQLInstance1' WorkloadItemType WorkloadItemTypeBasicWorkloadItem `json:"workloadItemType,omitempty"` } @@ -7541,6 +8574,16 @@ func (avwsiwi AzureVMWorkloadSQLInstanceWorkloadItem) AsBasicAzureVMWorkloadItem return &avwsiwi, true } +// AsAzureVMWorkloadSAPAseDatabaseWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSQLInstanceWorkloadItem. +func (avwsiwi AzureVMWorkloadSQLInstanceWorkloadItem) AsAzureVMWorkloadSAPAseDatabaseWorkloadItem() (*AzureVMWorkloadSAPAseDatabaseWorkloadItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSAPAseSystemWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSQLInstanceWorkloadItem. +func (avwsiwi AzureVMWorkloadSQLInstanceWorkloadItem) AsAzureVMWorkloadSAPAseSystemWorkloadItem() (*AzureVMWorkloadSAPAseSystemWorkloadItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseWorkloadItem is the BasicWorkloadItem implementation for AzureVMWorkloadSQLInstanceWorkloadItem. 
func (avwsiwi AzureVMWorkloadSQLInstanceWorkloadItem) AsAzureVMWorkloadSAPHanaDatabaseWorkloadItem() (*AzureVMWorkloadSAPHanaDatabaseWorkloadItem, bool) { return nil, false @@ -7766,8 +8809,10 @@ type AzureWorkloadContainer struct { LastUpdatedTime *date.Time `json:"lastUpdatedTime,omitempty"` // ExtendedInfo - Additional details of a workload container. ExtendedInfo *AzureWorkloadContainerExtendedInfo `json:"extendedInfo,omitempty"` - // WorkloadType - Workload type for which registration was sent. Possible values include: 'WorkloadTypeInvalid', 'WorkloadTypeVM', 'WorkloadTypeFileFolder', 'WorkloadTypeAzureSQLDb', 'WorkloadTypeSQLDB', 'WorkloadTypeExchange', 'WorkloadTypeSharepoint', 'WorkloadTypeVMwareVM', 'WorkloadTypeSystemState', 'WorkloadTypeClient', 'WorkloadTypeGenericDataSource', 'WorkloadTypeSQLDataBase', 'WorkloadTypeAzureFileShare', 'WorkloadTypeSAPHanaDatabase' + // WorkloadType - Workload type for which registration was sent. Possible values include: 'WorkloadTypeInvalid', 'WorkloadTypeVM', 'WorkloadTypeFileFolder', 'WorkloadTypeAzureSQLDb', 'WorkloadTypeSQLDB', 'WorkloadTypeExchange', 'WorkloadTypeSharepoint', 'WorkloadTypeVMwareVM', 'WorkloadTypeSystemState', 'WorkloadTypeClient', 'WorkloadTypeGenericDataSource', 'WorkloadTypeSQLDataBase', 'WorkloadTypeAzureFileShare', 'WorkloadTypeSAPHanaDatabase', 'WorkloadTypeSAPAseDatabase' WorkloadType WorkloadType `json:"workloadType,omitempty"` + // OperationType - Re-Do Operation. Possible values include: 'OperationTypeInvalid', 'OperationTypeRegister', 'OperationTypeReregister' + OperationType OperationType `json:"operationType,omitempty"` // FriendlyName - Friendly name of the container. FriendlyName *string `json:"friendlyName,omitempty"` // BackupManagementType - Type of backup management for the container. 
Possible values include: 'ManagementTypeInvalid', 'ManagementTypeAzureIaasVM', 'ManagementTypeMAB', 'ManagementTypeDPM', 'ManagementTypeAzureBackupServer', 'ManagementTypeAzureSQL', 'ManagementTypeAzureStorage', 'ManagementTypeAzureWorkload', 'ManagementTypeDefaultBackup' @@ -7776,7 +8821,7 @@ type AzureWorkloadContainer struct { RegistrationStatus *string `json:"registrationStatus,omitempty"` // HealthStatus - Status of health of the container. HealthStatus *string `json:"healthStatus,omitempty"` - // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadBackupRequest', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' + // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadContainer', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' ContainerType ContainerTypeBasicProtectionContainer `json:"containerType,omitempty"` } @@ -7823,7 +8868,7 @@ func unmarshalBasicAzureWorkloadContainerArray(body []byte) ([]BasicAzureWorkloa // MarshalJSON is the custom marshaler for AzureWorkloadContainer. 
func (awc AzureWorkloadContainer) MarshalJSON() ([]byte, error) { - awc.ContainerType = ContainerTypeAzureWorkloadBackupRequest + awc.ContainerType = ContainerTypeAzureWorkloadContainer objectMap := make(map[string]interface{}) if awc.SourceResourceID != nil { objectMap["sourceResourceId"] = awc.SourceResourceID @@ -7837,6 +8882,9 @@ func (awc AzureWorkloadContainer) MarshalJSON() ([]byte, error) { if awc.WorkloadType != "" { objectMap["workloadType"] = awc.WorkloadType } + if awc.OperationType != "" { + objectMap["operationType"] = awc.OperationType + } if awc.FriendlyName != nil { objectMap["friendlyName"] = awc.FriendlyName } @@ -7905,6 +8953,11 @@ func (awc AzureWorkloadContainer) AsDpmContainer() (*DpmContainer, bool) { return nil, false } +// AsBasicDpmContainer is the BasicProtectionContainer implementation for AzureWorkloadContainer. +func (awc AzureWorkloadContainer) AsBasicDpmContainer() (BasicDpmContainer, bool) { + return nil, false +} + // AsGenericContainer is the BasicProtectionContainer implementation for AzureWorkloadContainer. 
func (awc AzureWorkloadContainer) AsGenericContainer() (*GenericContainer, bool) { return nil, false @@ -8103,8 +9156,263 @@ type AzureWorkloadJobTaskDetails struct { Status *string `json:"status,omitempty"` } +// BasicAzureWorkloadPointInTimeRecoveryPoint recovery point specific to PointInTime +type BasicAzureWorkloadPointInTimeRecoveryPoint interface { + AsAzureWorkloadSAPHanaPointInTimeRecoveryPoint() (*AzureWorkloadSAPHanaPointInTimeRecoveryPoint, bool) + AsAzureWorkloadPointInTimeRecoveryPoint() (*AzureWorkloadPointInTimeRecoveryPoint, bool) +} + +// AzureWorkloadPointInTimeRecoveryPoint recovery point specific to PointInTime +type AzureWorkloadPointInTimeRecoveryPoint struct { + // TimeRanges - List of log ranges + TimeRanges *[]PointInTimeRange `json:"timeRanges,omitempty"` + // RecoveryPointTimeInUTC - UTC time at which recovery point was created + RecoveryPointTimeInUTC *date.Time `json:"recoveryPointTimeInUTC,omitempty"` + // Type - Type of restore point. Possible values include: 'RestorePointTypeInvalid', 'RestorePointTypeFull', 'RestorePointTypeLog', 'RestorePointTypeDifferential' + Type RestorePointType `json:"type,omitempty"` + // ObjectType - Possible values include: 'ObjectTypeRecoveryPoint', 'ObjectTypeAzureFileShareRecoveryPoint', 'ObjectTypeAzureWorkloadPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaRecoveryPoint', 'ObjectTypeAzureWorkloadSQLPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSQLRecoveryPoint', 'ObjectTypeGenericRecoveryPoint', 'ObjectTypeIaasVMRecoveryPoint' + ObjectType ObjectTypeBasicRecoveryPoint `json:"objectType,omitempty"` +} + +func unmarshalBasicAzureWorkloadPointInTimeRecoveryPoint(body []byte) (BasicAzureWorkloadPointInTimeRecoveryPoint, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["objectType"] { + case 
string(ObjectTypeAzureWorkloadSAPHanaPointInTimeRecoveryPoint): + var awshpitrp AzureWorkloadSAPHanaPointInTimeRecoveryPoint + err := json.Unmarshal(body, &awshpitrp) + return awshpitrp, err + default: + var awpitrp AzureWorkloadPointInTimeRecoveryPoint + err := json.Unmarshal(body, &awpitrp) + return awpitrp, err + } +} +func unmarshalBasicAzureWorkloadPointInTimeRecoveryPointArray(body []byte) ([]BasicAzureWorkloadPointInTimeRecoveryPoint, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + awpitrpArray := make([]BasicAzureWorkloadPointInTimeRecoveryPoint, len(rawMessages)) + + for index, rawMessage := range rawMessages { + awpitrp, err := unmarshalBasicAzureWorkloadPointInTimeRecoveryPoint(*rawMessage) + if err != nil { + return nil, err + } + awpitrpArray[index] = awpitrp + } + return awpitrpArray, nil +} + +// MarshalJSON is the custom marshaler for AzureWorkloadPointInTimeRecoveryPoint. +func (awpitrp AzureWorkloadPointInTimeRecoveryPoint) MarshalJSON() ([]byte, error) { + awpitrp.ObjectType = ObjectTypeAzureWorkloadPointInTimeRecoveryPoint + objectMap := make(map[string]interface{}) + if awpitrp.TimeRanges != nil { + objectMap["timeRanges"] = awpitrp.TimeRanges + } + if awpitrp.RecoveryPointTimeInUTC != nil { + objectMap["recoveryPointTimeInUTC"] = awpitrp.RecoveryPointTimeInUTC + } + if awpitrp.Type != "" { + objectMap["type"] = awpitrp.Type + } + if awpitrp.ObjectType != "" { + objectMap["objectType"] = awpitrp.ObjectType + } + return json.Marshal(objectMap) +} + +// AsAzureFileShareRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadPointInTimeRecoveryPoint. +func (awpitrp AzureWorkloadPointInTimeRecoveryPoint) AsAzureFileShareRecoveryPoint() (*AzureFileShareRecoveryPoint, bool) { + return nil, false +} + +// AsAzureWorkloadPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadPointInTimeRecoveryPoint. 
+func (awpitrp AzureWorkloadPointInTimeRecoveryPoint) AsAzureWorkloadPointInTimeRecoveryPoint() (*AzureWorkloadPointInTimeRecoveryPoint, bool) { + return &awpitrp, true +} + +// AsBasicAzureWorkloadPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadPointInTimeRecoveryPoint. +func (awpitrp AzureWorkloadPointInTimeRecoveryPoint) AsBasicAzureWorkloadPointInTimeRecoveryPoint() (BasicAzureWorkloadPointInTimeRecoveryPoint, bool) { + return &awpitrp, true +} + +// AsAzureWorkloadRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadPointInTimeRecoveryPoint. +func (awpitrp AzureWorkloadPointInTimeRecoveryPoint) AsAzureWorkloadRecoveryPoint() (*AzureWorkloadRecoveryPoint, bool) { + return nil, false +} + +// AsBasicAzureWorkloadRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadPointInTimeRecoveryPoint. +func (awpitrp AzureWorkloadPointInTimeRecoveryPoint) AsBasicAzureWorkloadRecoveryPoint() (BasicAzureWorkloadRecoveryPoint, bool) { + return &awpitrp, true +} + +// AsAzureWorkloadSAPHanaPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadPointInTimeRecoveryPoint. +func (awpitrp AzureWorkloadPointInTimeRecoveryPoint) AsAzureWorkloadSAPHanaPointInTimeRecoveryPoint() (*AzureWorkloadSAPHanaPointInTimeRecoveryPoint, bool) { + return nil, false +} + +// AsAzureWorkloadSAPHanaRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadPointInTimeRecoveryPoint. +func (awpitrp AzureWorkloadPointInTimeRecoveryPoint) AsAzureWorkloadSAPHanaRecoveryPoint() (*AzureWorkloadSAPHanaRecoveryPoint, bool) { + return nil, false +} + +// AsAzureWorkloadSQLPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadPointInTimeRecoveryPoint. 
+func (awpitrp AzureWorkloadPointInTimeRecoveryPoint) AsAzureWorkloadSQLPointInTimeRecoveryPoint() (*AzureWorkloadSQLPointInTimeRecoveryPoint, bool) { + return nil, false +} + +// AsAzureWorkloadSQLRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadPointInTimeRecoveryPoint. +func (awpitrp AzureWorkloadPointInTimeRecoveryPoint) AsAzureWorkloadSQLRecoveryPoint() (*AzureWorkloadSQLRecoveryPoint, bool) { + return nil, false +} + +// AsBasicAzureWorkloadSQLRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadPointInTimeRecoveryPoint. +func (awpitrp AzureWorkloadPointInTimeRecoveryPoint) AsBasicAzureWorkloadSQLRecoveryPoint() (BasicAzureWorkloadSQLRecoveryPoint, bool) { + return nil, false +} + +// AsGenericRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadPointInTimeRecoveryPoint. +func (awpitrp AzureWorkloadPointInTimeRecoveryPoint) AsGenericRecoveryPoint() (*GenericRecoveryPoint, bool) { + return nil, false +} + +// AsIaasVMRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadPointInTimeRecoveryPoint. +func (awpitrp AzureWorkloadPointInTimeRecoveryPoint) AsIaasVMRecoveryPoint() (*IaasVMRecoveryPoint, bool) { + return nil, false +} + +// AsRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadPointInTimeRecoveryPoint. +func (awpitrp AzureWorkloadPointInTimeRecoveryPoint) AsRecoveryPoint() (*RecoveryPoint, bool) { + return nil, false +} + +// AsBasicRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadPointInTimeRecoveryPoint. +func (awpitrp AzureWorkloadPointInTimeRecoveryPoint) AsBasicRecoveryPoint() (BasicRecoveryPoint, bool) { + return &awpitrp, true +} + +// AzureWorkloadPointInTimeRestoreRequest azureWorkload SAP Hana -specific restore. 
Specifically for +// PointInTime/Log restore +type AzureWorkloadPointInTimeRestoreRequest struct { + // PointInTime - PointInTime value + PointInTime *date.Time `json:"pointInTime,omitempty"` + // RecoveryType - OLR/ALR, RestoreDisks is invalid option. Possible values include: 'RecoveryTypeInvalid', 'RecoveryTypeOriginalLocation', 'RecoveryTypeAlternateLocation', 'RecoveryTypeRestoreDisks' + RecoveryType RecoveryType `json:"recoveryType,omitempty"` + // SourceResourceID - Fully qualified ARM ID of the VM on which workload that was running is being recovered. + SourceResourceID *string `json:"sourceResourceId,omitempty"` + // PropertyBag - Workload specific property bag. + PropertyBag map[string]*string `json:"propertyBag"` + // TargetInfo - Details of target database + TargetInfo *TargetRestoreInfo `json:"targetInfo,omitempty"` + // ObjectType - Possible values include: 'ObjectTypeRestoreRequest', 'ObjectTypeAzureFileShareRestoreRequest', 'ObjectTypeAzureWorkloadPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaRestoreRequest', 'ObjectTypeAzureWorkloadSQLPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSQLRestoreRequest', 'ObjectTypeIaasVMRestoreRequest' + ObjectType ObjectTypeBasicRestoreRequest `json:"objectType,omitempty"` +} + +// MarshalJSON is the custom marshaler for AzureWorkloadPointInTimeRestoreRequest. 
+func (awpitrr AzureWorkloadPointInTimeRestoreRequest) MarshalJSON() ([]byte, error) { + awpitrr.ObjectType = ObjectTypeAzureWorkloadPointInTimeRestoreRequest + objectMap := make(map[string]interface{}) + if awpitrr.PointInTime != nil { + objectMap["pointInTime"] = awpitrr.PointInTime + } + if awpitrr.RecoveryType != "" { + objectMap["recoveryType"] = awpitrr.RecoveryType + } + if awpitrr.SourceResourceID != nil { + objectMap["sourceResourceId"] = awpitrr.SourceResourceID + } + if awpitrr.PropertyBag != nil { + objectMap["propertyBag"] = awpitrr.PropertyBag + } + if awpitrr.TargetInfo != nil { + objectMap["targetInfo"] = awpitrr.TargetInfo + } + if awpitrr.ObjectType != "" { + objectMap["objectType"] = awpitrr.ObjectType + } + return json.Marshal(objectMap) +} + +// AsAzureFileShareRestoreRequest is the BasicRestoreRequest implementation for AzureWorkloadPointInTimeRestoreRequest. +func (awpitrr AzureWorkloadPointInTimeRestoreRequest) AsAzureFileShareRestoreRequest() (*AzureFileShareRestoreRequest, bool) { + return nil, false +} + +// AsAzureWorkloadPointInTimeRestoreRequest is the BasicRestoreRequest implementation for AzureWorkloadPointInTimeRestoreRequest. +func (awpitrr AzureWorkloadPointInTimeRestoreRequest) AsAzureWorkloadPointInTimeRestoreRequest() (*AzureWorkloadPointInTimeRestoreRequest, bool) { + return &awpitrr, true +} + +// AsAzureWorkloadRestoreRequest is the BasicRestoreRequest implementation for AzureWorkloadPointInTimeRestoreRequest. +func (awpitrr AzureWorkloadPointInTimeRestoreRequest) AsAzureWorkloadRestoreRequest() (*AzureWorkloadRestoreRequest, bool) { + return nil, false +} + +// AsBasicAzureWorkloadRestoreRequest is the BasicRestoreRequest implementation for AzureWorkloadPointInTimeRestoreRequest. 
+func (awpitrr AzureWorkloadPointInTimeRestoreRequest) AsBasicAzureWorkloadRestoreRequest() (BasicAzureWorkloadRestoreRequest, bool) { + return &awpitrr, true +} + +// AsAzureWorkloadSAPHanaPointInTimeRestoreRequest is the BasicRestoreRequest implementation for AzureWorkloadPointInTimeRestoreRequest. +func (awpitrr AzureWorkloadPointInTimeRestoreRequest) AsAzureWorkloadSAPHanaPointInTimeRestoreRequest() (*AzureWorkloadSAPHanaPointInTimeRestoreRequest, bool) { + return nil, false +} + +// AsAzureWorkloadSAPHanaRestoreRequest is the BasicRestoreRequest implementation for AzureWorkloadPointInTimeRestoreRequest. +func (awpitrr AzureWorkloadPointInTimeRestoreRequest) AsAzureWorkloadSAPHanaRestoreRequest() (*AzureWorkloadSAPHanaRestoreRequest, bool) { + return nil, false +} + +// AsBasicAzureWorkloadSAPHanaRestoreRequest is the BasicRestoreRequest implementation for AzureWorkloadPointInTimeRestoreRequest. +func (awpitrr AzureWorkloadPointInTimeRestoreRequest) AsBasicAzureWorkloadSAPHanaRestoreRequest() (BasicAzureWorkloadSAPHanaRestoreRequest, bool) { + return nil, false +} + +// AsAzureWorkloadSQLPointInTimeRestoreRequest is the BasicRestoreRequest implementation for AzureWorkloadPointInTimeRestoreRequest. +func (awpitrr AzureWorkloadPointInTimeRestoreRequest) AsAzureWorkloadSQLPointInTimeRestoreRequest() (*AzureWorkloadSQLPointInTimeRestoreRequest, bool) { + return nil, false +} + +// AsAzureWorkloadSQLRestoreRequest is the BasicRestoreRequest implementation for AzureWorkloadPointInTimeRestoreRequest. +func (awpitrr AzureWorkloadPointInTimeRestoreRequest) AsAzureWorkloadSQLRestoreRequest() (*AzureWorkloadSQLRestoreRequest, bool) { + return nil, false +} + +// AsBasicAzureWorkloadSQLRestoreRequest is the BasicRestoreRequest implementation for AzureWorkloadPointInTimeRestoreRequest. 
+func (awpitrr AzureWorkloadPointInTimeRestoreRequest) AsBasicAzureWorkloadSQLRestoreRequest() (BasicAzureWorkloadSQLRestoreRequest, bool) { + return nil, false +} + +// AsIaasVMRestoreRequest is the BasicRestoreRequest implementation for AzureWorkloadPointInTimeRestoreRequest. +func (awpitrr AzureWorkloadPointInTimeRestoreRequest) AsIaasVMRestoreRequest() (*IaasVMRestoreRequest, bool) { + return nil, false +} + +// AsRestoreRequest is the BasicRestoreRequest implementation for AzureWorkloadPointInTimeRestoreRequest. +func (awpitrr AzureWorkloadPointInTimeRestoreRequest) AsRestoreRequest() (*RestoreRequest, bool) { + return nil, false +} + +// AsBasicRestoreRequest is the BasicRestoreRequest implementation for AzureWorkloadPointInTimeRestoreRequest. +func (awpitrr AzureWorkloadPointInTimeRestoreRequest) AsBasicRestoreRequest() (BasicRestoreRequest, bool) { + return &awpitrr, true +} + // BasicAzureWorkloadRecoveryPoint workload specific recovery point, specifically encapsulates full/diff recovery point type BasicAzureWorkloadRecoveryPoint interface { + AsAzureWorkloadPointInTimeRecoveryPoint() (*AzureWorkloadPointInTimeRecoveryPoint, bool) + AsBasicAzureWorkloadPointInTimeRecoveryPoint() (BasicAzureWorkloadPointInTimeRecoveryPoint, bool) + AsAzureWorkloadSAPHanaPointInTimeRecoveryPoint() (*AzureWorkloadSAPHanaPointInTimeRecoveryPoint, bool) + AsAzureWorkloadSAPHanaRecoveryPoint() (*AzureWorkloadSAPHanaRecoveryPoint, bool) AsAzureWorkloadSQLPointInTimeRecoveryPoint() (*AzureWorkloadSQLPointInTimeRecoveryPoint, bool) AsAzureWorkloadSQLRecoveryPoint() (*AzureWorkloadSQLRecoveryPoint, bool) AsBasicAzureWorkloadSQLRecoveryPoint() (BasicAzureWorkloadSQLRecoveryPoint, bool) @@ -8118,7 +9426,7 @@ type AzureWorkloadRecoveryPoint struct { RecoveryPointTimeInUTC *date.Time `json:"recoveryPointTimeInUTC,omitempty"` // Type - Type of restore point. 
Possible values include: 'RestorePointTypeInvalid', 'RestorePointTypeFull', 'RestorePointTypeLog', 'RestorePointTypeDifferential' Type RestorePointType `json:"type,omitempty"` - // ObjectType - Possible values include: 'ObjectTypeRecoveryPoint', 'ObjectTypeAzureFileShareRecoveryPoint', 'ObjectTypeAzureWorkloadRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaRecoveryPoint', 'ObjectTypeAzureWorkloadSQLPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSQLRecoveryPoint', 'ObjectTypeGenericRecoveryPoint', 'ObjectTypeIaasVMRecoveryPoint' + // ObjectType - Possible values include: 'ObjectTypeRecoveryPoint', 'ObjectTypeAzureFileShareRecoveryPoint', 'ObjectTypeAzureWorkloadPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaRecoveryPoint', 'ObjectTypeAzureWorkloadSQLPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSQLRecoveryPoint', 'ObjectTypeGenericRecoveryPoint', 'ObjectTypeIaasVMRecoveryPoint' ObjectType ObjectTypeBasicRecoveryPoint `json:"objectType,omitempty"` } @@ -8130,6 +9438,18 @@ func unmarshalBasicAzureWorkloadRecoveryPoint(body []byte) (BasicAzureWorkloadRe } switch m["objectType"] { + case string(ObjectTypeAzureWorkloadPointInTimeRecoveryPoint): + var awpitrp AzureWorkloadPointInTimeRecoveryPoint + err := json.Unmarshal(body, &awpitrp) + return awpitrp, err + case string(ObjectTypeAzureWorkloadSAPHanaPointInTimeRecoveryPoint): + var awshpitrp AzureWorkloadSAPHanaPointInTimeRecoveryPoint + err := json.Unmarshal(body, &awshpitrp) + return awshpitrp, err + case string(ObjectTypeAzureWorkloadSAPHanaRecoveryPoint): + var awshrp AzureWorkloadSAPHanaRecoveryPoint + err := json.Unmarshal(body, &awshrp) + return awshrp, err case string(ObjectTypeAzureWorkloadSQLPointInTimeRecoveryPoint): var awspitrp AzureWorkloadSQLPointInTimeRecoveryPoint err := json.Unmarshal(body, &awspitrp) @@ -8184,6 +9504,16 @@ func 
(awrp AzureWorkloadRecoveryPoint) AsAzureFileShareRecoveryPoint() (*AzureFi return nil, false } +// AsAzureWorkloadPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadRecoveryPoint. +func (awrp AzureWorkloadRecoveryPoint) AsAzureWorkloadPointInTimeRecoveryPoint() (*AzureWorkloadPointInTimeRecoveryPoint, bool) { + return nil, false +} + +// AsBasicAzureWorkloadPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadRecoveryPoint. +func (awrp AzureWorkloadRecoveryPoint) AsBasicAzureWorkloadPointInTimeRecoveryPoint() (BasicAzureWorkloadPointInTimeRecoveryPoint, bool) { + return nil, false +} + // AsAzureWorkloadRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadRecoveryPoint. func (awrp AzureWorkloadRecoveryPoint) AsAzureWorkloadRecoveryPoint() (*AzureWorkloadRecoveryPoint, bool) { return &awrp, true @@ -8204,11 +9534,6 @@ func (awrp AzureWorkloadRecoveryPoint) AsAzureWorkloadSAPHanaRecoveryPoint() (*A return nil, false } -// AsBasicAzureWorkloadSAPHanaRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadRecoveryPoint. -func (awrp AzureWorkloadRecoveryPoint) AsBasicAzureWorkloadSAPHanaRecoveryPoint() (BasicAzureWorkloadSAPHanaRecoveryPoint, bool) { - return nil, false -} - // AsAzureWorkloadSQLPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadRecoveryPoint. func (awrp AzureWorkloadRecoveryPoint) AsAzureWorkloadSQLPointInTimeRecoveryPoint() (*AzureWorkloadSQLPointInTimeRecoveryPoint, bool) { return nil, false @@ -8246,6 +9571,10 @@ func (awrp AzureWorkloadRecoveryPoint) AsBasicRecoveryPoint() (BasicRecoveryPoin // BasicAzureWorkloadRestoreRequest azureWorkload-specific restore. 
type BasicAzureWorkloadRestoreRequest interface { + AsAzureWorkloadPointInTimeRestoreRequest() (*AzureWorkloadPointInTimeRestoreRequest, bool) + AsAzureWorkloadSAPHanaPointInTimeRestoreRequest() (*AzureWorkloadSAPHanaPointInTimeRestoreRequest, bool) + AsAzureWorkloadSAPHanaRestoreRequest() (*AzureWorkloadSAPHanaRestoreRequest, bool) + AsBasicAzureWorkloadSAPHanaRestoreRequest() (BasicAzureWorkloadSAPHanaRestoreRequest, bool) AsAzureWorkloadSQLPointInTimeRestoreRequest() (*AzureWorkloadSQLPointInTimeRestoreRequest, bool) AsAzureWorkloadSQLRestoreRequest() (*AzureWorkloadSQLRestoreRequest, bool) AsBasicAzureWorkloadSQLRestoreRequest() (BasicAzureWorkloadSQLRestoreRequest, bool) @@ -8260,7 +9589,9 @@ type AzureWorkloadRestoreRequest struct { SourceResourceID *string `json:"sourceResourceId,omitempty"` // PropertyBag - Workload specific property bag. PropertyBag map[string]*string `json:"propertyBag"` - // ObjectType - Possible values include: 'ObjectTypeRestoreRequest', 'ObjectTypeAzureFileShareRestoreRequest', 'ObjectTypeAzureWorkloadRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaRestoreRequest', 'ObjectTypeAzureWorkloadSQLPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSQLRestoreRequest', 'ObjectTypeIaasVMRestoreRequest' + // TargetInfo - Details of target database + TargetInfo *TargetRestoreInfo `json:"targetInfo,omitempty"` + // ObjectType - Possible values include: 'ObjectTypeRestoreRequest', 'ObjectTypeAzureFileShareRestoreRequest', 'ObjectTypeAzureWorkloadPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaRestoreRequest', 'ObjectTypeAzureWorkloadSQLPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSQLRestoreRequest', 'ObjectTypeIaasVMRestoreRequest' ObjectType ObjectTypeBasicRestoreRequest `json:"objectType,omitempty"` } @@ -8272,6 +9603,18 @@ func 
unmarshalBasicAzureWorkloadRestoreRequest(body []byte) (BasicAzureWorkloadR } switch m["objectType"] { + case string(ObjectTypeAzureWorkloadPointInTimeRestoreRequest): + var awpitrr AzureWorkloadPointInTimeRestoreRequest + err := json.Unmarshal(body, &awpitrr) + return awpitrr, err + case string(ObjectTypeAzureWorkloadSAPHanaPointInTimeRestoreRequest): + var awshpitrr AzureWorkloadSAPHanaPointInTimeRestoreRequest + err := json.Unmarshal(body, &awshpitrr) + return awshpitrr, err + case string(ObjectTypeAzureWorkloadSAPHanaRestoreRequest): + var awshrr AzureWorkloadSAPHanaRestoreRequest + err := json.Unmarshal(body, &awshrr) + return awshrr, err case string(ObjectTypeAzureWorkloadSQLPointInTimeRestoreRequest): var awspitrr AzureWorkloadSQLPointInTimeRestoreRequest err := json.Unmarshal(body, &awspitrr) @@ -8318,6 +9661,9 @@ func (awrr AzureWorkloadRestoreRequest) MarshalJSON() ([]byte, error) { if awrr.PropertyBag != nil { objectMap["propertyBag"] = awrr.PropertyBag } + if awrr.TargetInfo != nil { + objectMap["targetInfo"] = awrr.TargetInfo + } if awrr.ObjectType != "" { objectMap["objectType"] = awrr.ObjectType } @@ -8329,6 +9675,11 @@ func (awrr AzureWorkloadRestoreRequest) AsAzureFileShareRestoreRequest() (*Azure return nil, false } +// AsAzureWorkloadPointInTimeRestoreRequest is the BasicRestoreRequest implementation for AzureWorkloadRestoreRequest. +func (awrr AzureWorkloadRestoreRequest) AsAzureWorkloadPointInTimeRestoreRequest() (*AzureWorkloadPointInTimeRestoreRequest, bool) { + return nil, false +} + // AsAzureWorkloadRestoreRequest is the BasicRestoreRequest implementation for AzureWorkloadRestoreRequest. func (awrr AzureWorkloadRestoreRequest) AsAzureWorkloadRestoreRequest() (*AzureWorkloadRestoreRequest, bool) { return &awrr, true @@ -8392,7 +9743,7 @@ type AzureWorkloadSAPHanaPointInTimeRecoveryPoint struct { RecoveryPointTimeInUTC *date.Time `json:"recoveryPointTimeInUTC,omitempty"` // Type - Type of restore point. 
Possible values include: 'RestorePointTypeInvalid', 'RestorePointTypeFull', 'RestorePointTypeLog', 'RestorePointTypeDifferential' Type RestorePointType `json:"type,omitempty"` - // ObjectType - Possible values include: 'ObjectTypeRecoveryPoint', 'ObjectTypeAzureFileShareRecoveryPoint', 'ObjectTypeAzureWorkloadRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaRecoveryPoint', 'ObjectTypeAzureWorkloadSQLPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSQLRecoveryPoint', 'ObjectTypeGenericRecoveryPoint', 'ObjectTypeIaasVMRecoveryPoint' + // ObjectType - Possible values include: 'ObjectTypeRecoveryPoint', 'ObjectTypeAzureFileShareRecoveryPoint', 'ObjectTypeAzureWorkloadPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaRecoveryPoint', 'ObjectTypeAzureWorkloadSQLPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSQLRecoveryPoint', 'ObjectTypeGenericRecoveryPoint', 'ObjectTypeIaasVMRecoveryPoint' ObjectType ObjectTypeBasicRecoveryPoint `json:"objectType,omitempty"` } @@ -8420,6 +9771,16 @@ func (awshpitrp AzureWorkloadSAPHanaPointInTimeRecoveryPoint) AsAzureFileShareRe return nil, false } +// AsAzureWorkloadPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadSAPHanaPointInTimeRecoveryPoint. +func (awshpitrp AzureWorkloadSAPHanaPointInTimeRecoveryPoint) AsAzureWorkloadPointInTimeRecoveryPoint() (*AzureWorkloadPointInTimeRecoveryPoint, bool) { + return nil, false +} + +// AsBasicAzureWorkloadPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadSAPHanaPointInTimeRecoveryPoint. 
+func (awshpitrp AzureWorkloadSAPHanaPointInTimeRecoveryPoint) AsBasicAzureWorkloadPointInTimeRecoveryPoint() (BasicAzureWorkloadPointInTimeRecoveryPoint, bool) { + return &awshpitrp, true +} + // AsAzureWorkloadRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadSAPHanaPointInTimeRecoveryPoint. func (awshpitrp AzureWorkloadSAPHanaPointInTimeRecoveryPoint) AsAzureWorkloadRecoveryPoint() (*AzureWorkloadRecoveryPoint, bool) { return nil, false @@ -8427,7 +9788,7 @@ func (awshpitrp AzureWorkloadSAPHanaPointInTimeRecoveryPoint) AsAzureWorkloadRec // AsBasicAzureWorkloadRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadSAPHanaPointInTimeRecoveryPoint. func (awshpitrp AzureWorkloadSAPHanaPointInTimeRecoveryPoint) AsBasicAzureWorkloadRecoveryPoint() (BasicAzureWorkloadRecoveryPoint, bool) { - return nil, false + return &awshpitrp, true } // AsAzureWorkloadSAPHanaPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadSAPHanaPointInTimeRecoveryPoint. @@ -8440,11 +9801,6 @@ func (awshpitrp AzureWorkloadSAPHanaPointInTimeRecoveryPoint) AsAzureWorkloadSAP return nil, false } -// AsBasicAzureWorkloadSAPHanaRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadSAPHanaPointInTimeRecoveryPoint. -func (awshpitrp AzureWorkloadSAPHanaPointInTimeRecoveryPoint) AsBasicAzureWorkloadSAPHanaRecoveryPoint() (BasicAzureWorkloadSAPHanaRecoveryPoint, bool) { - return &awshpitrp, true -} - // AsAzureWorkloadSQLPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadSAPHanaPointInTimeRecoveryPoint. 
func (awshpitrp AzureWorkloadSAPHanaPointInTimeRecoveryPoint) AsAzureWorkloadSQLPointInTimeRecoveryPoint() (*AzureWorkloadSQLPointInTimeRecoveryPoint, bool) { return nil, false @@ -8485,15 +9841,15 @@ func (awshpitrp AzureWorkloadSAPHanaPointInTimeRecoveryPoint) AsBasicRecoveryPoi type AzureWorkloadSAPHanaPointInTimeRestoreRequest struct { // PointInTime - PointInTime value PointInTime *date.Time `json:"pointInTime,omitempty"` - // TargetInfo - Details of target database - TargetInfo *TargetRestoreInfo `json:"targetInfo,omitempty"` // RecoveryType - OLR/ALR, RestoreDisks is invalid option. Possible values include: 'RecoveryTypeInvalid', 'RecoveryTypeOriginalLocation', 'RecoveryTypeAlternateLocation', 'RecoveryTypeRestoreDisks' RecoveryType RecoveryType `json:"recoveryType,omitempty"` // SourceResourceID - Fully qualified ARM ID of the VM on which workload that was running is being recovered. SourceResourceID *string `json:"sourceResourceId,omitempty"` // PropertyBag - Workload specific property bag. 
PropertyBag map[string]*string `json:"propertyBag"` - // ObjectType - Possible values include: 'ObjectTypeRestoreRequest', 'ObjectTypeAzureFileShareRestoreRequest', 'ObjectTypeAzureWorkloadRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaRestoreRequest', 'ObjectTypeAzureWorkloadSQLPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSQLRestoreRequest', 'ObjectTypeIaasVMRestoreRequest' + // TargetInfo - Details of target database + TargetInfo *TargetRestoreInfo `json:"targetInfo,omitempty"` + // ObjectType - Possible values include: 'ObjectTypeRestoreRequest', 'ObjectTypeAzureFileShareRestoreRequest', 'ObjectTypeAzureWorkloadPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaRestoreRequest', 'ObjectTypeAzureWorkloadSQLPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSQLRestoreRequest', 'ObjectTypeIaasVMRestoreRequest' ObjectType ObjectTypeBasicRestoreRequest `json:"objectType,omitempty"` } @@ -8504,9 +9860,6 @@ func (awshpitrr AzureWorkloadSAPHanaPointInTimeRestoreRequest) MarshalJSON() ([] if awshpitrr.PointInTime != nil { objectMap["pointInTime"] = awshpitrr.PointInTime } - if awshpitrr.TargetInfo != nil { - objectMap["targetInfo"] = awshpitrr.TargetInfo - } if awshpitrr.RecoveryType != "" { objectMap["recoveryType"] = awshpitrr.RecoveryType } @@ -8516,6 +9869,9 @@ func (awshpitrr AzureWorkloadSAPHanaPointInTimeRestoreRequest) MarshalJSON() ([] if awshpitrr.PropertyBag != nil { objectMap["propertyBag"] = awshpitrr.PropertyBag } + if awshpitrr.TargetInfo != nil { + objectMap["targetInfo"] = awshpitrr.TargetInfo + } if awshpitrr.ObjectType != "" { objectMap["objectType"] = awshpitrr.ObjectType } @@ -8527,6 +9883,11 @@ func (awshpitrr AzureWorkloadSAPHanaPointInTimeRestoreRequest) AsAzureFileShareR return nil, false } +// AsAzureWorkloadPointInTimeRestoreRequest is the BasicRestoreRequest 
implementation for AzureWorkloadSAPHanaPointInTimeRestoreRequest. +func (awshpitrr AzureWorkloadSAPHanaPointInTimeRestoreRequest) AsAzureWorkloadPointInTimeRestoreRequest() (*AzureWorkloadPointInTimeRestoreRequest, bool) { + return nil, false +} + // AsAzureWorkloadRestoreRequest is the BasicRestoreRequest implementation for AzureWorkloadSAPHanaPointInTimeRestoreRequest. func (awshpitrr AzureWorkloadSAPHanaPointInTimeRestoreRequest) AsAzureWorkloadRestoreRequest() (*AzureWorkloadRestoreRequest, bool) { return nil, false @@ -8534,7 +9895,7 @@ func (awshpitrr AzureWorkloadSAPHanaPointInTimeRestoreRequest) AsAzureWorkloadRe // AsBasicAzureWorkloadRestoreRequest is the BasicRestoreRequest implementation for AzureWorkloadSAPHanaPointInTimeRestoreRequest. func (awshpitrr AzureWorkloadSAPHanaPointInTimeRestoreRequest) AsBasicAzureWorkloadRestoreRequest() (BasicAzureWorkloadRestoreRequest, bool) { - return nil, false + return &awshpitrr, true } // AsAzureWorkloadSAPHanaPointInTimeRestoreRequest is the BasicRestoreRequest implementation for AzureWorkloadSAPHanaPointInTimeRestoreRequest. @@ -8582,13 +9943,6 @@ func (awshpitrr AzureWorkloadSAPHanaPointInTimeRestoreRequest) AsBasicRestoreReq return &awshpitrr, true } -// BasicAzureWorkloadSAPHanaRecoveryPoint sAPHana specific recovery point, specifically encapsulates full/diff recovery -// points -type BasicAzureWorkloadSAPHanaRecoveryPoint interface { - AsAzureWorkloadSAPHanaPointInTimeRecoveryPoint() (*AzureWorkloadSAPHanaPointInTimeRecoveryPoint, bool) - AsAzureWorkloadSAPHanaRecoveryPoint() (*AzureWorkloadSAPHanaRecoveryPoint, bool) -} - // AzureWorkloadSAPHanaRecoveryPoint sAPHana specific recovery point, specifically encapsulates full/diff // recovery points type AzureWorkloadSAPHanaRecoveryPoint struct { @@ -8596,47 +9950,10 @@ type AzureWorkloadSAPHanaRecoveryPoint struct { RecoveryPointTimeInUTC *date.Time `json:"recoveryPointTimeInUTC,omitempty"` // Type - Type of restore point. 
Possible values include: 'RestorePointTypeInvalid', 'RestorePointTypeFull', 'RestorePointTypeLog', 'RestorePointTypeDifferential' Type RestorePointType `json:"type,omitempty"` - // ObjectType - Possible values include: 'ObjectTypeRecoveryPoint', 'ObjectTypeAzureFileShareRecoveryPoint', 'ObjectTypeAzureWorkloadRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaRecoveryPoint', 'ObjectTypeAzureWorkloadSQLPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSQLRecoveryPoint', 'ObjectTypeGenericRecoveryPoint', 'ObjectTypeIaasVMRecoveryPoint' + // ObjectType - Possible values include: 'ObjectTypeRecoveryPoint', 'ObjectTypeAzureFileShareRecoveryPoint', 'ObjectTypeAzureWorkloadPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaRecoveryPoint', 'ObjectTypeAzureWorkloadSQLPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSQLRecoveryPoint', 'ObjectTypeGenericRecoveryPoint', 'ObjectTypeIaasVMRecoveryPoint' ObjectType ObjectTypeBasicRecoveryPoint `json:"objectType,omitempty"` } -func unmarshalBasicAzureWorkloadSAPHanaRecoveryPoint(body []byte) (BasicAzureWorkloadSAPHanaRecoveryPoint, error) { - var m map[string]interface{} - err := json.Unmarshal(body, &m) - if err != nil { - return nil, err - } - - switch m["objectType"] { - case string(ObjectTypeAzureWorkloadSAPHanaPointInTimeRecoveryPoint): - var awshpitrp AzureWorkloadSAPHanaPointInTimeRecoveryPoint - err := json.Unmarshal(body, &awshpitrp) - return awshpitrp, err - default: - var awshrp AzureWorkloadSAPHanaRecoveryPoint - err := json.Unmarshal(body, &awshrp) - return awshrp, err - } -} -func unmarshalBasicAzureWorkloadSAPHanaRecoveryPointArray(body []byte) ([]BasicAzureWorkloadSAPHanaRecoveryPoint, error) { - var rawMessages []*json.RawMessage - err := json.Unmarshal(body, &rawMessages) - if err != nil { - return nil, err - } - - awshrpArray := 
make([]BasicAzureWorkloadSAPHanaRecoveryPoint, len(rawMessages)) - - for index, rawMessage := range rawMessages { - awshrp, err := unmarshalBasicAzureWorkloadSAPHanaRecoveryPoint(*rawMessage) - if err != nil { - return nil, err - } - awshrpArray[index] = awshrp - } - return awshrpArray, nil -} - // MarshalJSON is the custom marshaler for AzureWorkloadSAPHanaRecoveryPoint. func (awshrp AzureWorkloadSAPHanaRecoveryPoint) MarshalJSON() ([]byte, error) { awshrp.ObjectType = ObjectTypeAzureWorkloadSAPHanaRecoveryPoint @@ -8658,6 +9975,16 @@ func (awshrp AzureWorkloadSAPHanaRecoveryPoint) AsAzureFileShareRecoveryPoint() return nil, false } +// AsAzureWorkloadPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadSAPHanaRecoveryPoint. +func (awshrp AzureWorkloadSAPHanaRecoveryPoint) AsAzureWorkloadPointInTimeRecoveryPoint() (*AzureWorkloadPointInTimeRecoveryPoint, bool) { + return nil, false +} + +// AsBasicAzureWorkloadPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadSAPHanaRecoveryPoint. +func (awshrp AzureWorkloadSAPHanaRecoveryPoint) AsBasicAzureWorkloadPointInTimeRecoveryPoint() (BasicAzureWorkloadPointInTimeRecoveryPoint, bool) { + return nil, false +} + // AsAzureWorkloadRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadSAPHanaRecoveryPoint. func (awshrp AzureWorkloadSAPHanaRecoveryPoint) AsAzureWorkloadRecoveryPoint() (*AzureWorkloadRecoveryPoint, bool) { return nil, false @@ -8665,7 +9992,7 @@ func (awshrp AzureWorkloadSAPHanaRecoveryPoint) AsAzureWorkloadRecoveryPoint() ( // AsBasicAzureWorkloadRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadSAPHanaRecoveryPoint. 
func (awshrp AzureWorkloadSAPHanaRecoveryPoint) AsBasicAzureWorkloadRecoveryPoint() (BasicAzureWorkloadRecoveryPoint, bool) { - return nil, false + return &awshrp, true } // AsAzureWorkloadSAPHanaPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadSAPHanaRecoveryPoint. @@ -8678,11 +10005,6 @@ func (awshrp AzureWorkloadSAPHanaRecoveryPoint) AsAzureWorkloadSAPHanaRecoveryPo return &awshrp, true } -// AsBasicAzureWorkloadSAPHanaRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadSAPHanaRecoveryPoint. -func (awshrp AzureWorkloadSAPHanaRecoveryPoint) AsBasicAzureWorkloadSAPHanaRecoveryPoint() (BasicAzureWorkloadSAPHanaRecoveryPoint, bool) { - return &awshrp, true -} - // AsAzureWorkloadSQLPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadSAPHanaRecoveryPoint. func (awshrp AzureWorkloadSAPHanaRecoveryPoint) AsAzureWorkloadSQLPointInTimeRecoveryPoint() (*AzureWorkloadSQLPointInTimeRecoveryPoint, bool) { return nil, false @@ -8726,15 +10048,15 @@ type BasicAzureWorkloadSAPHanaRestoreRequest interface { // AzureWorkloadSAPHanaRestoreRequest azureWorkload SAP Hana-specific restore. type AzureWorkloadSAPHanaRestoreRequest struct { - // TargetInfo - Details of target database - TargetInfo *TargetRestoreInfo `json:"targetInfo,omitempty"` // RecoveryType - OLR/ALR, RestoreDisks is invalid option. Possible values include: 'RecoveryTypeInvalid', 'RecoveryTypeOriginalLocation', 'RecoveryTypeAlternateLocation', 'RecoveryTypeRestoreDisks' RecoveryType RecoveryType `json:"recoveryType,omitempty"` // SourceResourceID - Fully qualified ARM ID of the VM on which workload that was running is being recovered. SourceResourceID *string `json:"sourceResourceId,omitempty"` // PropertyBag - Workload specific property bag. 
PropertyBag map[string]*string `json:"propertyBag"` - // ObjectType - Possible values include: 'ObjectTypeRestoreRequest', 'ObjectTypeAzureFileShareRestoreRequest', 'ObjectTypeAzureWorkloadRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaRestoreRequest', 'ObjectTypeAzureWorkloadSQLPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSQLRestoreRequest', 'ObjectTypeIaasVMRestoreRequest' + // TargetInfo - Details of target database + TargetInfo *TargetRestoreInfo `json:"targetInfo,omitempty"` + // ObjectType - Possible values include: 'ObjectTypeRestoreRequest', 'ObjectTypeAzureFileShareRestoreRequest', 'ObjectTypeAzureWorkloadPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaRestoreRequest', 'ObjectTypeAzureWorkloadSQLPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSQLRestoreRequest', 'ObjectTypeIaasVMRestoreRequest' ObjectType ObjectTypeBasicRestoreRequest `json:"objectType,omitempty"` } @@ -8779,9 +10101,6 @@ func unmarshalBasicAzureWorkloadSAPHanaRestoreRequestArray(body []byte) ([]Basic func (awshrr AzureWorkloadSAPHanaRestoreRequest) MarshalJSON() ([]byte, error) { awshrr.ObjectType = ObjectTypeAzureWorkloadSAPHanaRestoreRequest objectMap := make(map[string]interface{}) - if awshrr.TargetInfo != nil { - objectMap["targetInfo"] = awshrr.TargetInfo - } if awshrr.RecoveryType != "" { objectMap["recoveryType"] = awshrr.RecoveryType } @@ -8791,6 +10110,9 @@ func (awshrr AzureWorkloadSAPHanaRestoreRequest) MarshalJSON() ([]byte, error) { if awshrr.PropertyBag != nil { objectMap["propertyBag"] = awshrr.PropertyBag } + if awshrr.TargetInfo != nil { + objectMap["targetInfo"] = awshrr.TargetInfo + } if awshrr.ObjectType != "" { objectMap["objectType"] = awshrr.ObjectType } @@ -8802,6 +10124,11 @@ func (awshrr AzureWorkloadSAPHanaRestoreRequest) AsAzureFileShareRestoreRequest( return nil, false } +// 
AsAzureWorkloadPointInTimeRestoreRequest is the BasicRestoreRequest implementation for AzureWorkloadSAPHanaRestoreRequest. +func (awshrr AzureWorkloadSAPHanaRestoreRequest) AsAzureWorkloadPointInTimeRestoreRequest() (*AzureWorkloadPointInTimeRestoreRequest, bool) { + return nil, false +} + // AsAzureWorkloadRestoreRequest is the BasicRestoreRequest implementation for AzureWorkloadSAPHanaRestoreRequest. func (awshrr AzureWorkloadSAPHanaRestoreRequest) AsAzureWorkloadRestoreRequest() (*AzureWorkloadRestoreRequest, bool) { return nil, false @@ -8809,7 +10136,7 @@ func (awshrr AzureWorkloadSAPHanaRestoreRequest) AsAzureWorkloadRestoreRequest() // AsBasicAzureWorkloadRestoreRequest is the BasicRestoreRequest implementation for AzureWorkloadSAPHanaRestoreRequest. func (awshrr AzureWorkloadSAPHanaRestoreRequest) AsBasicAzureWorkloadRestoreRequest() (BasicAzureWorkloadRestoreRequest, bool) { - return nil, false + return &awshrr, true } // AsAzureWorkloadSAPHanaPointInTimeRestoreRequest is the BasicRestoreRequest implementation for AzureWorkloadSAPHanaRestoreRequest. @@ -8859,7 +10186,7 @@ func (awshrr AzureWorkloadSAPHanaRestoreRequest) AsBasicRestoreRequest() (BasicR // AzureWorkloadSQLAutoProtectionIntent azure Workload SQL Auto Protection intent item. type AzureWorkloadSQLAutoProtectionIntent struct { - // WorkloadItemType - Workload item type of the item for which intent is to be set. Possible values include: 'WorkloadItemTypeInvalid', 'WorkloadItemTypeSQLInstance', 'WorkloadItemTypeSQLDataBase', 'WorkloadItemTypeSAPHanaSystem', 'WorkloadItemTypeSAPHanaDatabase' + // WorkloadItemType - Workload item type of the item for which intent is to be set. 
Possible values include: 'WorkloadItemTypeInvalid', 'WorkloadItemTypeSQLInstance', 'WorkloadItemTypeSQLDataBase', 'WorkloadItemTypeSAPHanaSystem', 'WorkloadItemTypeSAPHanaDatabase', 'WorkloadItemTypeSAPAseSystem', 'WorkloadItemTypeSAPAseDatabase' WorkloadItemType WorkloadItemType `json:"workloadItemType,omitempty"` // BackupManagementType - Type of backup management for the backed up item. Possible values include: 'ManagementTypeInvalid', 'ManagementTypeAzureIaasVM', 'ManagementTypeMAB', 'ManagementTypeDPM', 'ManagementTypeAzureBackupServer', 'ManagementTypeAzureSQL', 'ManagementTypeAzureStorage', 'ManagementTypeAzureWorkload', 'ManagementTypeDefaultBackup' BackupManagementType ManagementType `json:"backupManagementType,omitempty"` @@ -8948,14 +10275,14 @@ type AzureWorkloadSQLPointInTimeRecoveryPoint struct { // TimeRanges - List of log ranges TimeRanges *[]PointInTimeRange `json:"timeRanges,omitempty"` // ExtendedInfo - Extended Info that provides data directory details. Will be populated in two cases: - // When a specific recovery point is accessed using GetRecoveryPoint - // Or when ListRecoveryPoints is called for Log RP only with ExtendedInfo query filter + // When a specific recovery point is accessed using GetRecoveryPoint + // Or when ListRecoveryPoints is called for Log RP only with ExtendedInfo query filter ExtendedInfo *AzureWorkloadSQLRecoveryPointExtendedInfo `json:"extendedInfo,omitempty"` // RecoveryPointTimeInUTC - UTC time at which recovery point was created RecoveryPointTimeInUTC *date.Time `json:"recoveryPointTimeInUTC,omitempty"` // Type - Type of restore point. 
Possible values include: 'RestorePointTypeInvalid', 'RestorePointTypeFull', 'RestorePointTypeLog', 'RestorePointTypeDifferential' Type RestorePointType `json:"type,omitempty"` - // ObjectType - Possible values include: 'ObjectTypeRecoveryPoint', 'ObjectTypeAzureFileShareRecoveryPoint', 'ObjectTypeAzureWorkloadRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaRecoveryPoint', 'ObjectTypeAzureWorkloadSQLPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSQLRecoveryPoint', 'ObjectTypeGenericRecoveryPoint', 'ObjectTypeIaasVMRecoveryPoint' + // ObjectType - Possible values include: 'ObjectTypeRecoveryPoint', 'ObjectTypeAzureFileShareRecoveryPoint', 'ObjectTypeAzureWorkloadPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaRecoveryPoint', 'ObjectTypeAzureWorkloadSQLPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSQLRecoveryPoint', 'ObjectTypeGenericRecoveryPoint', 'ObjectTypeIaasVMRecoveryPoint' ObjectType ObjectTypeBasicRecoveryPoint `json:"objectType,omitempty"` } @@ -8986,6 +10313,16 @@ func (awspitrp AzureWorkloadSQLPointInTimeRecoveryPoint) AsAzureFileShareRecover return nil, false } +// AsAzureWorkloadPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadSQLPointInTimeRecoveryPoint. +func (awspitrp AzureWorkloadSQLPointInTimeRecoveryPoint) AsAzureWorkloadPointInTimeRecoveryPoint() (*AzureWorkloadPointInTimeRecoveryPoint, bool) { + return nil, false +} + +// AsBasicAzureWorkloadPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadSQLPointInTimeRecoveryPoint. 
+func (awspitrp AzureWorkloadSQLPointInTimeRecoveryPoint) AsBasicAzureWorkloadPointInTimeRecoveryPoint() (BasicAzureWorkloadPointInTimeRecoveryPoint, bool) { + return nil, false +} + // AsAzureWorkloadRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadSQLPointInTimeRecoveryPoint. func (awspitrp AzureWorkloadSQLPointInTimeRecoveryPoint) AsAzureWorkloadRecoveryPoint() (*AzureWorkloadRecoveryPoint, bool) { return nil, false @@ -9006,11 +10343,6 @@ func (awspitrp AzureWorkloadSQLPointInTimeRecoveryPoint) AsAzureWorkloadSAPHanaR return nil, false } -// AsBasicAzureWorkloadSAPHanaRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadSQLPointInTimeRecoveryPoint. -func (awspitrp AzureWorkloadSQLPointInTimeRecoveryPoint) AsBasicAzureWorkloadSAPHanaRecoveryPoint() (BasicAzureWorkloadSAPHanaRecoveryPoint, bool) { - return nil, false -} - // AsAzureWorkloadSQLPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadSQLPointInTimeRecoveryPoint. func (awspitrp AzureWorkloadSQLPointInTimeRecoveryPoint) AsAzureWorkloadSQLPointInTimeRecoveryPoint() (*AzureWorkloadSQLPointInTimeRecoveryPoint, bool) { return &awspitrp, true @@ -9055,8 +10387,6 @@ type AzureWorkloadSQLPointInTimeRestoreRequest struct { ShouldUseAlternateTargetLocation *bool `json:"shouldUseAlternateTargetLocation,omitempty"` // IsNonRecoverable - SQL specific property where user can chose to set no-recovery when restore operation is tried IsNonRecoverable *bool `json:"isNonRecoverable,omitempty"` - // TargetInfo - Details of target database - TargetInfo *TargetRestoreInfo `json:"targetInfo,omitempty"` // AlternateDirectoryPaths - Data directory details AlternateDirectoryPaths *[]SQLDataDirectoryMapping `json:"alternateDirectoryPaths,omitempty"` // RecoveryType - OLR/ALR, RestoreDisks is invalid option. 
Possible values include: 'RecoveryTypeInvalid', 'RecoveryTypeOriginalLocation', 'RecoveryTypeAlternateLocation', 'RecoveryTypeRestoreDisks' @@ -9065,7 +10395,9 @@ type AzureWorkloadSQLPointInTimeRestoreRequest struct { SourceResourceID *string `json:"sourceResourceId,omitempty"` // PropertyBag - Workload specific property bag. PropertyBag map[string]*string `json:"propertyBag"` - // ObjectType - Possible values include: 'ObjectTypeRestoreRequest', 'ObjectTypeAzureFileShareRestoreRequest', 'ObjectTypeAzureWorkloadRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaRestoreRequest', 'ObjectTypeAzureWorkloadSQLPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSQLRestoreRequest', 'ObjectTypeIaasVMRestoreRequest' + // TargetInfo - Details of target database + TargetInfo *TargetRestoreInfo `json:"targetInfo,omitempty"` + // ObjectType - Possible values include: 'ObjectTypeRestoreRequest', 'ObjectTypeAzureFileShareRestoreRequest', 'ObjectTypeAzureWorkloadPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaRestoreRequest', 'ObjectTypeAzureWorkloadSQLPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSQLRestoreRequest', 'ObjectTypeIaasVMRestoreRequest' ObjectType ObjectTypeBasicRestoreRequest `json:"objectType,omitempty"` } @@ -9082,9 +10414,6 @@ func (awspitrr AzureWorkloadSQLPointInTimeRestoreRequest) MarshalJSON() ([]byte, if awspitrr.IsNonRecoverable != nil { objectMap["isNonRecoverable"] = awspitrr.IsNonRecoverable } - if awspitrr.TargetInfo != nil { - objectMap["targetInfo"] = awspitrr.TargetInfo - } if awspitrr.AlternateDirectoryPaths != nil { objectMap["alternateDirectoryPaths"] = awspitrr.AlternateDirectoryPaths } @@ -9097,6 +10426,9 @@ func (awspitrr AzureWorkloadSQLPointInTimeRestoreRequest) MarshalJSON() ([]byte, if awspitrr.PropertyBag != nil { objectMap["propertyBag"] = awspitrr.PropertyBag } + if 
awspitrr.TargetInfo != nil { + objectMap["targetInfo"] = awspitrr.TargetInfo + } if awspitrr.ObjectType != "" { objectMap["objectType"] = awspitrr.ObjectType } @@ -9108,6 +10440,11 @@ func (awspitrr AzureWorkloadSQLPointInTimeRestoreRequest) AsAzureFileShareRestor return nil, false } +// AsAzureWorkloadPointInTimeRestoreRequest is the BasicRestoreRequest implementation for AzureWorkloadSQLPointInTimeRestoreRequest. +func (awspitrr AzureWorkloadSQLPointInTimeRestoreRequest) AsAzureWorkloadPointInTimeRestoreRequest() (*AzureWorkloadPointInTimeRestoreRequest, bool) { + return nil, false +} + // AsAzureWorkloadRestoreRequest is the BasicRestoreRequest implementation for AzureWorkloadSQLPointInTimeRestoreRequest. func (awspitrr AzureWorkloadSQLPointInTimeRestoreRequest) AsAzureWorkloadRestoreRequest() (*AzureWorkloadRestoreRequest, bool) { return nil, false @@ -9174,14 +10511,14 @@ type BasicAzureWorkloadSQLRecoveryPoint interface { // point along with extended info type AzureWorkloadSQLRecoveryPoint struct { // ExtendedInfo - Extended Info that provides data directory details. Will be populated in two cases: - // When a specific recovery point is accessed using GetRecoveryPoint - // Or when ListRecoveryPoints is called for Log RP only with ExtendedInfo query filter + // When a specific recovery point is accessed using GetRecoveryPoint + // Or when ListRecoveryPoints is called for Log RP only with ExtendedInfo query filter ExtendedInfo *AzureWorkloadSQLRecoveryPointExtendedInfo `json:"extendedInfo,omitempty"` // RecoveryPointTimeInUTC - UTC time at which recovery point was created RecoveryPointTimeInUTC *date.Time `json:"recoveryPointTimeInUTC,omitempty"` // Type - Type of restore point. 
Possible values include: 'RestorePointTypeInvalid', 'RestorePointTypeFull', 'RestorePointTypeLog', 'RestorePointTypeDifferential' Type RestorePointType `json:"type,omitempty"` - // ObjectType - Possible values include: 'ObjectTypeRecoveryPoint', 'ObjectTypeAzureFileShareRecoveryPoint', 'ObjectTypeAzureWorkloadRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaRecoveryPoint', 'ObjectTypeAzureWorkloadSQLPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSQLRecoveryPoint', 'ObjectTypeGenericRecoveryPoint', 'ObjectTypeIaasVMRecoveryPoint' + // ObjectType - Possible values include: 'ObjectTypeRecoveryPoint', 'ObjectTypeAzureFileShareRecoveryPoint', 'ObjectTypeAzureWorkloadPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaRecoveryPoint', 'ObjectTypeAzureWorkloadSQLPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSQLRecoveryPoint', 'ObjectTypeGenericRecoveryPoint', 'ObjectTypeIaasVMRecoveryPoint' ObjectType ObjectTypeBasicRecoveryPoint `json:"objectType,omitempty"` } @@ -9246,6 +10583,16 @@ func (awsrp AzureWorkloadSQLRecoveryPoint) AsAzureFileShareRecoveryPoint() (*Azu return nil, false } +// AsAzureWorkloadPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadSQLRecoveryPoint. +func (awsrp AzureWorkloadSQLRecoveryPoint) AsAzureWorkloadPointInTimeRecoveryPoint() (*AzureWorkloadPointInTimeRecoveryPoint, bool) { + return nil, false +} + +// AsBasicAzureWorkloadPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadSQLRecoveryPoint. +func (awsrp AzureWorkloadSQLRecoveryPoint) AsBasicAzureWorkloadPointInTimeRecoveryPoint() (BasicAzureWorkloadPointInTimeRecoveryPoint, bool) { + return nil, false +} + // AsAzureWorkloadRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadSQLRecoveryPoint. 
func (awsrp AzureWorkloadSQLRecoveryPoint) AsAzureWorkloadRecoveryPoint() (*AzureWorkloadRecoveryPoint, bool) { return nil, false @@ -9266,11 +10613,6 @@ func (awsrp AzureWorkloadSQLRecoveryPoint) AsAzureWorkloadSAPHanaRecoveryPoint() return nil, false } -// AsBasicAzureWorkloadSAPHanaRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadSQLRecoveryPoint. -func (awsrp AzureWorkloadSQLRecoveryPoint) AsBasicAzureWorkloadSAPHanaRecoveryPoint() (BasicAzureWorkloadSAPHanaRecoveryPoint, bool) { - return nil, false -} - // AsAzureWorkloadSQLPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for AzureWorkloadSQLRecoveryPoint. func (awsrp AzureWorkloadSQLRecoveryPoint) AsAzureWorkloadSQLPointInTimeRecoveryPoint() (*AzureWorkloadSQLPointInTimeRecoveryPoint, bool) { return nil, false @@ -9326,8 +10668,6 @@ type AzureWorkloadSQLRestoreRequest struct { ShouldUseAlternateTargetLocation *bool `json:"shouldUseAlternateTargetLocation,omitempty"` // IsNonRecoverable - SQL specific property where user can chose to set no-recovery when restore operation is tried IsNonRecoverable *bool `json:"isNonRecoverable,omitempty"` - // TargetInfo - Details of target database - TargetInfo *TargetRestoreInfo `json:"targetInfo,omitempty"` // AlternateDirectoryPaths - Data directory details AlternateDirectoryPaths *[]SQLDataDirectoryMapping `json:"alternateDirectoryPaths,omitempty"` // RecoveryType - OLR/ALR, RestoreDisks is invalid option. Possible values include: 'RecoveryTypeInvalid', 'RecoveryTypeOriginalLocation', 'RecoveryTypeAlternateLocation', 'RecoveryTypeRestoreDisks' @@ -9336,7 +10676,9 @@ type AzureWorkloadSQLRestoreRequest struct { SourceResourceID *string `json:"sourceResourceId,omitempty"` // PropertyBag - Workload specific property bag. 
PropertyBag map[string]*string `json:"propertyBag"` - // ObjectType - Possible values include: 'ObjectTypeRestoreRequest', 'ObjectTypeAzureFileShareRestoreRequest', 'ObjectTypeAzureWorkloadRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaRestoreRequest', 'ObjectTypeAzureWorkloadSQLPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSQLRestoreRequest', 'ObjectTypeIaasVMRestoreRequest' + // TargetInfo - Details of target database + TargetInfo *TargetRestoreInfo `json:"targetInfo,omitempty"` + // ObjectType - Possible values include: 'ObjectTypeRestoreRequest', 'ObjectTypeAzureFileShareRestoreRequest', 'ObjectTypeAzureWorkloadPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaRestoreRequest', 'ObjectTypeAzureWorkloadSQLPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSQLRestoreRequest', 'ObjectTypeIaasVMRestoreRequest' ObjectType ObjectTypeBasicRestoreRequest `json:"objectType,omitempty"` } @@ -9387,9 +10729,6 @@ func (awsrr AzureWorkloadSQLRestoreRequest) MarshalJSON() ([]byte, error) { if awsrr.IsNonRecoverable != nil { objectMap["isNonRecoverable"] = awsrr.IsNonRecoverable } - if awsrr.TargetInfo != nil { - objectMap["targetInfo"] = awsrr.TargetInfo - } if awsrr.AlternateDirectoryPaths != nil { objectMap["alternateDirectoryPaths"] = awsrr.AlternateDirectoryPaths } @@ -9402,6 +10741,9 @@ func (awsrr AzureWorkloadSQLRestoreRequest) MarshalJSON() ([]byte, error) { if awsrr.PropertyBag != nil { objectMap["propertyBag"] = awsrr.PropertyBag } + if awsrr.TargetInfo != nil { + objectMap["targetInfo"] = awsrr.TargetInfo + } if awsrr.ObjectType != "" { objectMap["objectType"] = awsrr.ObjectType } @@ -9413,6 +10755,11 @@ func (awsrr AzureWorkloadSQLRestoreRequest) AsAzureFileShareRestoreRequest() (*A return nil, false } +// AsAzureWorkloadPointInTimeRestoreRequest is the BasicRestoreRequest implementation for 
AzureWorkloadSQLRestoreRequest. +func (awsrr AzureWorkloadSQLRestoreRequest) AsAzureWorkloadPointInTimeRestoreRequest() (*AzureWorkloadPointInTimeRestoreRequest, bool) { + return nil, false +} + // AsAzureWorkloadRestoreRequest is the BasicRestoreRequest implementation for AzureWorkloadSQLRestoreRequest. func (awsrr AzureWorkloadSQLRestoreRequest) AsAzureWorkloadRestoreRequest() (*AzureWorkloadRestoreRequest, bool) { return nil, false @@ -9520,7 +10867,7 @@ type BMSContainerQueryObject struct { type BMSContainersInquiryQueryObject struct { // BackupManagementType - Backup management type for this container. Possible values include: 'ManagementTypeInvalid', 'ManagementTypeAzureIaasVM', 'ManagementTypeMAB', 'ManagementTypeDPM', 'ManagementTypeAzureBackupServer', 'ManagementTypeAzureSQL', 'ManagementTypeAzureStorage', 'ManagementTypeAzureWorkload', 'ManagementTypeDefaultBackup' BackupManagementType ManagementType `json:"backupManagementType,omitempty"` - // WorkloadType - Workload type for this container. Possible values include: 'WorkloadTypeInvalid', 'WorkloadTypeVM', 'WorkloadTypeFileFolder', 'WorkloadTypeAzureSQLDb', 'WorkloadTypeSQLDB', 'WorkloadTypeExchange', 'WorkloadTypeSharepoint', 'WorkloadTypeVMwareVM', 'WorkloadTypeSystemState', 'WorkloadTypeClient', 'WorkloadTypeGenericDataSource', 'WorkloadTypeSQLDataBase', 'WorkloadTypeAzureFileShare', 'WorkloadTypeSAPHanaDatabase' + // WorkloadType - Workload type for this container. 
Possible values include: 'WorkloadTypeInvalid', 'WorkloadTypeVM', 'WorkloadTypeFileFolder', 'WorkloadTypeAzureSQLDb', 'WorkloadTypeSQLDB', 'WorkloadTypeExchange', 'WorkloadTypeSharepoint', 'WorkloadTypeVMwareVM', 'WorkloadTypeSystemState', 'WorkloadTypeClient', 'WorkloadTypeGenericDataSource', 'WorkloadTypeSQLDataBase', 'WorkloadTypeAzureFileShare', 'WorkloadTypeSAPHanaDatabase', 'WorkloadTypeSAPAseDatabase' WorkloadType WorkloadType `json:"workloadType,omitempty"` } @@ -9528,7 +10875,7 @@ type BMSContainersInquiryQueryObject struct { type BMSPOQueryObject struct { // BackupManagementType - Backup management type. Possible values include: 'ManagementTypeInvalid', 'ManagementTypeAzureIaasVM', 'ManagementTypeMAB', 'ManagementTypeDPM', 'ManagementTypeAzureBackupServer', 'ManagementTypeAzureSQL', 'ManagementTypeAzureStorage', 'ManagementTypeAzureWorkload', 'ManagementTypeDefaultBackup' BackupManagementType ManagementType `json:"backupManagementType,omitempty"` - // WorkloadType - Workload type. Possible values include: 'WorkloadTypeInvalid', 'WorkloadTypeVM', 'WorkloadTypeFileFolder', 'WorkloadTypeAzureSQLDb', 'WorkloadTypeSQLDB', 'WorkloadTypeExchange', 'WorkloadTypeSharepoint', 'WorkloadTypeVMwareVM', 'WorkloadTypeSystemState', 'WorkloadTypeClient', 'WorkloadTypeGenericDataSource', 'WorkloadTypeSQLDataBase', 'WorkloadTypeAzureFileShare', 'WorkloadTypeSAPHanaDatabase' + // WorkloadType - Workload type. Possible values include: 'WorkloadTypeInvalid', 'WorkloadTypeVM', 'WorkloadTypeFileFolder', 'WorkloadTypeAzureSQLDb', 'WorkloadTypeSQLDB', 'WorkloadTypeExchange', 'WorkloadTypeSharepoint', 'WorkloadTypeVMwareVM', 'WorkloadTypeSystemState', 'WorkloadTypeClient', 'WorkloadTypeGenericDataSource', 'WorkloadTypeSQLDataBase', 'WorkloadTypeAzureFileShare', 'WorkloadTypeSAPHanaDatabase', 'WorkloadTypeSAPAseDatabase' WorkloadType WorkloadType `json:"workloadType,omitempty"` // ContainerName - Full name of the container whose Protectable Objects should be returned. 
ContainerName *string `json:"containerName,omitempty"` @@ -9560,9 +10907,9 @@ type BMSRPQueryObject struct { type BMSWorkloadItemQueryObject struct { // BackupManagementType - Backup management type. Possible values include: 'ManagementTypeInvalid', 'ManagementTypeAzureIaasVM', 'ManagementTypeMAB', 'ManagementTypeDPM', 'ManagementTypeAzureBackupServer', 'ManagementTypeAzureSQL', 'ManagementTypeAzureStorage', 'ManagementTypeAzureWorkload', 'ManagementTypeDefaultBackup' BackupManagementType ManagementType `json:"backupManagementType,omitempty"` - // WorkloadItemType - Workload Item type. Possible values include: 'WorkloadItemTypeInvalid', 'WorkloadItemTypeSQLInstance', 'WorkloadItemTypeSQLDataBase', 'WorkloadItemTypeSAPHanaSystem', 'WorkloadItemTypeSAPHanaDatabase' + // WorkloadItemType - Workload Item type. Possible values include: 'WorkloadItemTypeInvalid', 'WorkloadItemTypeSQLInstance', 'WorkloadItemTypeSQLDataBase', 'WorkloadItemTypeSAPHanaSystem', 'WorkloadItemTypeSAPHanaDatabase', 'WorkloadItemTypeSAPAseSystem', 'WorkloadItemTypeSAPAseDatabase' WorkloadItemType WorkloadItemType `json:"workloadItemType,omitempty"` - // WorkloadType - Workload type. Possible values include: 'WorkloadTypeInvalid', 'WorkloadTypeVM', 'WorkloadTypeFileFolder', 'WorkloadTypeAzureSQLDb', 'WorkloadTypeSQLDB', 'WorkloadTypeExchange', 'WorkloadTypeSharepoint', 'WorkloadTypeVMwareVM', 'WorkloadTypeSystemState', 'WorkloadTypeClient', 'WorkloadTypeGenericDataSource', 'WorkloadTypeSQLDataBase', 'WorkloadTypeAzureFileShare', 'WorkloadTypeSAPHanaDatabase' + // WorkloadType - Workload type. 
Possible values include: 'WorkloadTypeInvalid', 'WorkloadTypeVM', 'WorkloadTypeFileFolder', 'WorkloadTypeAzureSQLDb', 'WorkloadTypeSQLDB', 'WorkloadTypeExchange', 'WorkloadTypeSharepoint', 'WorkloadTypeVMwareVM', 'WorkloadTypeSystemState', 'WorkloadTypeClient', 'WorkloadTypeGenericDataSource', 'WorkloadTypeSQLDataBase', 'WorkloadTypeAzureFileShare', 'WorkloadTypeSAPHanaDatabase', 'WorkloadTypeSAPAseDatabase' WorkloadType WorkloadType `json:"workloadType,omitempty"` // ProtectionStatus - Backup status query parameter. Possible values include: 'ProtectionStatusInvalid', 'ProtectionStatusNotProtected', 'ProtectionStatusProtecting', 'ProtectionStatusProtected', 'ProtectionStatusProtectionFailed' ProtectionStatus ProtectionStatus `json:"protectionStatus,omitempty"` @@ -9773,8 +11120,8 @@ type ClientScriptForConnect struct { OsType *string `json:"osType,omitempty"` // URL - URL of Executable from where to source the content. If this is not null then ScriptContent should not be used URL *string `json:"url,omitempty"` - // ScriptNameSuffix - Mandator suffix that should be added to the name of script that is given for download to user. - // If its null or empty then , ignore it. + // ScriptNameSuffix - Mandatory suffix that should be added to the name of script that is given for download to user. + // If its null or empty then , ignore it. ScriptNameSuffix *string `json:"scriptNameSuffix,omitempty"` } @@ -9817,7 +11164,7 @@ type DistributedNodesInfo struct { // NodeName - Name of the node under a distributed container. NodeName *string `json:"nodeName,omitempty"` // Status - Status of this Node. - // Failed | Succeeded + // Failed | Succeeded Status *string `json:"status,omitempty"` // ErrorDetail - Error Details if the Status is non-success. 
ErrorDetail *ErrorDetail `json:"errorDetail,omitempty"` @@ -9919,6 +11266,12 @@ func (dbe DpmBackupEngine) AsBasicEngineBase() (BasicEngineBase, bool) { return &dbe, true } +// BasicDpmContainer DPM workload-specific protection container. +type BasicDpmContainer interface { + AsAzureBackupServerContainer() (*AzureBackupServerContainer, bool) + AsDpmContainer() (*DpmContainer, bool) +} + // DpmContainer DPM workload-specific protection container. type DpmContainer struct { // CanReRegister - Specifies whether the container is re-registrable. @@ -9945,10 +11298,47 @@ type DpmContainer struct { RegistrationStatus *string `json:"registrationStatus,omitempty"` // HealthStatus - Status of health of the container. HealthStatus *string `json:"healthStatus,omitempty"` - // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadBackupRequest', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' + // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadContainer', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' ContainerType ContainerTypeBasicProtectionContainer `json:"containerType,omitempty"` } +func unmarshalBasicDpmContainer(body []byte) (BasicDpmContainer, error) { + var 
m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["containerType"] { + case string(ContainerTypeAzureBackupServerContainer1): + var absc AzureBackupServerContainer + err := json.Unmarshal(body, &absc) + return absc, err + default: + var dc DpmContainer + err := json.Unmarshal(body, &dc) + return dc, err + } +} +func unmarshalBasicDpmContainerArray(body []byte) ([]BasicDpmContainer, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + dcArray := make([]BasicDpmContainer, len(rawMessages)) + + for index, rawMessage := range rawMessages { + dc, err := unmarshalBasicDpmContainer(*rawMessage) + if err != nil { + return nil, err + } + dcArray[index] = dc + } + return dcArray, nil +} + // MarshalJSON is the custom marshaler for DpmContainer. func (dc DpmContainer) MarshalJSON() ([]byte, error) { dc.ContainerType = ContainerTypeDPMContainer1 @@ -10045,6 +11435,11 @@ func (dc DpmContainer) AsDpmContainer() (*DpmContainer, bool) { return &dc, true } +// AsBasicDpmContainer is the BasicProtectionContainer implementation for DpmContainer. +func (dc DpmContainer) AsBasicDpmContainer() (BasicDpmContainer, bool) { + return &dc, true +} + // AsGenericContainer is the BasicProtectionContainer implementation for DpmContainer. func (dc DpmContainer) AsGenericContainer() (*GenericContainer, bool) { return nil, false @@ -10268,7 +11663,7 @@ type DPMProtectedItem struct { ExtendedInfo *DPMProtectedItemExtendedInfo `json:"extendedInfo,omitempty"` // BackupManagementType - Type of backup management for the backed up item. 
Possible values include: 'ManagementTypeInvalid', 'ManagementTypeAzureIaasVM', 'ManagementTypeMAB', 'ManagementTypeDPM', 'ManagementTypeAzureBackupServer', 'ManagementTypeAzureSQL', 'ManagementTypeAzureStorage', 'ManagementTypeAzureWorkload', 'ManagementTypeDefaultBackup' BackupManagementType ManagementType `json:"backupManagementType,omitempty"` - // WorkloadType - Type of workload this item represents. Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase' + // WorkloadType - Type of workload this item represents. Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase', 'DataSourceTypeSAPAseDatabase' WorkloadType DataSourceType `json:"workloadType,omitempty"` // ContainerName - Unique name of container ContainerName *string `json:"containerName,omitempty"` @@ -10282,7 +11677,7 @@ type DPMProtectedItem struct { BackupSetName *string `json:"backupSetName,omitempty"` // CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. 
Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover' CreateMode CreateMode `json:"createMode,omitempty"` - // ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem' + // ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem' ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"` } @@ -10375,6 +11770,11 @@ func (dpi DPMProtectedItem) AsBasicAzureVMWorkloadProtectedItem() (BasicAzureVMW return nil, false } +// AsAzureVMWorkloadSAPAseDatabaseProtectedItem is the BasicProtectedItem implementation for DPMProtectedItem. +func (dpi DPMProtectedItem) AsAzureVMWorkloadSAPAseDatabaseProtectedItem() (*AzureVMWorkloadSAPAseDatabaseProtectedItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseProtectedItem is the BasicProtectedItem implementation for DPMProtectedItem. 
func (dpi DPMProtectedItem) AsAzureVMWorkloadSAPHanaDatabaseProtectedItem() (*AzureVMWorkloadSAPHanaDatabaseProtectedItem, bool) { return nil, false @@ -11090,7 +12490,7 @@ type GenericContainer struct { RegistrationStatus *string `json:"registrationStatus,omitempty"` // HealthStatus - Status of health of the container. HealthStatus *string `json:"healthStatus,omitempty"` - // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadBackupRequest', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' + // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadContainer', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' ContainerType ContainerTypeBasicProtectionContainer `json:"containerType,omitempty"` } @@ -11172,6 +12572,11 @@ func (gc GenericContainer) AsDpmContainer() (*DpmContainer, bool) { return nil, false } +// AsBasicDpmContainer is the BasicProtectionContainer implementation for GenericContainer. +func (gc GenericContainer) AsBasicDpmContainer() (BasicDpmContainer, bool) { + return nil, false +} + // AsGenericContainer is the BasicProtectionContainer implementation for GenericContainer. 
func (gc GenericContainer) AsGenericContainer() (*GenericContainer, bool) { return &gc, true @@ -11243,7 +12648,7 @@ type GenericProtectedItem struct { FabricName *string `json:"fabricName,omitempty"` // BackupManagementType - Type of backup management for the backed up item. Possible values include: 'ManagementTypeInvalid', 'ManagementTypeAzureIaasVM', 'ManagementTypeMAB', 'ManagementTypeDPM', 'ManagementTypeAzureBackupServer', 'ManagementTypeAzureSQL', 'ManagementTypeAzureStorage', 'ManagementTypeAzureWorkload', 'ManagementTypeDefaultBackup' BackupManagementType ManagementType `json:"backupManagementType,omitempty"` - // WorkloadType - Type of workload this item represents. Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase' + // WorkloadType - Type of workload this item represents. Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase', 'DataSourceTypeSAPAseDatabase' WorkloadType DataSourceType `json:"workloadType,omitempty"` // ContainerName - Unique name of container ContainerName *string `json:"containerName,omitempty"` @@ -11257,7 +12662,7 @@ type GenericProtectedItem struct { BackupSetName *string `json:"backupSetName,omitempty"` // CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. 
Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover' CreateMode CreateMode `json:"createMode,omitempty"` - // ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem' + // ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem' ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"` } @@ -11353,6 +12758,11 @@ func (gpi GenericProtectedItem) AsBasicAzureVMWorkloadProtectedItem() (BasicAzur return nil, false } +// AsAzureVMWorkloadSAPAseDatabaseProtectedItem is the BasicProtectedItem implementation for GenericProtectedItem. +func (gpi GenericProtectedItem) AsAzureVMWorkloadSAPAseDatabaseProtectedItem() (*AzureVMWorkloadSAPAseDatabaseProtectedItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseProtectedItem is the BasicProtectedItem implementation for GenericProtectedItem. 
func (gpi GenericProtectedItem) AsAzureVMWorkloadSAPHanaDatabaseProtectedItem() (*AzureVMWorkloadSAPHanaDatabaseProtectedItem, bool) { return nil, false @@ -11474,7 +12884,7 @@ type GenericRecoveryPoint struct { RecoveryPointTime *date.Time `json:"recoveryPointTime,omitempty"` // RecoveryPointAdditionalInfo - Additional information associated with this backup copy. RecoveryPointAdditionalInfo *string `json:"recoveryPointAdditionalInfo,omitempty"` - // ObjectType - Possible values include: 'ObjectTypeRecoveryPoint', 'ObjectTypeAzureFileShareRecoveryPoint', 'ObjectTypeAzureWorkloadRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaRecoveryPoint', 'ObjectTypeAzureWorkloadSQLPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSQLRecoveryPoint', 'ObjectTypeGenericRecoveryPoint', 'ObjectTypeIaasVMRecoveryPoint' + // ObjectType - Possible values include: 'ObjectTypeRecoveryPoint', 'ObjectTypeAzureFileShareRecoveryPoint', 'ObjectTypeAzureWorkloadPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaRecoveryPoint', 'ObjectTypeAzureWorkloadSQLPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSQLRecoveryPoint', 'ObjectTypeGenericRecoveryPoint', 'ObjectTypeIaasVMRecoveryPoint' ObjectType ObjectTypeBasicRecoveryPoint `json:"objectType,omitempty"` } @@ -11505,6 +12915,16 @@ func (grp GenericRecoveryPoint) AsAzureFileShareRecoveryPoint() (*AzureFileShare return nil, false } +// AsAzureWorkloadPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for GenericRecoveryPoint. +func (grp GenericRecoveryPoint) AsAzureWorkloadPointInTimeRecoveryPoint() (*AzureWorkloadPointInTimeRecoveryPoint, bool) { + return nil, false +} + +// AsBasicAzureWorkloadPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for GenericRecoveryPoint. 
+func (grp GenericRecoveryPoint) AsBasicAzureWorkloadPointInTimeRecoveryPoint() (BasicAzureWorkloadPointInTimeRecoveryPoint, bool) { + return nil, false +} + // AsAzureWorkloadRecoveryPoint is the BasicRecoveryPoint implementation for GenericRecoveryPoint. func (grp GenericRecoveryPoint) AsAzureWorkloadRecoveryPoint() (*AzureWorkloadRecoveryPoint, bool) { return nil, false @@ -11525,11 +12945,6 @@ func (grp GenericRecoveryPoint) AsAzureWorkloadSAPHanaRecoveryPoint() (*AzureWor return nil, false } -// AsBasicAzureWorkloadSAPHanaRecoveryPoint is the BasicRecoveryPoint implementation for GenericRecoveryPoint. -func (grp GenericRecoveryPoint) AsBasicAzureWorkloadSAPHanaRecoveryPoint() (BasicAzureWorkloadSAPHanaRecoveryPoint, bool) { - return nil, false -} - // AsAzureWorkloadSQLPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for GenericRecoveryPoint. func (grp GenericRecoveryPoint) AsAzureWorkloadSQLPointInTimeRecoveryPoint() (*AzureWorkloadSQLPointInTimeRecoveryPoint, bool) { return nil, false @@ -11640,7 +13055,7 @@ type IaaSVMContainer struct { RegistrationStatus *string `json:"registrationStatus,omitempty"` // HealthStatus - Status of health of the container. 
HealthStatus *string `json:"healthStatus,omitempty"` - // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadBackupRequest', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' + // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadContainer', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' ContainerType ContainerTypeBasicProtectionContainer `json:"containerType,omitempty"` } @@ -11766,6 +13181,11 @@ func (isc IaaSVMContainer) AsDpmContainer() (*DpmContainer, bool) { return nil, false } +// AsBasicDpmContainer is the BasicProtectionContainer implementation for IaaSVMContainer. +func (isc IaaSVMContainer) AsBasicDpmContainer() (BasicDpmContainer, bool) { + return nil, false +} + // AsGenericContainer is the BasicProtectionContainer implementation for IaaSVMContainer. func (isc IaaSVMContainer) AsGenericContainer() (*GenericContainer, bool) { return nil, false @@ -11866,7 +13286,7 @@ type IaaSVMProtectableItem struct { FriendlyName *string `json:"friendlyName,omitempty"` // ProtectionState - State of the back up item. 
Possible values include: 'ProtectionStatusInvalid', 'ProtectionStatusNotProtected', 'ProtectionStatusProtecting', 'ProtectionStatusProtected', 'ProtectionStatusProtectionFailed' ProtectionState ProtectionStatus `json:"protectionState,omitempty"` - // ProtectableItemType - Possible values include: 'ProtectableItemTypeWorkloadProtectableItem', 'ProtectableItemTypeAzureFileShare', 'ProtectableItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectableItemTypeMicrosoftComputevirtualMachines', 'ProtectableItemTypeAzureVMWorkloadProtectableItem', 'ProtectableItemTypeSAPHanaDatabase', 'ProtectableItemTypeSAPHanaSystem', 'ProtectableItemTypeSQLAvailabilityGroupContainer', 'ProtectableItemTypeSQLDataBase', 'ProtectableItemTypeSQLInstance', 'ProtectableItemTypeIaaSVMProtectableItem' + // ProtectableItemType - Possible values include: 'ProtectableItemTypeWorkloadProtectableItem', 'ProtectableItemTypeAzureFileShare', 'ProtectableItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectableItemTypeMicrosoftComputevirtualMachines', 'ProtectableItemTypeAzureVMWorkloadProtectableItem', 'ProtectableItemTypeSAPAseDatabase', 'ProtectableItemTypeSAPAseSystem', 'ProtectableItemTypeSAPHanaDatabase', 'ProtectableItemTypeSAPHanaSystem', 'ProtectableItemTypeSQLAvailabilityGroupContainer', 'ProtectableItemTypeSQLDataBase', 'ProtectableItemTypeSQLInstance', 'ProtectableItemTypeIaaSVMProtectableItem' ProtectableItemType ProtectableItemType `json:"protectableItemType,omitempty"` } @@ -11961,6 +13381,16 @@ func (ispi IaaSVMProtectableItem) AsBasicAzureVMWorkloadProtectableItem() (Basic return nil, false } +// AsAzureVMWorkloadSAPAseDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for IaaSVMProtectableItem. 
+func (ispi IaaSVMProtectableItem) AsAzureVMWorkloadSAPAseDatabaseProtectableItem() (*AzureVMWorkloadSAPAseDatabaseProtectableItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSAPAseSystemProtectableItem is the BasicWorkloadProtectableItem implementation for IaaSVMProtectableItem. +func (ispi IaaSVMProtectableItem) AsAzureVMWorkloadSAPAseSystemProtectableItem() (*AzureVMWorkloadSAPAseSystemProtectableItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for IaaSVMProtectableItem. func (ispi IaaSVMProtectableItem) AsAzureVMWorkloadSAPHanaDatabaseProtectableItem() (*AzureVMWorkloadSAPHanaDatabaseProtectableItem, bool) { return nil, false @@ -12032,7 +13462,7 @@ type IaasVMRecoveryPoint struct { OriginalStorageAccountOption *bool `json:"originalStorageAccountOption,omitempty"` // OsType - OS type OsType *string `json:"osType,omitempty"` - // ObjectType - Possible values include: 'ObjectTypeRecoveryPoint', 'ObjectTypeAzureFileShareRecoveryPoint', 'ObjectTypeAzureWorkloadRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaRecoveryPoint', 'ObjectTypeAzureWorkloadSQLPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSQLRecoveryPoint', 'ObjectTypeGenericRecoveryPoint', 'ObjectTypeIaasVMRecoveryPoint' + // ObjectType - Possible values include: 'ObjectTypeRecoveryPoint', 'ObjectTypeAzureFileShareRecoveryPoint', 'ObjectTypeAzureWorkloadPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaRecoveryPoint', 'ObjectTypeAzureWorkloadSQLPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSQLRecoveryPoint', 'ObjectTypeGenericRecoveryPoint', 'ObjectTypeIaasVMRecoveryPoint' ObjectType ObjectTypeBasicRecoveryPoint `json:"objectType,omitempty"` } @@ -12087,6 +13517,16 @@ func (ivrp IaasVMRecoveryPoint) AsAzureFileShareRecoveryPoint() 
(*AzureFileShare return nil, false } +// AsAzureWorkloadPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for IaasVMRecoveryPoint. +func (ivrp IaasVMRecoveryPoint) AsAzureWorkloadPointInTimeRecoveryPoint() (*AzureWorkloadPointInTimeRecoveryPoint, bool) { + return nil, false +} + +// AsBasicAzureWorkloadPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for IaasVMRecoveryPoint. +func (ivrp IaasVMRecoveryPoint) AsBasicAzureWorkloadPointInTimeRecoveryPoint() (BasicAzureWorkloadPointInTimeRecoveryPoint, bool) { + return nil, false +} + // AsAzureWorkloadRecoveryPoint is the BasicRecoveryPoint implementation for IaasVMRecoveryPoint. func (ivrp IaasVMRecoveryPoint) AsAzureWorkloadRecoveryPoint() (*AzureWorkloadRecoveryPoint, bool) { return nil, false @@ -12107,11 +13547,6 @@ func (ivrp IaasVMRecoveryPoint) AsAzureWorkloadSAPHanaRecoveryPoint() (*AzureWor return nil, false } -// AsBasicAzureWorkloadSAPHanaRecoveryPoint is the BasicRecoveryPoint implementation for IaasVMRecoveryPoint. -func (ivrp IaasVMRecoveryPoint) AsBasicAzureWorkloadSAPHanaRecoveryPoint() (BasicAzureWorkloadSAPHanaRecoveryPoint, bool) { - return nil, false -} - // AsAzureWorkloadSQLPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for IaasVMRecoveryPoint. func (ivrp IaasVMRecoveryPoint) AsAzureWorkloadSQLPointInTimeRecoveryPoint() (*AzureWorkloadSQLPointInTimeRecoveryPoint, bool) { return nil, false @@ -12156,35 +13591,35 @@ type IaasVMRestoreRequest struct { // SourceResourceID - Fully qualified ARM ID of the VM which is being recovered. SourceResourceID *string `json:"sourceResourceId,omitempty"` // TargetVirtualMachineID - This is the complete ARM Id of the VM that will be created. - // For e.g. /subscriptions/{subId}/resourcegroups/{rg}/provider/Microsoft.Compute/virtualmachines/{vm} + // For e.g. 
/subscriptions/{subId}/resourcegroups/{rg}/provider/Microsoft.Compute/virtualmachines/{vm} TargetVirtualMachineID *string `json:"targetVirtualMachineId,omitempty"` // TargetResourceGroupID - This is the ARM Id of the resource group that you want to create for this Virtual machine and other artifacts. - // For e.g. /subscriptions/{subId}/resourcegroups/{rg} + // For e.g. /subscriptions/{subId}/resourcegroups/{rg} TargetResourceGroupID *string `json:"targetResourceGroupId,omitempty"` // StorageAccountID - Fully qualified ARM ID of the storage account to which the VM has to be restored. StorageAccountID *string `json:"storageAccountId,omitempty"` // VirtualNetworkID - This is the virtual network Id of the vnet that will be attached to the virtual machine. - // User will be validated for join action permissions in the linked access. + // User will be validated for join action permissions in the linked access. VirtualNetworkID *string `json:"virtualNetworkId,omitempty"` // SubnetID - Subnet ID, is the subnet ID associated with the to be restored VM. For Classic VMs it would be - // {VnetID}/Subnet/{SubnetName} and, for the Azure Resource Manager VMs it would be ARM resource ID used to represent - // the subnet. + // {VnetID}/Subnet/{SubnetName} and, for the Azure Resource Manager VMs it would be ARM resource ID used to represent + // the subnet. SubnetID *string `json:"subnetId,omitempty"` // TargetDomainNameID - Fully qualified ARM ID of the domain name to be associated to the VM being restored. This applies only to Classic - // Virtual Machines. + // Virtual Machines. TargetDomainNameID *string `json:"targetDomainNameId,omitempty"` // Region - Region in which the virtual machine is restored. Region *string `json:"region,omitempty"` // AffinityGroup - Affinity group associated to VM to be restored. Used only for Classic Compute Virtual Machines. 
AffinityGroup *string `json:"affinityGroup,omitempty"` // CreateNewCloudService - Should a new cloud service be created while restoring the VM. If this is false, VM will be restored to the same - // cloud service as it was at the time of backup. + // cloud service as it was at the time of backup. CreateNewCloudService *bool `json:"createNewCloudService,omitempty"` // OriginalStorageAccountOption - Original Storage Account Option OriginalStorageAccountOption *bool `json:"originalStorageAccountOption,omitempty"` // EncryptionDetails - Details needed if the VM was encrypted at the time of backup. EncryptionDetails *EncryptionDetails `json:"encryptionDetails,omitempty"` - // ObjectType - Possible values include: 'ObjectTypeRestoreRequest', 'ObjectTypeAzureFileShareRestoreRequest', 'ObjectTypeAzureWorkloadRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaRestoreRequest', 'ObjectTypeAzureWorkloadSQLPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSQLRestoreRequest', 'ObjectTypeIaasVMRestoreRequest' + // ObjectType - Possible values include: 'ObjectTypeRestoreRequest', 'ObjectTypeAzureFileShareRestoreRequest', 'ObjectTypeAzureWorkloadPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaRestoreRequest', 'ObjectTypeAzureWorkloadSQLPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSQLRestoreRequest', 'ObjectTypeIaasVMRestoreRequest' ObjectType ObjectTypeBasicRestoreRequest `json:"objectType,omitempty"` } @@ -12245,6 +13680,11 @@ func (ivrr IaasVMRestoreRequest) AsAzureFileShareRestoreRequest() (*AzureFileSha return nil, false } +// AsAzureWorkloadPointInTimeRestoreRequest is the BasicRestoreRequest implementation for IaasVMRestoreRequest. 
+func (ivrr IaasVMRestoreRequest) AsAzureWorkloadPointInTimeRestoreRequest() (*AzureWorkloadPointInTimeRestoreRequest, bool) { + return nil, false +} + // AsAzureWorkloadRestoreRequest is the BasicRestoreRequest implementation for IaasVMRestoreRequest. func (ivrr IaasVMRestoreRequest) AsAzureWorkloadRestoreRequest() (*AzureWorkloadRestoreRequest, bool) { return nil, false @@ -12497,12 +13937,12 @@ func (irr *ILRRequestResource) UnmarshalJSON(body []byte) error { // InquiryInfo details about inquired protectable items under a given container. type InquiryInfo struct { // Status - Inquiry Status for this container such as - // InProgress | Failed | Succeeded + // InProgress | Failed | Succeeded Status *string `json:"status,omitempty"` // ErrorDetail - Error Details if the Status is non-success. ErrorDetail *ErrorDetail `json:"errorDetail,omitempty"` // InquiryDetails - Inquiry Details which will have workload specific details. - // For e.g. - For SQL and oracle this will contain different details. + // For e.g. - For SQL and oracle this will contain different details. InquiryDetails *[]WorkloadInquiryDetails `json:"inquiryDetails,omitempty"` } @@ -13145,7 +14585,7 @@ type MabContainer struct { RegistrationStatus *string `json:"registrationStatus,omitempty"` // HealthStatus - Status of health of the container. 
HealthStatus *string `json:"healthStatus,omitempty"` - // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadBackupRequest', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' + // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadContainer', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' ContainerType ContainerTypeBasicProtectionContainer `json:"containerType,omitempty"` } @@ -13242,6 +14682,11 @@ func (mc MabContainer) AsDpmContainer() (*DpmContainer, bool) { return nil, false } +// AsBasicDpmContainer is the BasicProtectionContainer implementation for MabContainer. +func (mc MabContainer) AsBasicDpmContainer() (BasicDpmContainer, bool) { + return nil, false +} + // AsGenericContainer is the BasicProtectionContainer implementation for MabContainer. func (mc MabContainer) AsGenericContainer() (*GenericContainer, bool) { return nil, false @@ -13276,7 +14721,7 @@ func (mc MabContainer) AsBasicProtectionContainer() (BasicProtectionContainer, b type MabContainerExtendedInfo struct { // LastRefreshedAt - Time stamp when this container was refreshed. 
LastRefreshedAt *date.Time `json:"lastRefreshedAt,omitempty"` - // BackupItemType - Type of backup items associated with this container. Possible values include: 'ItemTypeInvalid', 'ItemTypeVM', 'ItemTypeFileFolder', 'ItemTypeAzureSQLDb', 'ItemTypeSQLDB', 'ItemTypeExchange', 'ItemTypeSharepoint', 'ItemTypeVMwareVM', 'ItemTypeSystemState', 'ItemTypeClient', 'ItemTypeGenericDataSource', 'ItemTypeSQLDataBase', 'ItemTypeAzureFileShare', 'ItemTypeSAPHanaDatabase' + // BackupItemType - Type of backup items associated with this container. Possible values include: 'ItemTypeInvalid', 'ItemTypeVM', 'ItemTypeFileFolder', 'ItemTypeAzureSQLDb', 'ItemTypeSQLDB', 'ItemTypeExchange', 'ItemTypeSharepoint', 'ItemTypeVMwareVM', 'ItemTypeSystemState', 'ItemTypeClient', 'ItemTypeGenericDataSource', 'ItemTypeSQLDataBase', 'ItemTypeAzureFileShare', 'ItemTypeSAPHanaDatabase', 'ItemTypeSAPAseDatabase' BackupItemType ItemType `json:"backupItemType,omitempty"` // BackupItems - List of backup items associated with this container. BackupItems *[]string `json:"backupItems,omitempty"` @@ -13324,7 +14769,7 @@ type MabFileFolderProtectedItem struct { ExtendedInfo *MabFileFolderProtectedItemExtendedInfo `json:"extendedInfo,omitempty"` // BackupManagementType - Type of backup management for the backed up item. Possible values include: 'ManagementTypeInvalid', 'ManagementTypeAzureIaasVM', 'ManagementTypeMAB', 'ManagementTypeDPM', 'ManagementTypeAzureBackupServer', 'ManagementTypeAzureSQL', 'ManagementTypeAzureStorage', 'ManagementTypeAzureWorkload', 'ManagementTypeDefaultBackup' BackupManagementType ManagementType `json:"backupManagementType,omitempty"` - // WorkloadType - Type of workload this item represents. 
Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase' + // WorkloadType - Type of workload this item represents. Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase', 'DataSourceTypeSAPAseDatabase' WorkloadType DataSourceType `json:"workloadType,omitempty"` // ContainerName - Unique name of container ContainerName *string `json:"containerName,omitempty"` @@ -13338,7 +14783,7 @@ type MabFileFolderProtectedItem struct { BackupSetName *string `json:"backupSetName,omitempty"` // CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. 
Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover' CreateMode CreateMode `json:"createMode,omitempty"` - // ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem' + // ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem' ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"` } @@ -13437,6 +14882,11 @@ func (mffpi MabFileFolderProtectedItem) AsBasicAzureVMWorkloadProtectedItem() (B return nil, false } +// AsAzureVMWorkloadSAPAseDatabaseProtectedItem is the BasicProtectedItem implementation for MabFileFolderProtectedItem. 
+func (mffpi MabFileFolderProtectedItem) AsAzureVMWorkloadSAPAseDatabaseProtectedItem() (*AzureVMWorkloadSAPAseDatabaseProtectedItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseProtectedItem is the BasicProtectedItem implementation for MabFileFolderProtectedItem. func (mffpi MabFileFolderProtectedItem) AsAzureVMWorkloadSAPHanaDatabaseProtectedItem() (*AzureVMWorkloadSAPHanaDatabaseProtectedItem, bool) { return nil, false @@ -13492,7 +14942,7 @@ type MabJob struct { MabServerName *string `json:"mabServerName,omitempty"` // MabServerType - Server type of MAB container. Possible values include: 'MabServerTypeInvalid', 'MabServerTypeUnknown', 'MabServerTypeIaasVMContainer', 'MabServerTypeIaasVMServiceContainer', 'MabServerTypeDPMContainer', 'MabServerTypeAzureBackupServerContainer', 'MabServerTypeMABContainer', 'MabServerTypeCluster', 'MabServerTypeAzureSQLContainer', 'MabServerTypeWindows', 'MabServerTypeVCenter', 'MabServerTypeVMAppContainer', 'MabServerTypeSQLAGWorkLoadContainer', 'MabServerTypeStorageContainer', 'MabServerTypeGenericContainer' MabServerType MabServerType `json:"mabServerType,omitempty"` - // WorkloadType - Workload type of backup item. Possible values include: 'WorkloadTypeInvalid', 'WorkloadTypeVM', 'WorkloadTypeFileFolder', 'WorkloadTypeAzureSQLDb', 'WorkloadTypeSQLDB', 'WorkloadTypeExchange', 'WorkloadTypeSharepoint', 'WorkloadTypeVMwareVM', 'WorkloadTypeSystemState', 'WorkloadTypeClient', 'WorkloadTypeGenericDataSource', 'WorkloadTypeSQLDataBase', 'WorkloadTypeAzureFileShare', 'WorkloadTypeSAPHanaDatabase' + // WorkloadType - Workload type of backup item. 
Possible values include: 'WorkloadTypeInvalid', 'WorkloadTypeVM', 'WorkloadTypeFileFolder', 'WorkloadTypeAzureSQLDb', 'WorkloadTypeSQLDB', 'WorkloadTypeExchange', 'WorkloadTypeSharepoint', 'WorkloadTypeVMwareVM', 'WorkloadTypeSystemState', 'WorkloadTypeClient', 'WorkloadTypeGenericDataSource', 'WorkloadTypeSQLDataBase', 'WorkloadTypeAzureFileShare', 'WorkloadTypeSAPHanaDatabase', 'WorkloadTypeSAPAseDatabase' WorkloadType WorkloadType `json:"workloadType,omitempty"` // ErrorDetails - The errors. ErrorDetails *[]MabErrorInfo `json:"errorDetails,omitempty"` @@ -14379,10 +15829,12 @@ type PreBackupValidation struct { // 2. VM is already protected // 3. Any VM related configuration passed in properties. type PreValidateEnableBackupRequest struct { - // ResourceType - ProtectedItem Type- VM, SqlDataBase, AzureFileShare etc. Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase' + // ResourceType - ProtectedItem Type- VM, SqlDataBase, AzureFileShare etc. 
Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase', 'DataSourceTypeSAPAseDatabase' ResourceType DataSourceType `json:"resourceType,omitempty"` // ResourceID - ARM Virtual Machine Id ResourceID *string `json:"resourceId,omitempty"` + // VaultID - Specifies the arm resource id of the vault + VaultID *string `json:"vaultId,omitempty"` // Properties - Configuration of VM if any needs to be validated like OS type etc Properties *string `json:"properties,omitempty"` } @@ -14399,7 +15851,7 @@ type PreValidateEnableBackupResponse struct { // Recommendation - Recommended action for user Recommendation *string `json:"recommendation,omitempty"` // ContainerName - Specifies the product specific container name. E.g. iaasvmcontainer;iaasvmcontainer;rgname;vmname. This is required - // for portal + // for portal ContainerName *string `json:"containerName,omitempty"` // ProtectedItemName - Specifies the product specific ds name. E.g. vm;iaasvmcontainer;rgname;vmname. 
This is required for portal ProtectedItemName *string `json:"protectedItemName,omitempty"` @@ -14786,6 +16238,7 @@ type BasicProtectedItem interface { AsAzureSQLProtectedItem() (*AzureSQLProtectedItem, bool) AsAzureVMWorkloadProtectedItem() (*AzureVMWorkloadProtectedItem, bool) AsBasicAzureVMWorkloadProtectedItem() (BasicAzureVMWorkloadProtectedItem, bool) + AsAzureVMWorkloadSAPAseDatabaseProtectedItem() (*AzureVMWorkloadSAPAseDatabaseProtectedItem, bool) AsAzureVMWorkloadSAPHanaDatabaseProtectedItem() (*AzureVMWorkloadSAPHanaDatabaseProtectedItem, bool) AsAzureVMWorkloadSQLDatabaseProtectedItem() (*AzureVMWorkloadSQLDatabaseProtectedItem, bool) AsDPMProtectedItem() (*DPMProtectedItem, bool) @@ -14798,7 +16251,7 @@ type BasicProtectedItem interface { type ProtectedItem struct { // BackupManagementType - Type of backup management for the backed up item. Possible values include: 'ManagementTypeInvalid', 'ManagementTypeAzureIaasVM', 'ManagementTypeMAB', 'ManagementTypeDPM', 'ManagementTypeAzureBackupServer', 'ManagementTypeAzureSQL', 'ManagementTypeAzureStorage', 'ManagementTypeAzureWorkload', 'ManagementTypeDefaultBackup' BackupManagementType ManagementType `json:"backupManagementType,omitempty"` - // WorkloadType - Type of workload this item represents. Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase' + // WorkloadType - Type of workload this item represents. 
Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase', 'DataSourceTypeSAPAseDatabase' WorkloadType DataSourceType `json:"workloadType,omitempty"` // ContainerName - Unique name of container ContainerName *string `json:"containerName,omitempty"` @@ -14812,7 +16265,7 @@ type ProtectedItem struct { BackupSetName *string `json:"backupSetName,omitempty"` // CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover' CreateMode CreateMode `json:"createMode,omitempty"` - // ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem' + // ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 
'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem' ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"` } @@ -14848,6 +16301,10 @@ func unmarshalBasicProtectedItem(body []byte) (BasicProtectedItem, error) { var avwpi AzureVMWorkloadProtectedItem err := json.Unmarshal(body, &avwpi) return avwpi, err + case string(ProtectedItemTypeAzureVMWorkloadSAPAseDatabase): + var avwsadpi AzureVMWorkloadSAPAseDatabaseProtectedItem + err := json.Unmarshal(body, &avwsadpi) + return avwsadpi, err case string(ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase): var avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem err := json.Unmarshal(body, &avwshdpi) @@ -14967,6 +16424,11 @@ func (pi ProtectedItem) AsBasicAzureVMWorkloadProtectedItem() (BasicAzureVMWorkl return nil, false } +// AsAzureVMWorkloadSAPAseDatabaseProtectedItem is the BasicProtectedItem implementation for ProtectedItem. +func (pi ProtectedItem) AsAzureVMWorkloadSAPAseDatabaseProtectedItem() (*AzureVMWorkloadSAPAseDatabaseProtectedItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseProtectedItem is the BasicProtectedItem implementation for ProtectedItem. func (pi ProtectedItem) AsAzureVMWorkloadSAPHanaDatabaseProtectedItem() (*AzureVMWorkloadSAPHanaDatabaseProtectedItem, bool) { return nil, false @@ -15008,7 +16470,7 @@ type ProtectedItemQueryObject struct { HealthState HealthState `json:"healthState,omitempty"` // BackupManagementType - Backup management type for the backed up item. 
Possible values include: 'ManagementTypeInvalid', 'ManagementTypeAzureIaasVM', 'ManagementTypeMAB', 'ManagementTypeDPM', 'ManagementTypeAzureBackupServer', 'ManagementTypeAzureSQL', 'ManagementTypeAzureStorage', 'ManagementTypeAzureWorkload', 'ManagementTypeDefaultBackup' BackupManagementType ManagementType `json:"backupManagementType,omitempty"` - // ItemType - Type of workload this item represents. Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase' + // ItemType - Type of workload this item represents. Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase', 'DataSourceTypeSAPAseDatabase' ItemType DataSourceType `json:"itemType,omitempty"` // PolicyName - Backup policy name associated with the backup item. 
PolicyName *string `json:"policyName,omitempty"` @@ -15304,6 +16766,7 @@ type BasicProtectionContainer interface { AsAzureWorkloadContainer() (*AzureWorkloadContainer, bool) AsBasicAzureWorkloadContainer() (BasicAzureWorkloadContainer, bool) AsDpmContainer() (*DpmContainer, bool) + AsBasicDpmContainer() (BasicDpmContainer, bool) AsGenericContainer() (*GenericContainer, bool) AsIaaSVMContainer() (*IaaSVMContainer, bool) AsBasicIaaSVMContainer() (BasicIaaSVMContainer, bool) @@ -15322,7 +16785,7 @@ type ProtectionContainer struct { RegistrationStatus *string `json:"registrationStatus,omitempty"` // HealthStatus - Status of health of the container. HealthStatus *string `json:"healthStatus,omitempty"` - // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadBackupRequest', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' + // ContainerType - Possible values include: 'ContainerTypeProtectionContainer', 'ContainerTypeAzureBackupServerContainer1', 'ContainerTypeMicrosoftClassicComputevirtualMachines', 'ContainerTypeMicrosoftComputevirtualMachines', 'ContainerTypeSQLAGWorkLoadContainer1', 'ContainerTypeAzureSQLContainer1', 'ContainerTypeStorageContainer1', 'ContainerTypeVMAppContainer1', 'ContainerTypeAzureWorkloadContainer', 'ContainerTypeDPMContainer1', 'ContainerTypeGenericContainer1', 'ContainerTypeIaaSVMContainer', 'ContainerTypeWindows1' ContainerType ContainerTypeBasicProtectionContainer `json:"containerType,omitempty"` } @@ -15362,7 +16825,7 @@ func unmarshalBasicProtectionContainer(body []byte) (BasicProtectionContainer, e var avacpc 
AzureVMAppContainerProtectionContainer err := json.Unmarshal(body, &avacpc) return avacpc, err - case string(ContainerTypeAzureWorkloadBackupRequest): + case string(ContainerTypeAzureWorkloadContainer): var awc AzureWorkloadContainer err := json.Unmarshal(body, &awc) return awc, err @@ -15479,6 +16942,11 @@ func (pc ProtectionContainer) AsDpmContainer() (*DpmContainer, bool) { return nil, false } +// AsBasicDpmContainer is the BasicProtectionContainer implementation for ProtectionContainer. +func (pc ProtectionContainer) AsBasicDpmContainer() (BasicDpmContainer, bool) { + return nil, false +} + // AsGenericContainer is the BasicProtectionContainer implementation for ProtectionContainer. func (pc ProtectionContainer) AsGenericContainer() (*GenericContainer, bool) { return nil, false @@ -16335,7 +17803,7 @@ type ProtectionPolicyQueryObject struct { BackupManagementType ManagementType `json:"backupManagementType,omitempty"` // FabricName - Fabric name for filter FabricName *string `json:"fabricName,omitempty"` - // WorkloadType - Workload type for the backup policy. Possible values include: 'WorkloadTypeInvalid', 'WorkloadTypeVM', 'WorkloadTypeFileFolder', 'WorkloadTypeAzureSQLDb', 'WorkloadTypeSQLDB', 'WorkloadTypeExchange', 'WorkloadTypeSharepoint', 'WorkloadTypeVMwareVM', 'WorkloadTypeSystemState', 'WorkloadTypeClient', 'WorkloadTypeGenericDataSource', 'WorkloadTypeSQLDataBase', 'WorkloadTypeAzureFileShare', 'WorkloadTypeSAPHanaDatabase' + // WorkloadType - Workload type for the backup policy. 
Possible values include: 'WorkloadTypeInvalid', 'WorkloadTypeVM', 'WorkloadTypeFileFolder', 'WorkloadTypeAzureSQLDb', 'WorkloadTypeSQLDB', 'WorkloadTypeExchange', 'WorkloadTypeSharepoint', 'WorkloadTypeVMwareVM', 'WorkloadTypeSystemState', 'WorkloadTypeClient', 'WorkloadTypeGenericDataSource', 'WorkloadTypeSQLDataBase', 'WorkloadTypeAzureFileShare', 'WorkloadTypeSAPHanaDatabase', 'WorkloadTypeSAPAseDatabase' WorkloadType WorkloadType `json:"workloadType,omitempty"` } @@ -16611,11 +18079,12 @@ func NewProtectionPolicyResourceListPage(getNextPage func(context.Context, Prote // BasicRecoveryPoint base class for backup copies. Workload-specific backup copies are derived from this class. type BasicRecoveryPoint interface { AsAzureFileShareRecoveryPoint() (*AzureFileShareRecoveryPoint, bool) + AsAzureWorkloadPointInTimeRecoveryPoint() (*AzureWorkloadPointInTimeRecoveryPoint, bool) + AsBasicAzureWorkloadPointInTimeRecoveryPoint() (BasicAzureWorkloadPointInTimeRecoveryPoint, bool) AsAzureWorkloadRecoveryPoint() (*AzureWorkloadRecoveryPoint, bool) AsBasicAzureWorkloadRecoveryPoint() (BasicAzureWorkloadRecoveryPoint, bool) AsAzureWorkloadSAPHanaPointInTimeRecoveryPoint() (*AzureWorkloadSAPHanaPointInTimeRecoveryPoint, bool) AsAzureWorkloadSAPHanaRecoveryPoint() (*AzureWorkloadSAPHanaRecoveryPoint, bool) - AsBasicAzureWorkloadSAPHanaRecoveryPoint() (BasicAzureWorkloadSAPHanaRecoveryPoint, bool) AsAzureWorkloadSQLPointInTimeRecoveryPoint() (*AzureWorkloadSQLPointInTimeRecoveryPoint, bool) AsAzureWorkloadSQLRecoveryPoint() (*AzureWorkloadSQLRecoveryPoint, bool) AsBasicAzureWorkloadSQLRecoveryPoint() (BasicAzureWorkloadSQLRecoveryPoint, bool) @@ -16626,7 +18095,7 @@ type BasicRecoveryPoint interface { // RecoveryPoint base class for backup copies. Workload-specific backup copies are derived from this class. 
type RecoveryPoint struct { - // ObjectType - Possible values include: 'ObjectTypeRecoveryPoint', 'ObjectTypeAzureFileShareRecoveryPoint', 'ObjectTypeAzureWorkloadRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaRecoveryPoint', 'ObjectTypeAzureWorkloadSQLPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSQLRecoveryPoint', 'ObjectTypeGenericRecoveryPoint', 'ObjectTypeIaasVMRecoveryPoint' + // ObjectType - Possible values include: 'ObjectTypeRecoveryPoint', 'ObjectTypeAzureFileShareRecoveryPoint', 'ObjectTypeAzureWorkloadPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSAPHanaRecoveryPoint', 'ObjectTypeAzureWorkloadSQLPointInTimeRecoveryPoint', 'ObjectTypeAzureWorkloadSQLRecoveryPoint', 'ObjectTypeGenericRecoveryPoint', 'ObjectTypeIaasVMRecoveryPoint' ObjectType ObjectTypeBasicRecoveryPoint `json:"objectType,omitempty"` } @@ -16642,6 +18111,10 @@ func unmarshalBasicRecoveryPoint(body []byte) (BasicRecoveryPoint, error) { var afsrp AzureFileShareRecoveryPoint err := json.Unmarshal(body, &afsrp) return afsrp, err + case string(ObjectTypeAzureWorkloadPointInTimeRecoveryPoint): + var awpitrp AzureWorkloadPointInTimeRecoveryPoint + err := json.Unmarshal(body, &awpitrp) + return awpitrp, err case string(ObjectTypeAzureWorkloadRecoveryPoint): var awrp AzureWorkloadRecoveryPoint err := json.Unmarshal(body, &awrp) @@ -16710,6 +18183,16 @@ func (rp RecoveryPoint) AsAzureFileShareRecoveryPoint() (*AzureFileShareRecovery return nil, false } +// AsAzureWorkloadPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for RecoveryPoint. +func (rp RecoveryPoint) AsAzureWorkloadPointInTimeRecoveryPoint() (*AzureWorkloadPointInTimeRecoveryPoint, bool) { + return nil, false +} + +// AsBasicAzureWorkloadPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for RecoveryPoint. 
+func (rp RecoveryPoint) AsBasicAzureWorkloadPointInTimeRecoveryPoint() (BasicAzureWorkloadPointInTimeRecoveryPoint, bool) { + return nil, false +} + // AsAzureWorkloadRecoveryPoint is the BasicRecoveryPoint implementation for RecoveryPoint. func (rp RecoveryPoint) AsAzureWorkloadRecoveryPoint() (*AzureWorkloadRecoveryPoint, bool) { return nil, false @@ -16730,11 +18213,6 @@ func (rp RecoveryPoint) AsAzureWorkloadSAPHanaRecoveryPoint() (*AzureWorkloadSAP return nil, false } -// AsBasicAzureWorkloadSAPHanaRecoveryPoint is the BasicRecoveryPoint implementation for RecoveryPoint. -func (rp RecoveryPoint) AsBasicAzureWorkloadSAPHanaRecoveryPoint() (BasicAzureWorkloadSAPHanaRecoveryPoint, bool) { - return nil, false -} - // AsAzureWorkloadSQLPointInTimeRecoveryPoint is the BasicRecoveryPoint implementation for RecoveryPoint. func (rp RecoveryPoint) AsAzureWorkloadSQLPointInTimeRecoveryPoint() (*AzureWorkloadSQLPointInTimeRecoveryPoint, bool) { return nil, false @@ -17434,6 +18912,7 @@ type RestoreFileSpecs struct { // BasicRestoreRequest base class for restore request. Workload-specific restore requests are derived from this class. type BasicRestoreRequest interface { AsAzureFileShareRestoreRequest() (*AzureFileShareRestoreRequest, bool) + AsAzureWorkloadPointInTimeRestoreRequest() (*AzureWorkloadPointInTimeRestoreRequest, bool) AsAzureWorkloadRestoreRequest() (*AzureWorkloadRestoreRequest, bool) AsBasicAzureWorkloadRestoreRequest() (BasicAzureWorkloadRestoreRequest, bool) AsAzureWorkloadSAPHanaPointInTimeRestoreRequest() (*AzureWorkloadSAPHanaPointInTimeRestoreRequest, bool) @@ -17449,7 +18928,7 @@ type BasicRestoreRequest interface { // RestoreRequest base class for restore request. Workload-specific restore requests are derived from this // class. 
type RestoreRequest struct { - // ObjectType - Possible values include: 'ObjectTypeRestoreRequest', 'ObjectTypeAzureFileShareRestoreRequest', 'ObjectTypeAzureWorkloadRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaRestoreRequest', 'ObjectTypeAzureWorkloadSQLPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSQLRestoreRequest', 'ObjectTypeIaasVMRestoreRequest' + // ObjectType - Possible values include: 'ObjectTypeRestoreRequest', 'ObjectTypeAzureFileShareRestoreRequest', 'ObjectTypeAzureWorkloadPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSAPHanaRestoreRequest', 'ObjectTypeAzureWorkloadSQLPointInTimeRestoreRequest', 'ObjectTypeAzureWorkloadSQLRestoreRequest', 'ObjectTypeIaasVMRestoreRequest' ObjectType ObjectTypeBasicRestoreRequest `json:"objectType,omitempty"` } @@ -17465,6 +18944,10 @@ func unmarshalBasicRestoreRequest(body []byte) (BasicRestoreRequest, error) { var afsrr AzureFileShareRestoreRequest err := json.Unmarshal(body, &afsrr) return afsrr, err + case string(ObjectTypeAzureWorkloadPointInTimeRestoreRequest): + var awpitrr AzureWorkloadPointInTimeRestoreRequest + err := json.Unmarshal(body, &awpitrr) + return awpitrr, err case string(ObjectTypeAzureWorkloadRestoreRequest): var awrr AzureWorkloadRestoreRequest err := json.Unmarshal(body, &awrr) @@ -17529,6 +19012,11 @@ func (rr RestoreRequest) AsAzureFileShareRestoreRequest() (*AzureFileShareRestor return nil, false } +// AsAzureWorkloadPointInTimeRestoreRequest is the BasicRestoreRequest implementation for RestoreRequest. +func (rr RestoreRequest) AsAzureWorkloadPointInTimeRestoreRequest() (*AzureWorkloadPointInTimeRestoreRequest, bool) { + return nil, false +} + // AsAzureWorkloadRestoreRequest is the BasicRestoreRequest implementation for RestoreRequest. 
func (rr RestoreRequest) AsAzureWorkloadRestoreRequest() (*AzureWorkloadRestoreRequest, bool) { return nil, false @@ -17708,7 +19196,7 @@ func (rrr *RestoreRequestResource) UnmarshalJSON(body []byte) error { // RetentionDuration retention duration. type RetentionDuration struct { // Count - Count of duration types. Retention duration is obtained by the counting the duration type Count times. - // For example, when Count = 3 and DurationType = Weeks, retention duration will be three weeks. + // For example, when Count = 3 and DurationType = Weeks, retention duration will be three weeks. Count *int32 `json:"count,omitempty"` // DurationType - Retention duration type of retention policy. Possible values include: 'RetentionDurationTypeInvalid', 'RetentionDurationTypeDays', 'RetentionDurationTypeWeeks', 'RetentionDurationTypeMonths', 'RetentionDurationTypeYears' DurationType RetentionDurationType `json:"durationType,omitempty"` @@ -17899,7 +19387,7 @@ type Settings struct { // Issqlcompression - SQL compression flag Issqlcompression *bool `json:"issqlcompression,omitempty"` // IsCompression - Workload compression flag. This has been added so that 'isSqlCompression' - // will be deprecated once clients upgrade to consider this flag. + // will be deprecated once clients upgrade to consider this flag. IsCompression *bool `json:"isCompression,omitempty"` } @@ -18029,7 +19517,7 @@ type SQLDataDirectoryMapping struct { // StatusRequest backupStatus request. type StatusRequest struct { - // ResourceType - Container Type - VM, SQLPaaS, DPM, AzureFileShare. 
Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase' + // ResourceType - Container Type - VM, SQLPaaS, DPM, AzureFileShare. Possible values include: 'DataSourceTypeInvalid', 'DataSourceTypeVM', 'DataSourceTypeFileFolder', 'DataSourceTypeAzureSQLDb', 'DataSourceTypeSQLDB', 'DataSourceTypeExchange', 'DataSourceTypeSharepoint', 'DataSourceTypeVMwareVM', 'DataSourceTypeSystemState', 'DataSourceTypeClient', 'DataSourceTypeGenericDataSource', 'DataSourceTypeSQLDataBase', 'DataSourceTypeAzureFileShare', 'DataSourceTypeSAPHanaDatabase', 'DataSourceTypeSAPAseDatabase' ResourceType DataSourceType `json:"resourceType,omitempty"` // ResourceID - Entire ARM resource id of the resource ResourceID *string `json:"resourceId,omitempty"` @@ -18042,6 +19530,8 @@ type StatusResponse struct { autorest.Response `json:"-"` // ProtectionStatus - Specifies whether the container is registered or not. Possible values include: 'ProtectionStatusInvalid', 'ProtectionStatusNotProtected', 'ProtectionStatusProtecting', 'ProtectionStatusProtected', 'ProtectionStatusProtectionFailed' ProtectionStatus ProtectionStatus `json:"protectionStatus,omitempty"` + // VaultID - Specifies the arm resource id of the vault + VaultID *string `json:"vaultId,omitempty"` // FabricName - Specifies the fabric name - Azure or AD. Possible values include: 'FabricNameInvalid', 'FabricNameAzure' FabricName FabricName `json:"fabricName,omitempty"` // ContainerName - Specifies the product specific container name. E.g. iaasvmcontainer;iaasvmcontainer;csname;vmname. 
@@ -18466,6 +19956,8 @@ type WorkloadInquiryDetails struct { type BasicWorkloadItem interface { AsAzureVMWorkloadItem() (*AzureVMWorkloadItem, bool) AsBasicAzureVMWorkloadItem() (BasicAzureVMWorkloadItem, bool) + AsAzureVMWorkloadSAPAseDatabaseWorkloadItem() (*AzureVMWorkloadSAPAseDatabaseWorkloadItem, bool) + AsAzureVMWorkloadSAPAseSystemWorkloadItem() (*AzureVMWorkloadSAPAseSystemWorkloadItem, bool) AsAzureVMWorkloadSAPHanaDatabaseWorkloadItem() (*AzureVMWorkloadSAPHanaDatabaseWorkloadItem, bool) AsAzureVMWorkloadSAPHanaSystemWorkloadItem() (*AzureVMWorkloadSAPHanaSystemWorkloadItem, bool) AsAzureVMWorkloadSQLDatabaseWorkloadItem() (*AzureVMWorkloadSQLDatabaseWorkloadItem, bool) @@ -18483,7 +19975,7 @@ type WorkloadItem struct { FriendlyName *string `json:"friendlyName,omitempty"` // ProtectionState - State of the back up item. Possible values include: 'ProtectionStatusInvalid', 'ProtectionStatusNotProtected', 'ProtectionStatusProtecting', 'ProtectionStatusProtected', 'ProtectionStatusProtectionFailed' ProtectionState ProtectionStatus `json:"protectionState,omitempty"` - // WorkloadItemType - Possible values include: 'WorkloadItemTypeWorkloadItem', 'WorkloadItemTypeAzureVMWorkloadItem', 'WorkloadItemTypeSAPHanaDatabase1', 'WorkloadItemTypeSAPHanaSystem1', 'WorkloadItemTypeSQLDataBase1', 'WorkloadItemTypeSQLInstance1' + // WorkloadItemType - Possible values include: 'WorkloadItemTypeWorkloadItem', 'WorkloadItemTypeAzureVMWorkloadItem', 'WorkloadItemTypeSAPAseDatabase1', 'WorkloadItemTypeSAPAseSystem1', 'WorkloadItemTypeSAPHanaDatabase1', 'WorkloadItemTypeSAPHanaSystem1', 'WorkloadItemTypeSQLDataBase1', 'WorkloadItemTypeSQLInstance1' WorkloadItemType WorkloadItemTypeBasicWorkloadItem `json:"workloadItemType,omitempty"` } @@ -18499,6 +19991,14 @@ func unmarshalBasicWorkloadItem(body []byte) (BasicWorkloadItem, error) { var avwi AzureVMWorkloadItem err := json.Unmarshal(body, &avwi) return avwi, err + case string(WorkloadItemTypeSAPAseDatabase1): + var avwsadwi 
AzureVMWorkloadSAPAseDatabaseWorkloadItem + err := json.Unmarshal(body, &avwsadwi) + return avwsadwi, err + case string(WorkloadItemTypeSAPAseSystem1): + var avwsaswi AzureVMWorkloadSAPAseSystemWorkloadItem + err := json.Unmarshal(body, &avwsaswi) + return avwsaswi, err case string(WorkloadItemTypeSAPHanaDatabase1): var avwshdwi AzureVMWorkloadSAPHanaDatabaseWorkloadItem err := json.Unmarshal(body, &avwshdwi) @@ -18572,6 +20072,16 @@ func (wi WorkloadItem) AsBasicAzureVMWorkloadItem() (BasicAzureVMWorkloadItem, b return nil, false } +// AsAzureVMWorkloadSAPAseDatabaseWorkloadItem is the BasicWorkloadItem implementation for WorkloadItem. +func (wi WorkloadItem) AsAzureVMWorkloadSAPAseDatabaseWorkloadItem() (*AzureVMWorkloadSAPAseDatabaseWorkloadItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSAPAseSystemWorkloadItem is the BasicWorkloadItem implementation for WorkloadItem. +func (wi WorkloadItem) AsAzureVMWorkloadSAPAseSystemWorkloadItem() (*AzureVMWorkloadSAPAseSystemWorkloadItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseWorkloadItem is the BasicWorkloadItem implementation for WorkloadItem. 
func (wi WorkloadItem) AsAzureVMWorkloadSAPHanaDatabaseWorkloadItem() (*AzureVMWorkloadSAPHanaDatabaseWorkloadItem, bool) { return nil, false @@ -18876,6 +20386,8 @@ type BasicWorkloadProtectableItem interface { AsAzureIaaSComputeVMProtectableItem() (*AzureIaaSComputeVMProtectableItem, bool) AsAzureVMWorkloadProtectableItem() (*AzureVMWorkloadProtectableItem, bool) AsBasicAzureVMWorkloadProtectableItem() (BasicAzureVMWorkloadProtectableItem, bool) + AsAzureVMWorkloadSAPAseDatabaseProtectableItem() (*AzureVMWorkloadSAPAseDatabaseProtectableItem, bool) + AsAzureVMWorkloadSAPAseSystemProtectableItem() (*AzureVMWorkloadSAPAseSystemProtectableItem, bool) AsAzureVMWorkloadSAPHanaDatabaseProtectableItem() (*AzureVMWorkloadSAPHanaDatabaseProtectableItem, bool) AsAzureVMWorkloadSAPHanaSystemProtectableItem() (*AzureVMWorkloadSAPHanaSystemProtectableItem, bool) AsAzureVMWorkloadSQLAvailabilityGroupProtectableItem() (*AzureVMWorkloadSQLAvailabilityGroupProtectableItem, bool) @@ -18897,7 +20409,7 @@ type WorkloadProtectableItem struct { FriendlyName *string `json:"friendlyName,omitempty"` // ProtectionState - State of the back up item. 
Possible values include: 'ProtectionStatusInvalid', 'ProtectionStatusNotProtected', 'ProtectionStatusProtecting', 'ProtectionStatusProtected', 'ProtectionStatusProtectionFailed' ProtectionState ProtectionStatus `json:"protectionState,omitempty"` - // ProtectableItemType - Possible values include: 'ProtectableItemTypeWorkloadProtectableItem', 'ProtectableItemTypeAzureFileShare', 'ProtectableItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectableItemTypeMicrosoftComputevirtualMachines', 'ProtectableItemTypeAzureVMWorkloadProtectableItem', 'ProtectableItemTypeSAPHanaDatabase', 'ProtectableItemTypeSAPHanaSystem', 'ProtectableItemTypeSQLAvailabilityGroupContainer', 'ProtectableItemTypeSQLDataBase', 'ProtectableItemTypeSQLInstance', 'ProtectableItemTypeIaaSVMProtectableItem' + // ProtectableItemType - Possible values include: 'ProtectableItemTypeWorkloadProtectableItem', 'ProtectableItemTypeAzureFileShare', 'ProtectableItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectableItemTypeMicrosoftComputevirtualMachines', 'ProtectableItemTypeAzureVMWorkloadProtectableItem', 'ProtectableItemTypeSAPAseDatabase', 'ProtectableItemTypeSAPAseSystem', 'ProtectableItemTypeSAPHanaDatabase', 'ProtectableItemTypeSAPHanaSystem', 'ProtectableItemTypeSQLAvailabilityGroupContainer', 'ProtectableItemTypeSQLDataBase', 'ProtectableItemTypeSQLInstance', 'ProtectableItemTypeIaaSVMProtectableItem' ProtectableItemType ProtectableItemType `json:"protectableItemType,omitempty"` } @@ -18925,6 +20437,14 @@ func unmarshalBasicWorkloadProtectableItem(body []byte) (BasicWorkloadProtectabl var avwpi AzureVMWorkloadProtectableItem err := json.Unmarshal(body, &avwpi) return avwpi, err + case string(ProtectableItemTypeSAPAseDatabase): + var avwsadpi AzureVMWorkloadSAPAseDatabaseProtectableItem + err := json.Unmarshal(body, &avwsadpi) + return avwsadpi, err + case string(ProtectableItemTypeSAPAseSystem): + var avwsaspi AzureVMWorkloadSAPAseSystemProtectableItem + err := json.Unmarshal(body, 
&avwsaspi) + return avwsaspi, err case string(ProtectableItemTypeSAPHanaDatabase): var avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectableItem err := json.Unmarshal(body, &avwshdpi) @@ -19021,6 +20541,16 @@ func (wpi WorkloadProtectableItem) AsBasicAzureVMWorkloadProtectableItem() (Basi return nil, false } +// AsAzureVMWorkloadSAPAseDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for WorkloadProtectableItem. +func (wpi WorkloadProtectableItem) AsAzureVMWorkloadSAPAseDatabaseProtectableItem() (*AzureVMWorkloadSAPAseDatabaseProtectableItem, bool) { + return nil, false +} + +// AsAzureVMWorkloadSAPAseSystemProtectableItem is the BasicWorkloadProtectableItem implementation for WorkloadProtectableItem. +func (wpi WorkloadProtectableItem) AsAzureVMWorkloadSAPAseSystemProtectableItem() (*AzureVMWorkloadSAPAseSystemProtectableItem, bool) { + return nil, false +} + // AsAzureVMWorkloadSAPHanaDatabaseProtectableItem is the BasicWorkloadProtectableItem implementation for WorkloadProtectableItem. func (wpi WorkloadProtectableItem) AsAzureVMWorkloadSAPHanaDatabaseProtectableItem() (*AzureVMWorkloadSAPHanaDatabaseProtectableItem, bool) { return nil, false diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2017-07-01/backup/protectablecontainers.go b/vendor/github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2017-07-01/backup/protectablecontainers.go index fe7feea3f318..6a63dbccc292 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2017-07-01/backup/protectablecontainers.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2017-07-01/backup/protectablecontainers.go @@ -44,7 +44,6 @@ func NewProtectableContainersClientWithBaseURI(baseURI string, subscriptionID st // Parameters: // vaultName - the name of the recovery services vault. // resourceGroupName - the name of the resource group where the recovery services vault is present. 
-// fabricName - fabric name associated with the container. // filter - oData filter options. func (client ProtectableContainersClient) List(ctx context.Context, vaultName string, resourceGroupName string, fabricName string, filter string) (result ProtectableContainerResourceListPage, err error) { if tracing.IsEnabled() { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2017-07-01/backup/resourcestorageconfigs.go b/vendor/github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2017-07-01/backup/resourcestorageconfigs.go index 11978bf5725e..be50859235d6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2017-07-01/backup/resourcestorageconfigs.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2017-07-01/backup/resourcestorageconfigs.go @@ -117,14 +117,14 @@ func (client ResourceStorageConfigsClient) GetResponder(resp *http.Response) (re return } -// Update updates vault storage model type. +// Patch updates vault storage model type. // Parameters: // vaultName - the name of the recovery services vault. // resourceGroupName - the name of the resource group where the recovery services vault is present. 
// parameters - vault storage config request -func (client ResourceStorageConfigsClient) Update(ctx context.Context, vaultName string, resourceGroupName string, parameters ResourceConfigResource) (result autorest.Response, err error) { +func (client ResourceStorageConfigsClient) Patch(ctx context.Context, vaultName string, resourceGroupName string, parameters ResourceConfigResource) (result autorest.Response, err error) { if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ResourceStorageConfigsClient.Update") + ctx = tracing.StartSpan(ctx, fqdn+"/ResourceStorageConfigsClient.Patch") defer func() { sc := -1 if result.Response != nil { @@ -133,6 +133,85 @@ func (client ResourceStorageConfigsClient) Update(ctx context.Context, vaultName tracing.EndSpan(ctx, sc, err) }() } + req, err := client.PatchPreparer(ctx, vaultName, resourceGroupName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "backup.ResourceStorageConfigsClient", "Patch", nil, "Failure preparing request") + return + } + + resp, err := client.PatchSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "backup.ResourceStorageConfigsClient", "Patch", resp, "Failure sending request") + return + } + + result, err = client.PatchResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "backup.ResourceStorageConfigsClient", "Patch", resp, "Failure responding to request") + } + + return +} + +// PatchPreparer prepares the Patch request. 
+func (client ResourceStorageConfigsClient) PatchPreparer(ctx context.Context, vaultName string, resourceGroupName string, parameters ResourceConfigResource) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vaultName": autorest.Encode("path", vaultName), + } + + const APIVersion = "2016-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupstorageconfig/vaultstorageconfig", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// PatchSender sends the Patch request. The method will close the +// http.Response Body if it receives an error. +func (client ResourceStorageConfigsClient) PatchSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// PatchResponder handles the response to the Patch request. The method always +// closes the http.Response Body. +func (client ResourceStorageConfigsClient) PatchResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Update updates vault storage model type. +// Parameters: +// vaultName - the name of the recovery services vault. 
+// resourceGroupName - the name of the resource group where the recovery services vault is present. +// parameters - vault storage config request +func (client ResourceStorageConfigsClient) Update(ctx context.Context, vaultName string, resourceGroupName string, parameters ResourceConfigResource) (result ResourceConfigResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ResourceStorageConfigsClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } req, err := client.UpdatePreparer(ctx, vaultName, resourceGroupName, parameters) if err != nil { err = autorest.NewErrorWithError(err, "backup.ResourceStorageConfigsClient", "Update", nil, "Failure preparing request") @@ -141,7 +220,7 @@ func (client ResourceStorageConfigsClient) Update(ctx context.Context, vaultName resp, err := client.UpdateSender(req) if err != nil { - result.Response = resp + result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "backup.ResourceStorageConfigsClient", "Update", resp, "Failure sending request") return } @@ -169,7 +248,7 @@ func (client ResourceStorageConfigsClient) UpdatePreparer(ctx context.Context, v preparer := autorest.CreatePreparer( autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), + autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupstorageconfig/vaultstorageconfig", pathParameters), autorest.WithJSON(parameters), @@ -186,12 +265,13 @@ func (client ResourceStorageConfigsClient) UpdateSender(req *http.Request) (*htt // UpdateResponder handles the response to the Update request. The method always // closes the http.Response Body. 
-func (client ResourceStorageConfigsClient) UpdateResponder(resp *http.Response) (result autorest.Response, err error) { +func (client ResourceStorageConfigsClient) UpdateResponder(resp *http.Response) (result ResourceConfigResource, err error) { err = autorest.Respond( resp, client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/disasterrecoveryconfigs.go b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/disasterrecoveryconfigs.go index cfbb59588494..f96329e6c249 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/disasterrecoveryconfigs.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/disasterrecoveryconfigs.go @@ -411,7 +411,7 @@ func (client DisasterRecoveryConfigsClient) DeleteResponder(resp *http.Response) return } -// FailOver envokes GEO DR failover and reconfigure the alias to point to the secondary namespace +// FailOver invokes GEO DR failover and reconfigure the alias to point to the secondary namespace // Parameters: // resourceGroupName - name of the Resource group within the Azure subscription. // namespaceName - the namespace name @@ -599,7 +599,7 @@ func (client DisasterRecoveryConfigsClient) GetResponder(resp *http.Response) (r // resourceGroupName - name of the Resource group within the Azure subscription. // namespaceName - the namespace name // alias - the Disaster Recovery configuration name -// authorizationRuleName - the authorizationrule name. +// authorizationRuleName - the authorization rule name. 
func (client DisasterRecoveryConfigsClient) GetAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, alias string, authorizationRuleName string) (result SBAuthorizationRule, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.GetAuthorizationRule") @@ -951,7 +951,7 @@ func (client DisasterRecoveryConfigsClient) ListAuthorizationRulesComplete(ctx c // resourceGroupName - name of the Resource group within the Azure subscription. // namespaceName - the namespace name // alias - the Disaster Recovery configuration name -// authorizationRuleName - the authorizationrule name. +// authorizationRuleName - the authorization rule name. func (client DisasterRecoveryConfigsClient) ListKeys(ctx context.Context, resourceGroupName string, namespaceName string, alias string, authorizationRuleName string) (result AccessKeys, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.ListKeys") diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/migrationconfigs.go b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/migrationconfigs.go index 2482262ae9f1..208f2c86cfcb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/migrationconfigs.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/migrationconfigs.go @@ -42,7 +42,7 @@ func NewMigrationConfigsClientWithBaseURI(baseURI string, subscriptionID string) } // CompleteMigration this operation Completes Migration of entities by pointing the connection strings to Premium -// namespace and any enties created after the operation will be under Premium Namespace. CompleteMigration operation +// namespace and any entities created after the operation will be under Premium Namespace. 
CompleteMigration operation // will fail when entity migration is in-progress. // Parameters: // resourceGroupName - name of the Resource group within the Azure subscription. @@ -130,7 +130,7 @@ func (client MigrationConfigsClient) CompleteMigrationResponder(resp *http.Respo return } -// CreateAndStartMigration creates Migration configuration and starts migration of enties from Standard to Premium +// CreateAndStartMigration creates Migration configuration and starts migration of entities from Standard to Premium // namespace // Parameters: // resourceGroupName - name of the Resource group within the Azure subscription. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/models.go index 25ca0363c952..f4d7a295dc55 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/models.go @@ -515,9 +515,9 @@ type ArmDisasterRecoveryProperties struct { ProvisioningState ProvisioningStateDR `json:"provisioningState,omitempty"` // PendingReplicationOperationsCount - Number of entities pending to be replicated. 
PendingReplicationOperationsCount *int64 `json:"pendingReplicationOperationsCount,omitempty"` - // PartnerNamespace - ARM Id of the Primary/Secondary eventhub namespace name, which is part of GEO DR pairning + // PartnerNamespace - ARM Id of the Primary/Secondary eventhub namespace name, which is part of GEO DR pairing PartnerNamespace *string `json:"partnerNamespace,omitempty"` - // AlternateName - Primary/Secondary eventhub namespace name, which is part of GEO DR pairning + // AlternateName - Primary/Secondary eventhub namespace name, which is part of GEO DR pairing AlternateName *string `json:"alternateName,omitempty"` // Role - role of namespace in GEO DR - possible values 'Primary' or 'PrimaryNotReplicating' or 'Secondary'. Possible values include: 'Primary', 'PrimaryNotReplicating', 'Secondary' Role RoleDisasterRecovery `json:"role,omitempty"` @@ -545,7 +545,7 @@ type CaptureDescription struct { // CheckNameAvailability description of a Check Name availability request properties. type CheckNameAvailability struct { - // Name - The Name to check the namespce name availability and The namespace name can contain only letters, numbers, and hyphens. The namespace must start with a letter, and it must end with a letter or number. + // Name - The Name to check the namespace name availability and The namespace name can contain only letters, numbers, and hyphens. The namespace must start with a letter, and it must end with a letter or number. 
Name *string `json:"name,omitempty"` } @@ -624,7 +624,7 @@ func (cf CorrelationFilter) MarshalJSON() ([]byte, error) { type Destination struct { // Name - Name for capture destination Name *string `json:"name,omitempty"` - // DestinationProperties - Properties describing the storage account, blob container and acrchive name format for capture destination + // DestinationProperties - Properties describing the storage account, blob container and archive name format for capture destination *DestinationProperties `json:"properties,omitempty"` } @@ -673,7 +673,7 @@ func (d *Destination) UnmarshalJSON(body []byte) error { return nil } -// DestinationProperties properties describing the storage account, blob container and acrchive name format +// DestinationProperties properties describing the storage account, blob container and archive name format // for capture destination type DestinationProperties struct { // StorageAccountResourceID - Resource id of the storage account to be used to create the blobs @@ -684,7 +684,7 @@ type DestinationProperties struct { ArchiveNameFormat *string `json:"archiveNameFormat,omitempty"` } -// ErrorResponse error reponse indicates ServiceBus service is not able to process the incoming request. +// ErrorResponse error response indicates ServiceBus service is not able to process the incoming request. // The reason is provided in the error message. type ErrorResponse struct { // Code - Error code. @@ -1830,7 +1830,7 @@ type PremiumMessagingRegionsProperties struct { } // RegenerateAccessKeyParameters parameters supplied to the Regenerate Authorization Rule operation, -// specifies which key neeeds to be reset. +// specifies which key needs to be reset. type RegenerateAccessKeyParameters struct { // KeyType - The access key to regenerate. 
Possible values include: 'PrimaryKey', 'SecondaryKey' KeyType KeyType `json:"keyType,omitempty"` @@ -2361,7 +2361,7 @@ type SBAuthorizationRuleProperties struct { // SBNamespace description of a namespace resource. type SBNamespace struct { autorest.Response `json:"-"` - // Sku - Porperties of Sku + // Sku - Properties of Sku Sku *SBSku `json:"sku,omitempty"` // SBNamespaceProperties - Properties of the namespace. *SBNamespaceProperties `json:"properties,omitempty"` @@ -2650,7 +2650,7 @@ type SBNamespaceProperties struct { // SBNamespaceUpdateParameters description of a namespace resource. type SBNamespaceUpdateParameters struct { - // Sku - Porperties of Sku + // Sku - Properties of Sku Sku *SBSku `json:"sku,omitempty"` // SBNamespaceProperties - Properties of the namespace. *SBNamespaceProperties `json:"properties,omitempty"` @@ -3549,7 +3549,7 @@ func NewSBTopicListResultPage(getNextPage func(context.Context, SBTopicListResul return SBTopicListResultPage{fn: getNextPage} } -// SBTopicProperties the Tpoic Properties definition. +// SBTopicProperties the Topic Properties definition. type SBTopicProperties struct { // SizeInBytes - Size of the topic, in bytes. SizeInBytes *int64 `json:"sizeInBytes,omitempty"` @@ -3561,7 +3561,7 @@ type SBTopicProperties struct { AccessedAt *date.Time `json:"accessedAt,omitempty"` // SubscriptionCount - Number of subscriptions. SubscriptionCount *int32 `json:"subscriptionCount,omitempty"` - // CountDetails - Message count deatils + // CountDetails - Message count details CountDetails *MessageCountDetails `json:"countDetails,omitempty"` // DefaultMessageTimeToLive - ISO 8601 Default message timespan to live value. This is the duration after which the message expires, starting from when the message is sent to Service Bus. This is the default value used when TimeToLive is not set on a message itself. 
DefaultMessageTimeToLive *string `json:"defaultMessageTimeToLive,omitempty"` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/namespaces.go b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/namespaces.go index 577a3114dcc4..94fec92556a0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/namespaces.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/namespaces.go @@ -215,7 +215,7 @@ func (client NamespacesClient) CreateOrUpdateResponder(resp *http.Response) (res // Parameters: // resourceGroupName - name of the Resource group within the Azure subscription. // namespaceName - the namespace name -// authorizationRuleName - the authorizationrule name. +// authorizationRuleName - the authorization rule name. // parameters - the shared access authorization rule. func (client NamespacesClient) CreateOrUpdateAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string, parameters SBAuthorizationRule) (result SBAuthorizationRule, err error) { if tracing.IsEnabled() { @@ -399,7 +399,7 @@ func (client NamespacesClient) DeleteResponder(resp *http.Response) (result auto // Parameters: // resourceGroupName - name of the Resource group within the Azure subscription. // namespaceName - the namespace name -// authorizationRuleName - the authorizationrule name. +// authorizationRuleName - the authorization rule name. 
func (client NamespacesClient) DeleteAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string) (result autorest.Response, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.DeleteAuthorizationRule") @@ -577,7 +577,7 @@ func (client NamespacesClient) GetResponder(resp *http.Response) (result SBNames // Parameters: // resourceGroupName - name of the Resource group within the Azure subscription. // namespaceName - the namespace name -// authorizationRuleName - the authorizationrule name. +// authorizationRuleName - the authorization rule name. func (client NamespacesClient) GetAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string) (result SBAuthorizationRule, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.GetAuthorizationRule") @@ -1024,7 +1024,7 @@ func (client NamespacesClient) ListByResourceGroupComplete(ctx context.Context, // Parameters: // resourceGroupName - name of the Resource group within the Azure subscription. // namespaceName - the namespace name -// authorizationRuleName - the authorizationrule name. +// authorizationRuleName - the authorization rule name. func (client NamespacesClient) ListKeys(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string) (result AccessKeys, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.ListKeys") @@ -1205,7 +1205,7 @@ func (client NamespacesClient) MigrateResponder(resp *http.Response) (result aut // Parameters: // resourceGroupName - name of the Resource group within the Azure subscription. // namespaceName - the namespace name -// authorizationRuleName - the authorizationrule name. +// authorizationRuleName - the authorization rule name. // parameters - parameters supplied to regenerate the authorization rule. 
func (client NamespacesClient) RegenerateKeys(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string, parameters RegenerateAccessKeyParameters) (result AccessKeys, err error) { if tracing.IsEnabled() { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/queues.go b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/queues.go index 37cd451fdf3e..983277f01e23 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/queues.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/queues.go @@ -140,7 +140,7 @@ func (client QueuesClient) CreateOrUpdateResponder(resp *http.Response) (result // resourceGroupName - name of the Resource group within the Azure subscription. // namespaceName - the namespace name // queueName - the queue name. -// authorizationRuleName - the authorizationrule name. +// authorizationRuleName - the authorization rule name. // parameters - the shared access authorization rule. func (client QueuesClient) CreateOrUpdateAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string, parameters SBAuthorizationRule) (result SBAuthorizationRule, err error) { if tracing.IsEnabled() { @@ -332,7 +332,7 @@ func (client QueuesClient) DeleteResponder(resp *http.Response) (result autorest // resourceGroupName - name of the Resource group within the Azure subscription. // namespaceName - the namespace name // queueName - the queue name. -// authorizationRuleName - the authorizationrule name. +// authorizationRuleName - the authorization rule name. 
func (client QueuesClient) DeleteAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string) (result autorest.Response, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/QueuesClient.DeleteAuthorizationRule") @@ -518,7 +518,7 @@ func (client QueuesClient) GetResponder(resp *http.Response) (result SBQueue, er // resourceGroupName - name of the Resource group within the Azure subscription. // namespaceName - the namespace name // queueName - the queue name. -// authorizationRuleName - the authorizationrule name. +// authorizationRuleName - the authorization rule name. func (client QueuesClient) GetAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string) (result SBAuthorizationRule, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/QueuesClient.GetAuthorizationRule") @@ -888,7 +888,7 @@ func (client QueuesClient) ListByNamespaceComplete(ctx context.Context, resource // resourceGroupName - name of the Resource group within the Azure subscription. // namespaceName - the namespace name // queueName - the queue name. -// authorizationRuleName - the authorizationrule name. +// authorizationRuleName - the authorization rule name. func (client QueuesClient) ListKeys(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string) (result AccessKeys, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/QueuesClient.ListKeys") @@ -984,7 +984,7 @@ func (client QueuesClient) ListKeysResponder(resp *http.Response) (result Access // resourceGroupName - name of the Resource group within the Azure subscription. // namespaceName - the namespace name // queueName - the queue name. -// authorizationRuleName - the authorizationrule name. +// authorizationRuleName - the authorization rule name. 
// parameters - parameters supplied to regenerate the authorization rule. func (client QueuesClient) RegenerateKeys(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string, parameters RegenerateAccessKeyParameters) (result AccessKeys, err error) { if tracing.IsEnabled() { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/topics.go b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/topics.go index 9c80caeceb8b..d88386727adc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/topics.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/topics.go @@ -135,12 +135,12 @@ func (client TopicsClient) CreateOrUpdateResponder(resp *http.Response) (result return } -// CreateOrUpdateAuthorizationRule creates an authorizatio rule for the specified topic. +// CreateOrUpdateAuthorizationRule creates an authorization rule for the specified topic. // Parameters: // resourceGroupName - name of the Resource group within the Azure subscription. // namespaceName - the namespace name // topicName - the topic name. -// authorizationRuleName - the authorizationrule name. +// authorizationRuleName - the authorization rule name. // parameters - the shared access authorization rule. func (client TopicsClient) CreateOrUpdateAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, topicName string, authorizationRuleName string, parameters SBAuthorizationRule) (result SBAuthorizationRule, err error) { if tracing.IsEnabled() { @@ -332,7 +332,7 @@ func (client TopicsClient) DeleteResponder(resp *http.Response) (result autorest // resourceGroupName - name of the Resource group within the Azure subscription. // namespaceName - the namespace name // topicName - the topic name. -// authorizationRuleName - the authorizationrule name. 
+// authorizationRuleName - the authorization rule name. func (client TopicsClient) DeleteAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, topicName string, authorizationRuleName string) (result autorest.Response, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/TopicsClient.DeleteAuthorizationRule") @@ -518,7 +518,7 @@ func (client TopicsClient) GetResponder(resp *http.Response) (result SBTopic, er // resourceGroupName - name of the Resource group within the Azure subscription. // namespaceName - the namespace name // topicName - the topic name. -// authorizationRuleName - the authorizationrule name. +// authorizationRuleName - the authorization rule name. func (client TopicsClient) GetAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, topicName string, authorizationRuleName string) (result SBAuthorizationRule, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/TopicsClient.GetAuthorizationRule") @@ -888,7 +888,7 @@ func (client TopicsClient) ListByNamespaceComplete(ctx context.Context, resource // resourceGroupName - name of the Resource group within the Azure subscription. // namespaceName - the namespace name // topicName - the topic name. -// authorizationRuleName - the authorizationrule name. +// authorizationRuleName - the authorization rule name. func (client TopicsClient) ListKeys(ctx context.Context, resourceGroupName string, namespaceName string, topicName string, authorizationRuleName string) (result AccessKeys, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/TopicsClient.ListKeys") @@ -984,7 +984,7 @@ func (client TopicsClient) ListKeysResponder(resp *http.Response) (result Access // resourceGroupName - name of the Resource group within the Azure subscription. // namespaceName - the namespace name // topicName - the topic name. -// authorizationRuleName - the authorizationrule name. 
+// authorizationRuleName - the authorization rule name. // parameters - parameters supplied to regenerate the authorization rule. func (client TopicsClient) RegenerateKeys(ctx context.Context, resourceGroupName string, namespaceName string, topicName string, authorizationRuleName string, parameters RegenerateAccessKeyParameters) (result AccessKeys, err error) { if tracing.IsEnabled() { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/models.go index 9f0711c3973e..88435befb8d3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/models.go @@ -896,8 +896,8 @@ type CheckNameAvailabilityResult struct { type CustomDomain struct { // Name - Gets or sets the custom domain name assigned to the storage account. Name is the CNAME source. Name *string `json:"name,omitempty"` - // UseSubDomain - Indicates whether indirect CName validation is enabled. Default value is false. This should only be set on updates. - UseSubDomain *bool `json:"useSubDomain,omitempty"` + // UseSubDomainName - Indicates whether indirect CName validation is enabled. Default value is false. This should only be set on updates. + UseSubDomainName *bool `json:"useSubDomainName,omitempty"` } // Dimension dimension of blobs, possibly be blob type or access tier. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/web/mgmt/2018-02-01/web/apps.go b/vendor/github.com/Azure/azure-sdk-for-go/services/web/mgmt/2018-02-01/web/apps.go index bfa8e0339e2e..60ba5cfbb0c9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/web/mgmt/2018-02-01/web/apps.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/web/mgmt/2018-02-01/web/apps.go @@ -3089,7 +3089,7 @@ func (client AppsClient) CreateOrUpdateSwiftVirtualNetworkConnectionPreparer(ctx autorest.AsContentType("application/json; charset=utf-8"), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/virtualNetwork", pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/networkConfig/virtualNetwork", pathParameters), autorest.WithJSON(connectionEnvelope), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) @@ -3183,7 +3183,7 @@ func (client AppsClient) CreateOrUpdateSwiftVirtualNetworkConnectionSlotPreparer autorest.AsContentType("application/json; charset=utf-8"), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/virtualNetwork", pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/networkConfig/virtualNetwork", pathParameters), autorest.WithJSON(connectionEnvelope), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) @@ -6465,7 +6465,7 @@ func (client AppsClient) DeleteSwiftVirtualNetworkPreparer(ctx context.Context, preparer := 
autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/virtualNetwork", pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/networkConfig/virtualNetwork", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } @@ -6552,7 +6552,7 @@ func (client AppsClient) DeleteSwiftVirtualNetworkSlotPreparer(ctx context.Conte preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/virtualNetwork", pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/networkConfig/virtualNetwork", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } @@ -14240,7 +14240,7 @@ func (client AppsClient) GetSwiftVirtualNetworkConnectionPreparer(ctx context.Co preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/virtualNetwork", pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/networkConfig/virtualNetwork", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } @@ -14328,7 +14328,7 @@ func (client AppsClient) 
GetSwiftVirtualNetworkConnectionSlotPreparer(ctx contex preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/virtualNetwork", pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/networkConfig/virtualNetwork", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } @@ -32905,7 +32905,7 @@ func (client AppsClient) UpdateSwiftVirtualNetworkConnectionPreparer(ctx context autorest.AsContentType("application/json; charset=utf-8"), autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/virtualNetwork", pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/networkConfig/virtualNetwork", pathParameters), autorest.WithJSON(connectionEnvelope), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) @@ -32999,7 +32999,7 @@ func (client AppsClient) UpdateSwiftVirtualNetworkConnectionSlotPreparer(ctx con autorest.AsContentType("application/json; charset=utf-8"), autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/virtualNetwork", pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/networkConfig/virtualNetwork", pathParameters), 
autorest.WithJSON(connectionEnvelope), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/web/mgmt/2018-02-01/web/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/web/mgmt/2018-02-01/web/models.go index 968e1b0bb715..5641daaffd90 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/web/mgmt/2018-02-01/web/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/web/mgmt/2018-02-01/web/models.go @@ -6892,6 +6892,10 @@ type CorsSettings struct { // AllowedOrigins - Gets or sets the list of origins that should be allowed to make cross-origin // calls (for example: http://example.com:12345). Use "*" to allow all. AllowedOrigins *[]string `json:"allowedOrigins,omitempty"` + // SupportCredentials - Gets or sets whether CORS requests with credentials are allowed. See + // https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS#Requests_with_credentials + // for more details. + SupportCredentials *bool `json:"supportCredentials,omitempty"` } // CsmMoveResourceEnvelope object with a list of the resources that need to be moved and the resource group @@ -17556,6 +17560,9 @@ type SiteAuthSettingsProperties struct { // Otherwise, the OpenID Connect Authorization Code Flow is used to authenticate end users. // More information on OpenID Connect: http://openid.net/specs/openid-connect-core-1_0.html ClientSecret *string `json:"clientSecret,omitempty"` + // ClientSecretCertificateThumbprint - An alternative to the client secret, that is the thumbprint of a certificate used for signing purposes. This property acts as + // a replacement for the Client Secret. It is also optional. + ClientSecretCertificateThumbprint *string `json:"clientSecretCertificateThumbprint,omitempty"` // Issuer - The OpenID Connect Issuer URI that represents the entity which issues access tokens for this application. 
// When using Azure Active Directory, this value is the URI of the directory tenant, e.g. https://sts.windows.net/{tenant-guid}/. // This URI is a case-sensitive identifier for the token issuer. @@ -17732,8 +17739,12 @@ type SiteConfig struct { ManagedServiceIdentityID *int32 `json:"managedServiceIdentityId,omitempty"` // XManagedServiceIdentityID - Explicit Managed Service Identity Id XManagedServiceIdentityID *int32 `json:"xManagedServiceIdentityId,omitempty"` - // IPSecurityRestrictions - IP security restrictions. + // IPSecurityRestrictions - IP security restrictions for main. IPSecurityRestrictions *[]IPSecurityRestriction `json:"ipSecurityRestrictions,omitempty"` + // ScmIPSecurityRestrictions - IP security restrictions for scm. + ScmIPSecurityRestrictions *[]IPSecurityRestriction `json:"scmIpSecurityRestrictions,omitempty"` + // ScmIPSecurityRestrictionsUseMain - IP security restrictions for scm to use main. + ScmIPSecurityRestrictionsUseMain *bool `json:"scmIpSecurityRestrictionsUseMain,omitempty"` // HTTP20Enabled - Http20Enabled: configures a web site to allow clients to connect over http2.0 HTTP20Enabled *bool `json:"http20Enabled,omitempty"` // MinTLSVersion - MinTlsVersion: configures the minimum version of TLS required for SSL requests. 
Possible values include: 'OneFullStopZero', 'OneFullStopOne', 'OneFullStopTwo' @@ -17889,6 +17900,12 @@ func (sc SiteConfig) MarshalJSON() ([]byte, error) { if sc.IPSecurityRestrictions != nil { objectMap["ipSecurityRestrictions"] = sc.IPSecurityRestrictions } + if sc.ScmIPSecurityRestrictions != nil { + objectMap["scmIpSecurityRestrictions"] = sc.ScmIPSecurityRestrictions + } + if sc.ScmIPSecurityRestrictionsUseMain != nil { + objectMap["scmIpSecurityRestrictionsUseMain"] = sc.ScmIPSecurityRestrictionsUseMain + } if sc.HTTP20Enabled != nil { objectMap["http20Enabled"] = sc.HTTP20Enabled } @@ -20626,6 +20643,8 @@ type StackMajorVersion struct { IsDefault *bool `json:"isDefault,omitempty"` // MinorVersions - Minor versions associated with the major version. MinorVersions *[]StackMinorVersion `json:"minorVersions,omitempty"` + // ApplicationInsights - true if this supports Application Insights; otherwise, false. + ApplicationInsights *bool `json:"applicationInsights,omitempty"` } // StackMinorVersion application stack minor version. @@ -23017,6 +23036,8 @@ type VnetInfoProperties struct { ResyncRequired *bool `json:"resyncRequired,omitempty"` // DNSServers - DNS servers to be used by this Virtual Network. This should be a comma-separated list of IP addresses. DNSServers *string `json:"dnsServers,omitempty"` + // IsSwift - Flag that is used to denote if this is VNET injection + IsSwift *bool `json:"isSwift,omitempty"` } // VnetParameters the required set of inputs to validate a VNET diff --git a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go index 4a6bc30316e1..ac65757d150a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go @@ -18,4 +18,4 @@ package version // Changes may cause incorrect behavior and will be lost if the code is regenerated. // Number contains the semantic version of this SDK. 
-const Number = "v24.0.0" +const Number = "v25.1.0" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go index 52ca378667df..effa87ab2fae 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -796,7 +796,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource if err != nil { return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err) } - req.Header.Add("User-Agent", userAgent()) + req.Header.Add("User-Agent", UserAgent()) req = req.WithContext(ctx) if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) { v := url.Values{} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/version.go b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go index 3944edf05109..c867b348439b 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/version.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go @@ -30,6 +30,16 @@ var ( ) ) -func userAgent() string { +// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version. 
+func UserAgent() string { return ua } + +// AddToUserAgent adds an extension to the current user agent +func AddToUserAgent(extension string) error { + if extension != "" { + ua = fmt.Sprintf("%s %s", ua, extension) + return nil + } + return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go index 67f1762c4fb9..eb1aa9e323e4 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/version.go +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -19,7 +19,7 @@ import ( "runtime" ) -const number = "v11.3.2" +const number = "v11.4.0" var ( userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", diff --git a/vendor/modules.txt b/vendor/modules.txt index 661764292e77..50cdae53bf6c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,7 +1,8 @@ # contrib.go.opencensus.io/exporter/ocagent v0.4.1 contrib.go.opencensus.io/exporter/ocagent -# github.com/Azure/azure-sdk-for-go v24.0.0+incompatible +# github.com/Azure/azure-sdk-for-go v25.1.0+incompatible github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/resources/mgmt/resources +github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement github.com/Azure/azure-sdk-for-go/services/appinsights/mgmt/2015-05-01/insights github.com/Azure/azure-sdk-for-go/services/automation/mgmt/2015-10-31/automation github.com/Azure/azure-sdk-for-go/services/batch/mgmt/2017-09-01/batch @@ -17,21 +18,21 @@ github.com/Azure/azure-sdk-for-go/services/datalake/analytics/mgmt/2016-11-01/ac github.com/Azure/azure-sdk-for-go/services/datalake/store/2016-11-01/filesystem github.com/Azure/azure-sdk-for-go/services/datalake/store/mgmt/2016-11-01/account github.com/Azure/azure-sdk-for-go/services/devtestlabs/mgmt/2016-05-15/dtl -github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid 
github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac -github.com/Azure/azure-sdk-for-go/services/iothub/mgmt/2018-04-01/devices github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2018-02-14/keyvault github.com/Azure/azure-sdk-for-go/services/logic/mgmt/2016-06-01/logic +github.com/Azure/azure-sdk-for-go/services/mediaservices/mgmt/2018-07-01/media github.com/Azure/azure-sdk-for-go/services/mysql/mgmt/2017-12-01/mysql github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network github.com/Azure/azure-sdk-for-go/services/notificationhubs/mgmt/2017-04-01/notificationhubs github.com/Azure/azure-sdk-for-go/services/postgresql/mgmt/2017-12-01/postgresql -github.com/Azure/azure-sdk-for-go/services/preview/apimanagement/mgmt/2018-06-01-preview/apimanagement github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization github.com/Azure/azure-sdk-for-go/services/preview/devspaces/mgmt/2018-06-01-preview/devspaces github.com/Azure/azure-sdk-for-go/services/preview/dns/mgmt/2018-03-01-preview/dns +github.com/Azure/azure-sdk-for-go/services/preview/eventgrid/mgmt/2018-09-15-preview/eventgrid +github.com/Azure/azure-sdk-for-go/services/preview/iothub/mgmt/2018-12-01-preview/devices github.com/Azure/azure-sdk-for-go/services/preview/mariadb/mgmt/2018-06-01-preview/mariadb github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights github.com/Azure/azure-sdk-for-go/services/preview/msi/mgmt/2015-08-31-preview/msi @@ -60,7 +61,7 @@ github.com/Azure/azure-sdk-for-go/services/web/mgmt/2018-02-01/web github.com/Azure/azure-sdk-for-go/storage github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources github.com/Azure/azure-sdk-for-go/version -# github.com/Azure/go-autorest v11.3.2+incompatible +# 
github.com/Azure/go-autorest v11.4.0+incompatible github.com/Azure/go-autorest/autorest github.com/Azure/go-autorest/autorest/adal github.com/Azure/go-autorest/autorest/azure diff --git a/website/azurerm.erb b/website/azurerm.erb index 5435c4d6531d..396b72a4004f 100644 --- a/website/azurerm.erb +++ b/website/azurerm.erb @@ -22,22 +22,26 @@
  • - > - Authentication + > + Guides @@ -55,10 +59,22 @@ > Data Sources @@ -809,6 +849,10 @@ > Messaging Resources - > - Azure Monitor for containers Resources + > + Log Analytics Resources @@ -1212,6 +1279,15 @@ + > + Media Resources + + + <% end %> diff --git a/website/docs/auth/azure_cli.html.markdown b/website/docs/auth/azure_cli.html.markdown index d1245c812abd..3d6471901e1e 100644 --- a/website/docs/auth/azure_cli.html.markdown +++ b/website/docs/auth/azure_cli.html.markdown @@ -1,7 +1,7 @@ --- layout: "azurerm" page_title: "Azure Provider: Authenticating via the Azure CLI" -sidebar_current: "docs-azurerm-authentication-azure-cli" +sidebar_current: "docs-azurerm-guide-authentication-azure-cli" description: |- This guide will cover how to use the Azure CLI as authentication for the Azure Provider. @@ -86,7 +86,7 @@ To configure Terraform to use the Default Subscription defined in the Azure CLI ```hcl provider "azurerm" { # Whilst version is optional, we /strongly recommend/ using it to pin the version of the Provider being used - version = "=1.21.0" + version = "=1.22.0" } ``` @@ -101,7 +101,7 @@ It's also possible to configure Terraform to use a specific Subscription - for e ```hcl provider "azurerm" { # Whilst version is optional, we /strongly recommend/ using it to pin the version of the Provider being used - version = "=1.21.0" + version = "=1.22.0" subscription_id = "00000000-0000-0000-0000-000000000000" } @@ -118,7 +118,7 @@ If you're looking to use Terraform across Tenants - it's possible to do this by ```hcl provider "azurerm" { # Whilst version is optional, we /strongly recommend/ using it to pin the version of the Provider being used - version = "=1.21.0" + version = "=1.22.0" subscription_id = "00000000-0000-0000-0000-000000000000" tenant_id = "11111111-1111-1111-1111-111111111111" diff --git a/website/docs/auth/managed_service_identity.html.markdown b/website/docs/auth/managed_service_identity.html.markdown index ce65eef431ad..ce31316fabb8 100644 
--- a/website/docs/auth/managed_service_identity.html.markdown +++ b/website/docs/auth/managed_service_identity.html.markdown @@ -1,7 +1,7 @@ --- layout: "azurerm" page_title: "Azure Provider: Authenticating via Managed Service Identity" -sidebar_current: "docs-azurerm-authentication-managed-service-identity" +sidebar_current: "docs-azurerm-guide-authentication-managed-service-identity" description: |- This guide will cover how to use Managed Service Identity as authentication for the Azure Provider. @@ -72,7 +72,7 @@ Whilst a Provider block is _technically_ optional when using Environment Variabl ```hcl provider "azurerm" { # Whilst version is optional, we /strongly recommend/ using it to pin the version of the Provider being used - version = "=1.21.0" + version = "=1.22.0" } ``` @@ -87,7 +87,7 @@ It's also possible to configure Managed Service Identity within the Provider Blo ```hcl provider "azurerm" { # Whilst version is optional, we /strongly recommend/ using it to pin the version of the Provider being used - version = "=1.21.0" + version = "=1.22.0" use_msi = true } diff --git a/website/docs/auth/service_principal_client_certificate.html.markdown b/website/docs/auth/service_principal_client_certificate.html.markdown index e051ef389de8..c8564a09597d 100644 --- a/website/docs/auth/service_principal_client_certificate.html.markdown +++ b/website/docs/auth/service_principal_client_certificate.html.markdown @@ -1,7 +1,7 @@ --- layout: "azurerm" page_title: "Azure Provider: Authenticating via a Service Principal and a Client Certificate" -sidebar_current: "docs-azurerm-authentication-service-principal-client-certificate" +sidebar_current: "docs-azurerm-guide-authentication-service-principal-client-certificate" description: |- This guide will cover how to use a Service Principal (Shared Account) with a Client Certificate as authentication for the Azure Provider. 
@@ -105,7 +105,7 @@ The following Provider block can be specified - where `1.21.0` is the version of ```hcl provider "azurerm" { # Whilst version is optional, we /strongly recommend/ using it to pin the version of the Provider being used - version = "=1.21.0" + version = "=1.22.0" } ``` @@ -125,7 +125,7 @@ variable "client_certificate_password" {} provider "azurerm" { # Whilst version is optional, we /strongly recommend/ using it to pin the version of the Provider being used - version = "=1.21.0" + version = "=1.22.0" subscription_id = "00000000-0000-0000-0000-000000000000" client_id = "00000000-0000-0000-0000-000000000000" diff --git a/website/docs/auth/service_principal_client_secret.html.markdown b/website/docs/auth/service_principal_client_secret.html.markdown index 309379d5d18e..d6e60bb946fb 100644 --- a/website/docs/auth/service_principal_client_secret.html.markdown +++ b/website/docs/auth/service_principal_client_secret.html.markdown @@ -1,7 +1,7 @@ --- layout: "azurerm" page_title: "Azure Provider: Authenticating via a Service Principal and a Client Secret" -sidebar_current: "docs-azurerm-authentication-service-principal-client-secret" +sidebar_current: "docs-azurerm-guide-authentication-service-principal-client-secret" description: |- This guide will cover how to use a Service Principal (Shared Account) with a Client Secret as authentication for the Azure Provider. 
@@ -181,7 +181,7 @@ The following Provider block can be specified - where `1.21.0` is the version of ```hcl provider "azurerm" { # Whilst version is optional, we /strongly recommend/ using it to pin the version of the Provider being used - version = "=1.21.0" + version = "=1.22.0" } ``` @@ -200,7 +200,7 @@ variable "client_secret" {} provider "azurerm" { # Whilst version is optional, we /strongly recommend/ using it to pin the version of the Provider being used - version = "=1.21.0" + version = "=1.22.0" subscription_id = "00000000-0000-0000-0000-000000000000" client_id = "00000000-0000-0000-0000-000000000000" diff --git a/website/docs/d/api_management.html.markdown b/website/docs/d/api_management.html.markdown index 118bdcccaacd..6d0047141b0b 100644 --- a/website/docs/d/api_management.html.markdown +++ b/website/docs/d/api_management.html.markdown @@ -1,7 +1,7 @@ --- layout: "azurerm" page_title: "Azure Resource Manager: azurerm_api_management" -sidebar_current: "docs-azurerm-datasource-api-management" +sidebar_current: "docs-azurerm-datasource-api-management-x" description: |- Gets information about an existing API Management Service. --- diff --git a/website/docs/d/api_management_group.html.markdown b/website/docs/d/api_management_group.html.markdown new file mode 100644 index 000000000000..032b61aef55e --- /dev/null +++ b/website/docs/d/api_management_group.html.markdown @@ -0,0 +1,45 @@ +--- +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_api_management_group" +sidebar_current: "docs-azurerm-datasource-api-management-group" +description: |- + Gets information about an existing API Management Group. +--- + +# Data Source: azurerm_api_management_group + +Use this data source to access information about an existing API Management Group. 
+ +## Example Usage + +```hcl +data "azurerm_api_management_group" "test" { + name = "my-group" + api_management_name = "example-apim" + resource_group_name = "search-service" +} + +output "group_type" { + value = "${data.azurerm_api_management_group.test.type}" +} +``` + +## Argument Reference + +* `api_management_name` - (Required) The Name of the API Management Service in which this Group exists. + +* `name` - (Required) The Name of the API Management Group. + +* `resource_group_name` - (Required) The Name of the Resource Group in which the API Management Service exists. + +## Attributes Reference + +* `id` - The ID of the API Management Group. + +* `display_name` - The display name of this API Management Group. + +* `description` - The description of this API Management Group. + +* `external_id` - The identifier of the external Group. + +* `type` - The type of this API Management Group, such as `custom` or `external`. \ No newline at end of file diff --git a/website/docs/d/api_management_product.html.markdown b/website/docs/d/api_management_product.html.markdown new file mode 100644 index 000000000000..ed0fcb5091d8 --- /dev/null +++ b/website/docs/d/api_management_product.html.markdown @@ -0,0 +1,51 @@ +--- +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_api_management_product" +sidebar_current: "docs-azurerm-datasource-api-management-product" +description: |- + Gets information about an existing API Management Product. +--- + +# Data Source: azurerm_api_management_product + +Use this data source to access information about an existing API Management Product. 
+ +## Example Usage + +```hcl +data "azurerm_api_management_product" "test" { + product_id = "my-product" + api_management_name = "example-apim" + resource_group_name = "search-service" +} + +output "product_terms" { + value = "${data.azurerm_api_management_product.test.terms}" +} +``` + +## Argument Reference + +* `api_management_name` - (Required) The Name of the API Management Service in which this Product exists. + +* `product_id` - (Required) The Identifier for the API Management Product. + +* `resource_group_name` - (Required) The Name of the Resource Group in which the API Management Service exists. + +## Attributes Reference + +* `id` - The ID of the API Management Product. + +* `approval_required` - Do subscribers need to be approved prior to being able to use the Product? + +* `display_name` - The Display Name for this API Management Product. + +* `published` - Is this Product Published? + +* `subscription_required` - Is a Subscription required to access APIs included in this Product? + +* `description` - The description of this Product, which may include HTML formatting tags. + +* `subscriptions_limit` - The number of subscriptions a user can have to this Product at the same time. + +* `terms` - Any Terms and Conditions for this Product, which must be accepted by Developers before they can begin the Subscription process. diff --git a/website/docs/d/api_management_user.html.markdown b/website/docs/d/api_management_user.html.markdown new file mode 100644 index 000000000000..643d9f8184b0 --- /dev/null +++ b/website/docs/d/api_management_user.html.markdown @@ -0,0 +1,47 @@ +--- +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_api_management_user" +sidebar_current: "docs-azurerm-datasource-api-management-user" +description: |- + Gets information about an existing API Management User. +--- + +# Data Source: azurerm_api_management_user + +Use this data source to access information about an existing API Management User. 
+ +## Example Usage + +```hcl +data "azurerm_api_management_user" "test" { + user_id = "my-user" + api_management_name = "example-apim" + resource_group_name = "search-service" +} + +output "notes" { + value = "${data.azurerm_api_management_user.test.note}" +} +``` + +## Argument Reference + +* `api_management_name` - (Required) The Name of the API Management Service in which this User exists. + +* `resource_group_name` - (Required) The Name of the Resource Group in which the API Management Service exists. + +* `user_id` - (Required) The Identifier for the User. + +## Attributes Reference + +* `id` - The ID of the API Management User. + +* `first_name` - The First Name for the User. + +* `last_name` - The Last Name for the User. + +* `email` - The Email Address used for this User. + +* `note` - Any notes about this User. + +* `state` - The current state of this User, for example `active`, `blocked` or `pending`. \ No newline at end of file diff --git a/website/docs/d/app_service.html.markdown b/website/docs/d/app_service.html.markdown index 4d4ac87f6576..1c771a698874 100644 --- a/website/docs/d/app_service.html.markdown +++ b/website/docs/d/app_service.html.markdown @@ -47,6 +47,8 @@ output "app_service_id" { * `https_only` - Can the App Service only be accessed via HTTPS? +* `client_cert_enabled` - Does the App Service require client certificates for incoming requests? + * `site_config` - A `site_config` block as defined below. * `tags` - A mapping of tags to assign to the resource. diff --git a/website/docs/d/availability_set.html.markdown b/website/docs/d/availability_set.html.markdown new file mode 100644 index 000000000000..f29d5162c46f --- /dev/null +++ b/website/docs/d/availability_set.html.markdown @@ -0,0 +1,48 @@ +--- +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_availability_set" +sidebar_current: "docs-azurerm-datasource-availability-set" +description: |- + Gets information about an existing Availability Set. 
+--- + +# Data Source: azurerm_availability_set + +Use this data source to access information about an existing Availability Set. + +## Example Usage + +```hcl +data "azurerm_availability_set" "test" { + name = "tf-appsecuritygroup" + resource_group_name = "my-resource-group" +} + +output "availability_set_id" { + value = "${data.azurerm_availability_set.test.id}" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - The name of the Availability Set. + +* `resource_group_name` - The name of the resource group in which the Availability Set exists. + +## Attributes Reference + +The following attributes are exported: + +* `id` - The ID of the Availability Set. + +* `location` - The supported Azure location where the Availability Set exists. + +* `managed` - Whether the availability set is managed or not. + +* `platform_fault_domain_count` - The number of fault domains that are used. + +* `platform_update_domain_count` - The number of update domains that are used. + +* `tags` - A mapping of tags assigned to the resource. diff --git a/website/docs/d/batch_pool.html.markdown b/website/docs/d/batch_pool.html.markdown index b11461a0fe7b..8d3a40ce3d4c 100644 --- a/website/docs/d/batch_pool.html.markdown +++ b/website/docs/d/batch_pool.html.markdown @@ -43,6 +43,8 @@ The following attributes are exported: * `start_task` - A `start_task` block that describes the start task settings for the Batch pool. +* `max_tasks_per_node` - The maximum number of tasks that can run concurrently on a single compute node in the pool. + --- A `fixed_scale` block exports the following: diff --git a/website/docs/d/builtin_role_definition.markdown b/website/docs/d/builtin_role_definition.markdown index 998edacd4616..bc539d4865c0 100644 --- a/website/docs/d/builtin_role_definition.markdown +++ b/website/docs/d/builtin_role_definition.markdown @@ -10,6 +10,8 @@ description: |- Use this data source to access information about a built-in Role Definition. 
To access information about a custom Role Definition, [please see the `azurerm_role_definition` data source](role_definition.html) instead. +~> **NOTE:** This data source has been deprecated in favour of `azurerm_role_definition` that now can look up role definitions by name. As such this data source will be removed in version 2.0 of the AzureRM Provider. + ## Example Usage ```hcl diff --git a/website/docs/d/key_vault_key.html.markdown b/website/docs/d/key_vault_key.html.markdown index d09e1dc00628..db22ff096b35 100644 --- a/website/docs/d/key_vault_key.html.markdown +++ b/website/docs/d/key_vault_key.html.markdown @@ -33,7 +33,7 @@ The following arguments are supported: * `name` - (Required) Specifies the name of the Key Vault Key. -* `vault_uri` - (Required) Specifies the URI used to access the Key Vault instance, available on the `azurerm_key_vault` Data Source / Resource. +* `vault_uri` - (Required) Specifies the ID of the Key Vault instance where the Key resides, available on the `azurerm_key_vault` Data Source / Resource. ## Attributes Reference diff --git a/website/docs/d/key_vault_secret.html.markdown b/website/docs/d/key_vault_secret.html.markdown index 954cf8a61530..fee5997351fe 100644 --- a/website/docs/d/key_vault_secret.html.markdown +++ b/website/docs/d/key_vault_secret.html.markdown @@ -33,7 +33,7 @@ The following arguments are supported: * `name` - (Required) Specifies the name of the Key Vault Secret. -* `vault_uri` - (Required) Specifies the URI used to access the Key Vault instance, available on the `azurerm_key_vault` Data Source / Resource. +* `vault_uri` - (Required) Specifies the ID of the Key Vault instance where the Secret resides, available on the `azurerm_key_vault` Data Source / Resource. 
## Attributes Reference diff --git a/website/docs/d/managed_disk.html.markdown b/website/docs/d/managed_disk.html.markdown index 3820416e4b40..dc6cea981645 100644 --- a/website/docs/d/managed_disk.html.markdown +++ b/website/docs/d/managed_disk.html.markdown @@ -91,7 +91,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "staging" } } diff --git a/website/docs/d/network_watcher.html.markdown b/website/docs/d/network_watcher.html.markdown new file mode 100644 index 000000000000..a69d7a1f4059 --- /dev/null +++ b/website/docs/d/network_watcher.html.markdown @@ -0,0 +1,38 @@ +--- +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_network_watcher" +sidebar_current: "docs-azurerm-datasource-network-watcher" +description: |- + Gets information about an existing Network Watcher. +--- + +# Data Source: azurerm_network_watcher + +Use this data source to access information about an existing Network Watcher. + +## Example Usage + +```hcl +data "azurerm_network_watcher" "test" { + name = "${azurerm_network_watcher.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" +} + +output "network_watcher_id" { + value = "${data.azurerm_network_watcher.test.id}" +} +``` + +## Argument Reference + +* `name` - (Required) Specifies the Name of the Network Watcher. +* `resource_group_name` - (Required) Specifies the Name of the Resource Group within which the Network Watcher exists. + + +## Attributes Reference + +* `id` - The ID of the Network Watcher. + +* `location` - The supported Azure location where the resource exists. + +* `tags` - A mapping of tags assigned to the resource. 
diff --git a/website/docs/d/policy_definition.markdown b/website/docs/d/policy_definition.markdown new file mode 100644 index 000000000000..a51c6d33aa5b --- /dev/null +++ b/website/docs/d/policy_definition.markdown @@ -0,0 +1,40 @@ +--- +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_policy_definition" +sidebar_current: "docs-azurerm-datasource-policy-definition" +description: |- + Get information about a Policy Definition. +--- + +# Data Source: azurerm_policy_definition + +Use this data source to access information about a Policy Definition, both custom and built in. Retrieves Policy Definitions from your current subscription by default. + +## Example Usage + +```hcl +data "azurerm_policy_definition" "test" { + display_name = "Allowed resource types" +} + +output "id" { + value = "${data.azurerm_policy_definition.test.id}" +} +``` + +## Argument Reference + +* `display_name` - (Required) Specifies the name of the Policy Definition. +* `management_group_id` - (Optional) Only retrieve Policy Definitions from this Management Group. + + +## Attributes Reference + +* `id` - The ID of the Policy Definition. +* `name` - The Name of the Policy Definition. +* `type` - The Type of Policy. +* `description` - The Description of the Policy. +* `policy_type` - The Type of the Policy, such as `Microsoft.Authorization/policyDefinitions`. +* `policy_rule` - The Rule as defined (in JSON) in the Policy. +* `parameters` - Any Parameters defined in the Policy. +* `metadata` - Any Metadata defined in the Policy. 
diff --git a/website/docs/d/public_ip.html.markdown b/website/docs/d/public_ip.html.markdown index 702aa504926f..7960e629ccad 100644 --- a/website/docs/d/public_ip.html.markdown +++ b/website/docs/d/public_ip.html.markdown @@ -57,7 +57,7 @@ resource "azurerm_public_ip" "test" { allocation_method = "Dynamic" idle_timeout_in_minutes = 30 - tags { + tags = { environment = "test" } } diff --git a/website/docs/d/role_definition.markdown b/website/docs/d/role_definition.markdown index 83def850266a..e81f1aeff167 100644 --- a/website/docs/d/role_definition.markdown +++ b/website/docs/d/role_definition.markdown @@ -3,33 +3,60 @@ layout: "azurerm" page_title: "Azure Resource Manager: azurerm_role_definition" sidebar_current: "docs-azurerm-datasource-role-definition" description: |- - Get information about an existing Custom Role Definition. + Get information about an existing Role Definition. --- # Data Source: azurerm_role_definition -Use this data source to access information about an existing Custom Role Definition. To access information about a built-in Role Definition, [please see the `azurerm_builtin_role_definition` data source](builtin_role_definition.html) instead. +Use this data source to access information about an existing Role Definition. ## Example Usage ```hcl data "azurerm_subscription" "primary" {} -data "azurerm_role_definition" "custom" { +resource "azurerm_role_definition" "custom" { role_definition_id = "00000000-0000-0000-0000-000000000000" + name = "CustomRoleDef" + scope = "${data.azurerm_subscription.primary.id}" + + #... 
+} + +data "azurerm_role_definition" "custom" { + role_definition_id = "${azurerm_role_definition.custom.role_definition_id}" scope = "${data.azurerm_subscription.primary.id}" # /subscriptions/00000000-0000-0000-0000-000000000000 } +data "azurerm_role_definition" "custom-byname" { + name = "${azurerm_role_definition.custom.name}" + scope = "${data.azurerm_subscription.primary.id}" +} + +data "azurerm_builtin_role_definition" "builtin" { + name = "Contributor" +} + output "custom_role_definition_id" { value = "${data.azurerm_role_definition.custom.id}" } +output "contributor_role_definition_id" { + value = "${data.azurerm_builtin_role_definition.builtin.id}" +} ``` + + ## Argument Reference -* `role_definition_id` - (Required) Specifies the ID of the Role Definition as a UUID/GUID. +* `name` - (Optional) Specifies the Name of either a built-in or custom Role Definition. + +-> You can also use this for built-in roles such as `Contributor`, `Owner`, `Reader` and `Virtual Machine Contributor` + +* `role_definition_id` - (Optional) Specifies the ID of the Role Definition as a UUID/GUID. +* `scope` - (Optional) Specifies the Scope at which the Custom Role Definition exists. -* `scope` - (Required) Specifies the Scope at which the Custom Role Definition exists. +~> **NOTE:** One of `name` or `role_definition_id` must be specified. ## Attributes Reference diff --git a/website/docs/d/servicebus_namespace.html.markdown b/website/docs/d/servicebus_namespace.html.markdown new file mode 100644 index 000000000000..5592efae989b --- /dev/null +++ b/website/docs/d/servicebus_namespace.html.markdown @@ -0,0 +1,53 @@ +--- +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_servicebus_namespace" +sidebar_current: "docs-azurerm-datasource-servicebus-namespace" +description: |- + Gets information about an existing ServiceBus Namespace. +--- + +# Data Source: azurerm_servicebus_namespace + +Use this data source to access information about an existing ServiceBus Namespace. 
+ +## Example Usage + +```hcl +data "azurerm_servicebus_namespace" "test" { + name = "examplenamespace" + resource_group_name = "example-resources" +} + +output "location" { + value = "${data.azurerm_servicebus_namespace.test.location}" +} +``` + +## Argument Reference + +* `name` - (Required) Specifies the name of the ServiceBus Namespace. + +* `resource_group_name` - (Required) Specifies the name of the Resource Group where the ServiceBus Namespace exists. + +## Attributes Reference + +* `location` - The location of the Resource Group in which the ServiceBus Namespace exists. + +* `sku` - The Tier used for the ServiceBus Namespace. + +* `capacity` - The capacity of the ServiceBus Namespace. + +* `tags` - A mapping of tags assigned to the resource. + +The following attributes are exported only if there is an authorization rule named +`RootManageSharedAccessKey` which is created automatically by Azure. + +* `default_primary_connection_string` - The primary connection string for the authorization + rule `RootManageSharedAccessKey`. + +* `default_secondary_connection_string` - The secondary connection string for the + authorization rule `RootManageSharedAccessKey`. + +* `default_primary_key` - The primary access key for the authorization rule `RootManageSharedAccessKey`. + +* `default_secondary_key` - The secondary access key for the authorization rule `RootManageSharedAccessKey`. diff --git a/website/docs/d/storage_account.html.markdown b/website/docs/d/storage_account.html.markdown index 731c1a03a8ed..107cb343124e 100644 --- a/website/docs/d/storage_account.html.markdown +++ b/website/docs/d/storage_account.html.markdown @@ -64,18 +64,32 @@ output "storage_account_tier" { * `primary_blob_endpoint` - The endpoint URL for blob storage in the primary location. +* `primary_blob_host` - The hostname with port if applicable for blob storage in the primary location. + * `secondary_blob_endpoint` - The endpoint URL for blob storage in the secondary location. 
+* `secondary_blob_host` - The hostname with port if applicable for blob storage in the secondary location. + * `primary_queue_endpoint` - The endpoint URL for queue storage in the primary location. +* `primary_queue_host` - The hostname with port if applicable for queue storage in the primary location. + * `secondary_queue_endpoint` - The endpoint URL for queue storage in the secondary location. +* `secondary_queue_host` - The hostname with port if applicable for queue storage in the secondary location. + * `primary_table_endpoint` - The endpoint URL for table storage in the primary location. +* `primary_table_host` - The hostname with port if applicable for table storage in the primary location. + * `secondary_table_endpoint` - The endpoint URL for table storage in the secondary location. +* `secondary_table_host` - The hostname with port if applicable for table storage in the secondary location. + * `primary_file_endpoint` - The endpoint URL for file storage in the primary location. +* `primary_file_host` - The hostname with port if applicable for file storage in the primary location. + * `primary_access_key` - The primary access key for the Storage Account. * `secondary_access_key` - The secondary access key for the Storage Account. 
diff --git a/website/docs/d/storage_account_sas.html.markdown b/website/docs/d/storage_account_sas.html.markdown index bcc71fa39111..60483dc61717 100644 --- a/website/docs/d/storage_account_sas.html.markdown +++ b/website/docs/d/storage_account_sas.html.markdown @@ -31,7 +31,7 @@ resource "azurerm_storage_account" "testsa" { account_tier = "Standard" account_replication_type = "GRS" - tags { + tags = { environment = "staging" } } diff --git a/website/docs/guides/2.0-upgrade-guide.html.markdown b/website/docs/guides/2.0-upgrade-guide.html.markdown new file mode 100644 index 000000000000..165accb50ba0 --- /dev/null +++ b/website/docs/guides/2.0-upgrade-guide.html.markdown @@ -0,0 +1,352 @@ +--- +layout: "azurerm" +page_title: "Azure Resource Manager: 2.0 Upgrade Guide" +sidebar_current: "docs-azurerm-guide-2.0-upgrade" +description: |- + Azure Resource Manager: 2.0 Upgrade Guide + +--- + +# v2.0 of the AzureRM Provider + +Terraform initially shipped support for the AzureRM Provider back in December 2015. + +Since then we've added support for 191 Resources, 58 Data Sources and have launched a couple of related Providers in the form of [the Azure Active Directory Provider](https://www.terraform.io/docs/providers/azuread/index.html) and [the Azure Stack Provider](https://www.terraform.io/docs/providers/azurestack/index.html). + +Version 2.0 of the AzureRM Provider is a major release and as such includes some larger-scale changes which are outlined in this document. + +-> **NOTE:** This guide is a Work In Progress and additional information may be added to this guide until version 2.0 of the AzureRM Provider is released. + +### Pinning your Provider Version + +We recommend pinning the version of each Provider you use in Terraform - you can do this using the `version` attribute in the `provider` block, either to a specific version of the AzureRM Provider, like so: + +```hcl +provider "azurerm" { + version = "=1.22.0" +} +``` + +.. 
or to any 1.x release: + +```hcl +provider "azurerm" { + version = "~> 1.x" +} +``` + +More information on [how to pin the version of a Terraform Provider being used can be found on the Terraform Website](https://www.terraform.io/docs/configuration/providers.html#provider-versions). + +Once version 2.0 of the AzureRM Provider is released - you can then upgrade to it by updating the version specified in the Provider block, like so: + +```hcl +provider "azurerm" { + version = "=2.0.0" +} +``` + +## What's coming in Version 2.0 of the AzureRM Provider? + +At a high level, we're intending for version 2.0 to include the following changes: + +* [Changes when importing existing resources](#changes-when-importing-existing-resources) - when bringing Resources provisioned outside of Terraform under Terraform's control, these resources will now be required to be imported. +* [Custom Timeouts will be available on Resources](#custom-timeouts-for-resources) - this will allow you to specify a custom timeout for provisioning the resource in your Terraform Configuration [using the `timeouts` block](https://www.terraform.io/docs/configuration/resources.html#timeouts). +* [New resources for Virtual Machines and Virtual Machine Scale Sets](#new-resources-for-virtual-machines-and-virtual-machine-scale-sets). +* [Removing Fields, Data Sources and Resources which have been deprecated](#removal-of-deprecated-fields-data-sources-and-resources). + +Each of these topics is covered in more detail below, however please note that this guide is a Work In Progress until version 2.0 of the AzureRM Provider is released and thus things may be added/changed as necessary. + +## Changes when Importing Existing Resources + +Terraform allows for existing resources which have been created outside of Terraform to be Imported into Terraform's State. Once a resource is imported into the state, it's possible for Terraform to track changes and manage this resource. 
The Azure Provider allows Importing existing resources into the state (using `terraform import`) for (almost) every resource. + +Version 2.0 of the Azure Provider aims to solve an issue where it's possible to unintentionally import resources into the state by running `terraform apply`. To explain this further, the majority of Azure's APIs are Upserts - which means that a resource will be updated if it exists, otherwise it'll be created. + +Where the unique identifier for (most) Azure resources is the `name` (rather than for example an `aws_instance` where AWS will generate a different unique identifier) - it's possible that users may have unintentionally imported existing resources into Terraform (and made changes to the resource) when running `terraform apply` when using the same unique identifier as an existing resource. + +Whilst this may allow resources to work in some cases, it leads to hard-to-diagnose bugs in others (which could have been caught during `terraform plan`). + +In order to match the behaviour of other Terraform Providers version 2.0 of the AzureRM Provider will require that existing resources are imported into the state prior to use. This means that Terraform will be checking for the presence of an existing resource prior to creating it - and will return an error similar to below: + +``` +A resource with the ID /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1 already exists - to be managed via Terraform this resource needs to be imported into the State. Please see the resource documentation for `azurerm_resource_group` for more information. +``` + +Information on how to import a given Resource can be found in the documentation for that Resource - for example [here's how to import a Resource Group in Terraform](/docs/providers/azurerm/r/resource_group.html#import). + +You can opt into this behaviour in version 1.22 of the AzureRM Provider by setting the Environment Variable `ARM_PROVIDER_STRICT` to `true`. 
+ +## Custom Timeouts for Resources + +Resources can optionally support a `timeouts` block - which allows users to specify a Custom Timeout for resource creation/deletion as part of the Terraform Configuration. + +Prior to version 2.0 the Azure Provider has a default value set for resource timeouts for an hour - which cannot be overridden. This works for the most-part but there are certain scenarios where it'd be helpful to override this. + +This is useful for resources which can take a long time to delete - for example deleting the `azurerm_resource_group` resource will delete any resources within it, which can take time. Within your Terraform Configuration this could be represented like so: + +```hcl +resource "azurerm_resource_group" "test" { + name = "example-resource-group" + location = "West Europe" + + timeouts { + create = "10m" + delete = "30m" + } +} +``` + +We intend to support the `timeouts` block in version 2.0 of the Azure Provider - which will allow timeouts to be specified on resources (as shown above). [This feature request is being tracked here](https://github.com/terraform-providers/terraform-provider-azurerm/issues/171) and will form part of the 2.0 release of the AzureRM Provider. + +## New Resources for Virtual Machines and Virtual Machine Scale Sets + +We originally shipped support for the `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` resources back in March 2016. + +Over time new features have been added to these resources by Azure, such as Managed Disks and Managed Service Identity which these resources support. Since these resources first launched Azure's also changed the behaviour of some fields, so that it's now possible to update them where this wasn't previously possible - for example the Custom Data for a Virtual Machine. + +We've spent some time thinking about how we can accommodate these changes and about how we can improve the user experience of both resources. 
+In particular we've wanted to be able to give better validation during `terraform plan`, rather than bailing out with an Azure API error during `terraform apply`, however this isn't possible with the current resource structure since they're very generic. The validation requirements also vary substantially based on the fields provided, for example the `name` field for a Virtual Machine can be up to 63 characters for a Linux Virtual Machine but only allows 15 characters for a Windows Virtual Machine. + +As such after spending some time reading through bug reports and thinking/prototyping some potential solutions to this - we believe the best path forward here is to split these resources out, so that we would have: + +* a Linux Virtual Machine Resource (working name: `azurerm_linux_virtual_machine`) +* a Windows Virtual Machine Resource (working name: `azurerm_windows_virtual_machine`) +* updating the Data Disk Attachment Resource to support Unmanaged Disks +* a Linux Virtual Machine Scale Set Resource (working name: `azurerm_linux_virtual_machine_scale_set`) +* a Windows Virtual Machine Scale Set Resource (working name: `azurerm_windows_virtual_machine_scale_set`) +* a separate resource for Virtual Machine Scale Set Extensions (working name `azurerm_virtual_machine_scale_set_extension`) + +Please Note: all of the resources mentioned above currently do not exist but will form part of the 2.0 release. + +Whilst we're aware that this isn't ideal since users will eventually have to update their code/import an existing resource - we believe this approach gives us a good footing for the future. In particular this allows us to re-consider the schema design so that we can both support these new use-cases, fix some bugs and improve the user experience with these resources. 
+ +The existing `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` resources would continue to be available throughout the `2.x` releases - but over time we'd end up deprecating these in favour of the new resources. + +## Removal of Deprecated Fields, Data Sources and Resources + +The Azure Provider was first launched in December 2015 - over the years the Provider has accumulated fields which have been deprecated (either by Azure because they're no longer used or because we've created an external resource to use instead). + +Since version 2.0 is a major version - we intend to take this opportunity to remove deprecated Fields, Resources and Data Sources. + +Below we'll cover each of the Data Sources and Resources which will be affected by the 2.0 upgrade. + +-> **NOTE:** This guide is a Work In Progress and as such Fields, Resources and Data Sources may be added to this guide until version 2.0 of the AzureRM Provider is released. + + +### Data Source: `azurerm_azuread_application` + +The AzureAD Data Sources and Resources have been moved to [the new AzureAD Provider](https://www.terraform.io/docs/providers/azuread/index.html), as such this Data Source will be removed in v2.0 of the AzureRM Provider. + +A guide on how to migrate to using the new Provider [can be found here](https://www.terraform.io/docs/providers/azurerm/guides/migrating-to-azuread.html). + +### Data Source: `azurerm_azuread_service_principal` + +The AzureAD Data Sources and Resources have been moved to [the new AzureAD Provider](https://www.terraform.io/docs/providers/azuread/index.html), as such this Data Source will be removed in v2.0 of the AzureRM Provider. + +A guide on how to migrate to using the new Provider [can be found here](https://www.terraform.io/docs/providers/azurerm/guides/migrating-to-azuread.html). + +### Data Source: `azurerm_kubernetes_cluster` + +The deprecated field `dns_prefix` within the `agent_pool_profile` block will be removed. 
+ +### Data Source: `azurerm_network_interface` + +The deprecated field `internal_fqdn` will be removed. + +### Data Source: `azurerm_scheduler_job_collection` + +Azure Scheduler is being retired in favour of Logic Apps ([more information can be found here](https://docs.microsoft.com/en-us/azure/scheduler/migrate-from-scheduler-to-logic-apps)) - as such this Data Source will be removed. + +### Resource: `azurerm_app_service_plan` + +The fields in the `properties` block (`app_service_environment_id`, `reserved` and `per_site_scaling`) have been moved to the top level - as such the `properties` block will be removed. + +### Resource: `azurerm_application_gateway` + +The deprecated `fqdn_list` field in the `backend_address_pool` block will be removed in favour of the `fqdns` field, which is available from v1.22 of the AzureRM Provider. + +The deprecated `ip_address_list` field in the `backend_address_pool` block will be removed in favour of the `ip_addresses` field, which is available from v1.22 of the AzureRM Provider. + +### Resource: `azurerm_automation_schedule` + +The deprecated `account_name` field will be removed. This has been deprecated in favour of the `automation_account_name` field. + +### Resource: `azurerm_azuread_application` + +The AzureAD Data Sources and Resources have been moved to [the new AzureAD Provider](https://www.terraform.io/docs/providers/azuread/index.html), as such this Resource will be removed in v2.0 of the AzureRM Provider. + +A guide on how to migrate to using the new Provider [can be found here](https://www.terraform.io/docs/providers/azurerm/guides/migrating-to-azuread.html). + +### Resource: `azurerm_azuread_service_principal` + +The AzureAD Data Sources and Resources have been moved to [the new AzureAD Provider](https://www.terraform.io/docs/providers/azuread/index.html), as such this Resource will be removed. 
+ +A guide on how to migrate to using the new Provider [can be found here](https://www.terraform.io/docs/providers/azurerm/guides/migrating-to-azuread.html). + +### Resource: `azurerm_azuread_service_principal_password` + +The AzureAD Data Sources and Resources have been moved to [the new AzureAD Provider](https://www.terraform.io/docs/providers/azuread/index.html), as such this Resource will be removed. + +A guide on how to migrate to using the new Provider [can be found here](https://www.terraform.io/docs/providers/azurerm/guides/migrating-to-azuread.html). + +### Resource: `azurerm_container_group` + +The deprecated `port` and `protocol` fields in the `container` block will be removed. These fields have been moved into the `ports` block within the `ports` field. + +The deprecated `command` field in the `container` block will be removed. This has been replaced by the `commands` field in the `container` block. + +### Resource: `azurerm_container_registry` + +The deprecated `storage_account` block will be removed. This has been replaced by the `storage_account_id` field and is only applicable to `Classic` Container Registries. + +### Resource: `azurerm_container_service` + +Azure Container Service (ACS) is being Deprecated in favour of Azure Kubernetes Service (AKS) ([more information can be found here](https://azure.microsoft.com/en-us/updates/azure-container-service-will-retire-on-january-31-2020/)), in preparation the resource will be removed. + +### Resource: `azurerm_cosmosdb_account` + +The deprecated `failover_policy` block will be removed. This has been replaced by the `geo_location` block. + +### Resource: `azurerm_dns_mx_record` + +The `preference` field in the `record` block will change from a String to an Integer to better reflect the API. + +### Resource: `azurerm_dns_ns_record` + +The deprecated `record` field will be removed. This has been replaced by the `records` field which accepts multiple values. 
+ +### Resource: `azurerm_eventhub` + +The deprecated `location` field will be removed, since this is no longer used. + +### Resource: `azurerm_eventhub_authorization_rule` + +The deprecated `location` field will be removed, since this is no longer used. + +### Resource: `azurerm_eventhub_consumer_group` + +The deprecated `location` field will be removed, since this is no longer used. + +### Resource: `azurerm_eventhub_namespace` + +The deprecated `location` field will be removed, since this is no longer used. + +### Resource: `azurerm_firewall` + +The deprecated `internal_public_ip_address_id` field in the `ip_configuration` block will be removed. This field has been replaced by the `public_ip_address_id` field in the `ip_configuration` block. + +### Resource: `azurerm_kubernetes_cluster` + +The deprecated `dns_prefix` field in the `agent_pool_profile` block will be removed. This field has been removed by Azure and is no longer used. + +The deprecated `fqdn` field in the `agent_pool_profile` block will be removed. This has been replaced by the top-level field `fqdn`. + +The `service_principal` will be changing from a Set to a List, which will allow Terraform to better detect when the values have changed locally and as such can detect when this needs to be recreated. + +### Resource: `azurerm_lb_backend_address_pool` + +The deprecated `location` field will be removed, since this is no longer used. + +### Resource: `azurerm_lb_nat_probe` + +The deprecated `location` field will be removed, since this is no longer used. + +### Resource: `azurerm_lb_nat_rule` + +The deprecated `location` field will be removed, since this is no longer used. + +### Resource: `azurerm_lb_probe` + +The deprecated `location` field will be removed, since this is no longer used. + +### Resource: `azurerm_lb_rule` + +The deprecated `location` field will be removed, since this is no longer used. 
+ +### Resource: `azurerm_log_analytics_linked_service` + +The `resource_id` field has been moved from the `linked_service_properties` block to the top-level. + +The `linked_service_properties` block will be removed, since it's no longer required. + +### Resource: `azurerm_log_analytics_workspace_linked_service` + +This resource has been renamed to `azurerm_log_analytics_linked_service` which is available from v1.22 of the AzureRM Provider - instructions on [how to migrate are available in this guide](https://terraform.io/docs/providers/azurerm/guides/migrating-between-renamed-resources.html). As such this resource will be removed. + +### Resource: `azurerm_mssql_elasticpool` + +The deprecated `elastic_pool_properties` block will be removed. The fields inside this block have been moved to the top-level. + +### Resource: `azurerm_network_interface` + +The `application_gateway_backend_address_pools_ids` field in the `ip_configuration` block will be removed. This has been replaced by the `azurerm_network_interface_application_gateway_backend_address_pool_association` resource. + +The `application_security_group_ids` field in the `ip_configuration` block will be removed. This has been replaced by the `azurerm_network_interface_application_security_group_association` resource. + +The `load_balancer_backend_address_pools_ids` field in the `ip_configuration` block will be removed. This has been replaced by the `azurerm_network_interface_backend_address_pool_association` resource. + +The `load_balancer_inbound_nat_rules_ids` field in the `ip_configuration` block will be removed. This has been replaced by the `azurerm_network_interface_nat_rule_association` resource. + +### Resource: `azurerm_public_ip` + +The deprecated `public_ip_address_allocation` field will be removed. This field has been replaced by `allocation_method`. 
+ +### Resource: `azurerm_scheduler_job` + +Azure Scheduler is being retired in favour of Logic Apps ([more information can be found here](https://docs.microsoft.com/en-us/azure/scheduler/migrate-from-scheduler-to-logic-apps)) - as such this Resource will be removed in v2.0 of the AzureRM Provider. + +### Resource: `azurerm_scheduler_job_collection` + +Azure Scheduler is being retired in favour of Logic Apps ([more information can be found here](https://docs.microsoft.com/en-us/azure/scheduler/migrate-from-scheduler-to-logic-apps)) - as such this Resource will be removed in v2.0 of the AzureRM Provider. + +### Resource: `azurerm_servicebus_queue` + +The deprecated `location` field will be removed, since this is no longer used. + +The deprecated `enable_batched_operations` field will be removed, since this is no longer used. + +The deprecated `support_ordering` field will be removed, since this is no longer used. + +### Resource: `azurerm_servicebus_subscription` + +The deprecated `location` field will be removed, since this is no longer used. + +The deprecated `dead_lettering_on_filter_evaluation_exceptions` field will be removed, since this is no longer used. + +### Resource: `azurerm_servicebus_topic` + +The deprecated `location` field will be removed, since this is no longer used. + +The deprecated `enable_filtering_messages_before_publishing` field will be removed, since this is no longer used. + +### Resource: `azurerm_storage_account` + +The deprecated `account_type` field will be removed. This has been split into the fields `account_tier` and `account_replication_type`. + +### Resource: `azurerm_subnet` + +The deprecated field `network_security_group_id` will be removed. This has been replaced by the `azurerm_subnet_network_security_group_association` resource. + +The deprecated field `route_table_id` will be removed. This has been replaced by the `azurerm_subnet_route_table_association` resource. 
+ +### Resource: `azurerm_virtual_machine` + +The `azurerm_virtual_machine` resource will be deprecated in favour of two new resources: `azurerm_linux_virtual_machine` and `azurerm_windows_virtual_machine`. + +Splitting the Virtual Machine resource in two allows us to both provide finer-grain validation for this resource and update the schema. + +The existing `azurerm_virtual_machine` Resource will continue to be available in its current form, however it will eventually be deprecated and we recommend using the new resources going forward. + +### Resource: `azurerm_virtual_machine_scale_set` + +The `azurerm_virtual_machine_scale_set` resource will be deprecated in favour of two new resources: `azurerm_linux_virtual_machine_scale_set` and `azurerm_windows_virtual_machine_scale_set`. + +Splitting the Virtual Machine Scale Set resource in two allows us to both provide finer-grain validation for this resource and update the schema. + +The existing `azurerm_virtual_machine_scale_set` Resource will continue to be available in its current form, however it will eventually be deprecated and we recommend using the new resources going forward. + +--- + +We've spent the past few months laying the groundwork for these changes - and whilst we appreciate that your Terraform Configurations may require code changes to upgrade to 2.0 - we take Semantic Versioning seriously and so try our best to limit these changes to major versions. + +You can follow along with the work in the 2.0 release [in this GitHub Milestone](https://github.com/terraform-providers/terraform-provider-azurerm/milestone/31) - and [in this GitHub Issue](https://github.com/terraform-providers/terraform-provider-azurerm/issues/2807). 
diff --git a/website/docs/guides/migrating-between-renamed-resources.html.markdown b/website/docs/guides/migrating-between-renamed-resources.html.markdown new file mode 100644 index 000000000000..950559b2adf1 --- /dev/null +++ b/website/docs/guides/migrating-between-renamed-resources.html.markdown @@ -0,0 +1,100 @@ +--- +layout: "azurerm" +page_title: "Azure Provider: Migrating to a renamed resource" +sidebar_current: "docs-azurerm-migrating-to-a-renamed-resource" +description: |- + This page documents how to migrate between two resources in the Azure Provider which have been renamed. + +--- + +# Azure Provider: Migrating to a renamed resource + +In v1.22 of the AzureRM Provider several resources have been deprecated in favour of a renamed version - this guide covers how to migrate from the old resource to the new one and is applicable for the following resources: + +| Old Name | New Name | +| ---------------------------------------------- | ------------------------------------ | +| azurerm_log_analytics_workspace_linked_service | azurerm_log_analytics_linked_service | +| azurerm_autoscale_setting | azurerm_monitor_autoscale_setting | +| azurerm_metric_alertrule | azurerm_monitor_metric_alertrule | + +As the Schema's for each resource are the same at this time - it's possible to migrate between the resources by updating your Terraform Configuration and updating the Statefile. + +In this guide we'll assume we're migrating from the `azurerm_autoscale_setting` resource to the new `azurerm_monitor_autoscale_setting` resource, but this should be applicable for any of the resources listed above. + +Assuming we have the following Terraform Configuration: + +```hcl +resource "azurerm_resource_group" "test" { + # ... +} + +resource "azurerm_virtual_machine_scale_set" "test" { + # ... 
+} + +resource "azurerm_autoscale_setting" "test" { + name = "myAutoscaleSetting" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + target_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + + # ... +} +``` + +We can update the Terraform Configuration to use the new resource by updating the name from `azurerm_autoscale_setting` to `azurerm_monitor_autoscale_setting`: + +```hcl +resource "azurerm_resource_group" "test" { + # ... +} + +resource "azurerm_virtual_machine_scale_set" "test" { + # ... +} + +resource "azurerm_monitor_autoscale_setting" "test" { + name = "myAutoscaleSetting" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + target_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + + # ... +} +``` + +As the Terraform Configuration has been updated - we now need to update the State. We can view the items Terraform is tracking in it's Statefile using the `terraform state list` command, for example: + +```bash +$ terraform state list +azurerm_autoscale_setting.test +azurerm_resource_group.test +azurerm_virtual_machine.test +``` + +We can move each of the resources which has been renamed in the state using the `terraform state mv` command - for example: + +```shell +$ terraform state mv azurerm_autoscale_setting.test azurerm_monitor_autoscale_setting.test +Moved azurerm_autoscale_setting.test to azurerm_monitor_autoscale_setting.test +``` + +Once this has been done, running `terraform plan` should show no changes: + +```shell +$ terraform plan +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + + +------------------------------------------------------------------------ + +No changes. Infrastructure is up-to-date. 
+ +This means that Terraform did not detect any differences between your +configuration and real physical resources that exist. As a result, no +actions need to be performed. +``` + +At this point you've switched over to using the newly renamed resources and should be able to continue using Terraform as normal. diff --git a/website/docs/guides/migrating-to-azuread.html.markdown b/website/docs/guides/migrating-to-azuread.html.markdown index 8bcedff449b7..62b66e392ec7 100644 --- a/website/docs/guides/migrating-to-azuread.html.markdown +++ b/website/docs/guides/migrating-to-azuread.html.markdown @@ -25,7 +25,7 @@ As the AzureAD and AzureRM Provider support the same authentication methods - it ```hcl provider "azurerm" { - version = "=1.21.0" + version = "=1.22.0" } ``` diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index 2c8267634a13..219bec156389 100644 --- a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -32,7 +32,7 @@ We recommend using either a Service Principal or Managed Service Identity when r # Configure the Azure Provider provider "azurerm" { # whilst the `version` attribute is optional, we recommend pinning to a given version of the Provider - version = "=1.21.0" + version = "=1.22.0" } # Create a resource group diff --git a/website/docs/r/api_management_group.html.markdown b/website/docs/r/api_management_group.html.markdown new file mode 100644 index 000000000000..571a6f5e8074 --- /dev/null +++ b/website/docs/r/api_management_group.html.markdown @@ -0,0 +1,75 @@ +--- +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_api_management_group" +sidebar_current: "docs-azurerm-resource-api-management-group" +description: |- + Manages an API Management Group. +--- + +# azurerm_api_management_group + +Manages an API Management Group. 
+ + +## Example Usage + +```hcl +resource "azurerm_resource_group" "example" { + name = "example-resources" + location = "West US" +} + +resource "azurerm_api_management" "example" { + name = "example-apim" + location = "${azurerm_resource_group.example.location}" + resource_group_name = "${azurerm_resource_group.example.name}" + publisher_name = "pub1" + publisher_email = "pub1@email.com" + + sku { + name = "Developer" + capacity = 1 + } +} + +resource "azurerm_api_management_group" "example" { + name = "example-apimg" + resource_group_name = "${azurerm_resource_group.example.name}" + api_management_name = "${azurerm_api_management.example.name}" + display_name = "Example Group" + description = "This is an example API management group." +} +``` + + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the API Management Group. Changing this forces a new resource to be created. + +* `resource_group_name` - (Required) The name of the Resource Group in which the API Management Group should exist. Changing this forces a new resource to be created. + +* `api_management_name` - (Required) The name of the [API Management Service](api_management.html) in which the API Management Group should exist. Changing this forces a new resource to be created. + +* `display_name` - (Required) The display name of this API Management Group. + +* `description` - (Optional) The description of this API Management Group. + +* `external_id` - (Optional) The identifier of the external Group. For example, an Azure Active Directory group `aad://.onmicrosoft.com/groups/`. + +* `type` - (Optional) The type of this API Management Group. Possible values are `custom` and `external`. Default is `custom`. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - The ID of the API Management Group. + +## Import + +API Management Groups can be imported using the `resource id`, e.g. 
+ +```shell +terraform import azurerm_api_management_group.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-resources/providers/Microsoft.ApiManagement/service/example-apim/groups/example-apimg +``` diff --git a/website/docs/r/api_management_product.html.markdown b/website/docs/r/api_management_product.html.markdown new file mode 100644 index 000000000000..ca0b6aca4771 --- /dev/null +++ b/website/docs/r/api_management_product.html.markdown @@ -0,0 +1,87 @@ +--- +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_api_management_product" +sidebar_current: "docs-azurerm-resource-api-management-product" +description: |- + Manages an API Management Product. +--- + +# azurerm_api_management_product + +Manages an API Management Product. + +## Example Usage + +```hcl +resource "azurerm_resource_group" "test" { + name = "example-resources" + location = "West Europe" +} + +resource "azurerm_api_management" "test" { + name = "example-apim" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + publisher_name = "My Company" + publisher_email = "company@terraform.io" + + sku { + name = "Developer" + capacity = 1 + } +} + +resource "azurerm_api_management_product" "test" { + product_id = "test-product" + api_management_name = "${azurerm_api_management.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" + display_name = "Test Product" + subscription_required = true + approval_required = true + published = true +} +``` + +## Argument Reference + +The following arguments are supported: + +* `api_management_name` - (Required) The name of the API Management Service. Changing this forces a new resource to be created. + +* `approval_required` - (Optional) Do subscribers need to be approved prior to being able to use the Product? + +-> **NOTE:** `approval_required` can only be set when `subscription_required` is set to `true`. 
+ +* `display_name` - (Required) The Display Name for this API Management Product. + +* `product_id` - (Required) The Identifier for this Product, which must be unique within the API Management Service. Changing this forces a new resource to be created. + +* `published` - (Required) Is this Product Published? + +* `resource_group_name` - (Required) The name of the Resource Group in which the API Management Service should exist. Changing this forces a new resource to be created. + +* `subscription_required` - (Required) Is a Subscription required to access APIs included in this Product? + +--- + +* `description` - (Optional) A description of this Product, which may include HTML formatting tags. + +* `subscriptions_limit` - (Optional) The number of subscriptions a user can have to this Product at the same time. + +-> **NOTE:** `subscriptions_limit` can only be set when `subscription_required` is set to `true`. + +* `terms` - (Optional) The Terms and Conditions for this Product, which must be accepted by Developers before they can begin the Subscription process. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - The ID of the API Management Product. + +## Import + +API Management Products can be imported using the `resource id`, e.g. + +```shell +terraform import azurerm_api_management_product.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.ApiManagement/service/instance1/products/myproduct +``` diff --git a/website/docs/r/api_management_user.html.markdown b/website/docs/r/api_management_user.html.markdown new file mode 100644 index 000000000000..09c5a638e499 --- /dev/null +++ b/website/docs/r/api_management_user.html.markdown @@ -0,0 +1,86 @@ +--- +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_api_management_user" +sidebar_current: "docs-azurerm-resource-api-management-user" +description: |- + Manages an API Management User. 
+--- + +# azurerm_api_management_user + +Manages an API Management User. + +## Example Usage + +```hcl +resource "azurerm_resource_group" "test" { + name = "example-resources" + location = "West Europe" +} + +resource "azurerm_api_management" "test" { + name = "example-apim" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + publisher_name = "My Company" + publisher_email = "company@terraform.io" + + sku { + name = "Developer" + capacity = 1 + } +} + +resource "azurerm_api_management_user" "test" { + user_id = "5931a75ae4bbd512288c680b" + api_management_name = "${azurerm_api_management.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" + first_name = "Example" + last_name = "User" + email = "tom+tfdev@hashicorp.com" + state = "active" +} +``` + +## Argument Reference + +The following arguments are supported: + + +* `api_management_name` - (Required) The name of the API Management Service in which the User should be created. Changing this forces a new resource to be created. + +* `resource_group_name` - (Required) The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created. + +* `email` - (Required) The email address associated with this user. + +* `first_name` - (Required) The first name for this user. + +* `last_name` - (Required) The last name for this user. + +* `user_id` - (Required) The Identifier for this User, which must be unique within the API Management Service. Changing this forces a new resource to be created. + +--- + +* `confirmation` - (Optional) The kind of confirmation email which will be sent to this user. Possible values are `invite` and `signup`. Changing this forces a new resource to be created. + +* `note` - (Optional) A note about this user. + +* `password` - (Optional) The password associated with this user. + +* `state` - (Optional) The state of this user. 
Possible values are `active`, `blocked` and `pending`. + +-> **NOTE:** the State can be changed from Pending -> Active/Blocked but not from Active/Blocked -> Pending. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - The ID of the API Management User. + +## Import + +API Management Users can be imported using the `resource id`, e.g. + +```shell +terraform import azurerm_api_management_user.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.ApiManagement/service/instance1/users/abc123 +``` diff --git a/website/docs/r/app_service.html.markdown b/website/docs/r/app_service.html.markdown index 30d8125dffbf..dc672ce51f5e 100644 --- a/website/docs/r/app_service.html.markdown +++ b/website/docs/r/app_service.html.markdown @@ -13,24 +13,18 @@ Manages an App Service (within an App Service Plan). -> **Note:** When using Slots - the `app_settings`, `connection_string` and `site_config` blocks on the `azurerm_app_service` resource will be overwritten when promoting a Slot using the `azurerm_app_service_active_slot` resource. -## Example Usage (.net 4.x) +## Example Usage -```hcl -resource "random_id" "server" { - keepers = { - azi_id = 1 - } - - byte_length = 8 -} +This example provisions a Windows App Service. 
Other examples of the `azurerm_app_service` resource can be found in [the `./examples/app-service` directory within the Github Repository](https://github.com/terraform-providers/terraform-provider-azurerm/tree/master/examples/app-service) +```hcl resource "azurerm_resource_group" "test" { - name = "some-resource-group" + name = "example-resources" location = "West Europe" } resource "azurerm_app_service_plan" "test" { - name = "some-app-service-plan" + name = "example-appserviceplan" location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" @@ -41,7 +35,7 @@ resource "azurerm_app_service_plan" "test" { } resource "azurerm_app_service" "test" { - name = "${random_id.server.hex}" + name = "example-app-service" location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" app_service_plan_id = "${azurerm_app_service_plan.test.id}" @@ -51,7 +45,7 @@ resource "azurerm_app_service" "test" { scm_type = "LocalGit" } - app_settings { + app_settings = { "SOME_KEY" = "some-value" } @@ -63,48 +57,6 @@ resource "azurerm_app_service" "test" { } ``` -## Example Usage (Java 1.8) - -```hcl -resource "random_id" "server" { - keepers = { - azi_id = 1 - } - - byte_length = 8 -} - -resource "azurerm_resource_group" "test" { - name = "some-resource-group" - location = "West Europe" -} - -resource "azurerm_app_service_plan" "test" { - name = "some-app-service-plan" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - - sku { - tier = "Standard" - size = "S1" - } -} - -resource "azurerm_app_service" "test" { - name = "${random_id.server.hex}" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - app_service_plan_id = "${azurerm_app_service_plan.test.id}" - - site_config { - java_version = "1.8" - java_container = "JETTY" - java_container_version = "9.3" - 
scm_type = "LocalGit" - } -} -``` - ## Argument Reference The following arguments are supported: @@ -119,10 +71,12 @@ The following arguments are supported: * `app_settings` - (Optional) A key-value pair of App Settings. -* `connection_string` - (Optional) An `connection_string` block as defined below. +* `connection_string` - (Optional) One or more `connection_string` blocks as defined below. * `client_affinity_enabled` - (Optional) Should the App Service send session affinity cookies, which route client requests in the same session to the same instance? +* `client_cert_enabled` - (Optional) Does the App Service require client certificates for incoming requests? Defaults to `false`. + * `enabled` - (Optional) Is the App Service Enabled? Changing this forces a new resource to be created. * `https_only` - (Optional) Can the App Service only be accessed via HTTPS? Defaults to `false`. @@ -135,15 +89,17 @@ The following arguments are supported: --- -`connection_string` supports the following: +A `connection_string` block supports the following: * `name` - (Required) The name of the Connection String. + * `type` - (Required) The type of the Connection String. Possible values are `APIHub`, `Custom`, `DocDb`, `EventHub`, `MySQL`, `NotificationHub`, `PostgreSQL`, `RedisCache`, `ServiceBus`, `SQLAzure` and `SQLServer`. + * `value` - (Required) The value for the Connection String. --- -`identity` supports the following: +A `identity` block supports the following: * `type` - (Required) Specifies the identity type of the App Service. At this time the only allowed value is `SystemAssigned`. @@ -151,30 +107,48 @@ The following arguments are supported: --- -`site_config` supports the following: +A `site_config` block supports the following: * `always_on` - (Optional) Should the app be loaded at all times? Defaults to `false`. + * `app_command_line` - (Optional) App command line to launch, e.g. `/sbin/myserver -b 0.0.0.0`. 
+ * `default_documents` - (Optional) The ordering of default documents to load, if an address isn't specified. + * `dotnet_framework_version` - (Optional) The version of the .net framework's CLR used in this App Service. Possible values are `v2.0` (which will use the latest version of the .net framework for the .net CLR v2 - currently `.net 3.5`) and `v4.0` (which corresponds to the latest version of the .net CLR v4 - which at the time of writing is `.net 4.7.1`). [For more information on which .net CLR version to use based on the .net framework you're targeting - please see this table](https://en.wikipedia.org/wiki/.NET_Framework_version_history#Overview). Defaults to `v4.0`. + +* `ftps_state` - (Optional) State of FTP / FTPS service for this App Service. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. + * `http2_enabled` - (Optional) Is HTTP2 Enabled on this App Service? Defaults to `false`. -* `ftps_state` - (Optional) State of FTP / FTPS service for this AppService. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. + * `ip_restriction` - (Optional) One or more `ip_restriction` blocks as defined below. + * `java_version` - (Optional) The version of Java to use. If specified `java_container` and `java_container_version` must also be specified. Possible values are `1.7` and `1.8`. + * `java_container` - (Optional) The Java Container to use. If specified `java_version` and `java_container_version` must also be specified. Possible values are `JETTY` and `TOMCAT`. + * `java_container_version` - (Optional) The version of the Java Container to use. If specified `java_version` and `java_container` must also be specified. * `local_mysql_enabled` - (Optional) Is "MySQL In App" Enabled? This runs a local MySQL instance with your app and shares resources from the App Service plan. ~> **NOTE:** MySQL In App is not intended for production environments and will not scale beyond a single instance. 
Instead you may wish [to use Azure Database for MySQL](/docs/providers/azurerm/r/mysql_database.html). -* `linux_fx_version` - (Optional) Linux App Framework and version for the AppService, e.g. `DOCKER|(golang:latest)`. +* `linux_fx_version` - (Optional) Linux App Framework and version for the App Service. Possible options are a Docker container (`DOCKER|`), a base-64 encoded Docker Compose file (`COMPOSE|${base64encode(file("compose.yml"))}`) or a base-64 encoded Kubernetes Manifest (`KUBE|${base64encode(file("kubernetes.yml"))}`). + +Additional examples of how to run Containers via the `azurerm_app_service` resource can be found in [the `./examples/app-service` directory within the Github Repository](https://github.com/terraform-providers/terraform-provider-azurerm/tree/master/examples/app-service). + * `managed_pipeline_mode` - (Optional) The Managed Pipeline Mode. Possible values are `Integrated` and `Classic`. Defaults to `Integrated`. + * `min_tls_version` - (Optional) The minimum supported TLS version for the app service. Possible values are `1.0`, `1.1`, and `1.2`. Defaults to `1.2` for new app services. + * `php_version` - (Optional) The version of PHP to use in this App Service. Possible values are `5.5`, `5.6`, `7.0`, `7.1` and `7.2`. + * `python_version` - (Optional) The version of Python to use in this App Service. Possible values are `2.7` and `3.4`. + * `remote_debugging_enabled` - (Optional) Is Remote Debugging Enabled? Defaults to `false`. + * `remote_debugging_version` - (Optional) Which version of Visual Studio should the Remote Debugger be compatible with? Possible values are `VS2012`, `VS2013`, `VS2015` and `VS2017`. + * `scm_type` - (Optional) The type of Source Control enabled for this App Service. Possible values include `None` and `LocalGit`. Defaults to `None`. ~> **NOTE:** Additional Source Control types will be added in the future, once support for them has been added in the Azure SDK for Go. 
@@ -189,7 +163,7 @@ The following arguments are supported: --- -`ip_restriction` supports the following: +A `ip_restriction` block supports the following: * `ip_address` - (Required) The IP Address used for this IP Restriction. diff --git a/website/docs/r/app_service_slot.html.markdown b/website/docs/r/app_service_slot.html.markdown index 9345f03f6ba5..232e7a9b1007 100644 --- a/website/docs/r/app_service_slot.html.markdown +++ b/website/docs/r/app_service_slot.html.markdown @@ -51,7 +51,7 @@ resource "azurerm_app_service" "test" { dotnet_framework_version = "v4.0" } - app_settings { + app_settings = { "SOME_KEY" = "some-value" } @@ -73,7 +73,7 @@ resource "azurerm_app_service_slot" "test" { dotnet_framework_version = "v4.0" } - app_settings { + app_settings = { "SOME_KEY" = "some-value" } diff --git a/website/docs/r/application_gateway.html.markdown b/website/docs/r/application_gateway.html.markdown index f8fb46713552..78f9ac82dcc2 100644 --- a/website/docs/r/application_gateway.html.markdown +++ b/website/docs/r/application_gateway.html.markdown @@ -89,6 +89,7 @@ resource "azurerm_application_gateway" "network" { backend_http_settings { name = "${local.http_setting_name}" cookie_based_affinity = "Disabled" + path = "/path1/" port = 80 protocol = "Http" request_timeout = 1 @@ -143,14 +144,20 @@ The following arguments are supported: * `disabled_ssl_protocols` - (Optional) A list of SSL Protocols which should be disabled on this Application Gateway. Possible values are `TLSv1_0`, `TLSv1_1` and `TLSv1_2`. +* `http2_enabled` - (Optional) Is HTTP2 enabled on the application gateway resource? Defaults to `false`. + * `probe` - (Optional) One or more `probe` blocks as defined below. +* `ssl_certificate` - (Optional) One or more `ssl_certificate` blocks as defined below. + * `tags` - (Optional) A mapping of tags to assign to the resource. * `url_path_map` - (Optional) One or more `url_path_map` blocks as defined below. 
* `waf_configuration` - (Optional) A `waf_configuration` block as defined below. +* `custom_error_configuration` - (Optional) One or more `custom_error_configuration` blocks as defined below. + --- A `authentication_certificate` block supports the following: @@ -171,9 +178,13 @@ A `backend_address_pool` block supports the following: * `name` - (Required) The name of the Backend Address Pool. -* `fqdn_list` - (Optional) A list of FQDN's which should be part of the Backend Address Pool. +* `fqdns` - (Optional) A list of FQDN's which should be part of the Backend Address Pool. -* `ip_address_list` - (Optional) A list of IP Addresses which should be part of the Backend Address Pool. +* `fqdn_list` - (Optional **Deprecated**) A list of FQDN's which should be part of the Backend Address Pool. This field has been deprecated in favour of `fqdns` and will be removed in v2.0 of the AzureRM Provider. + +* `ip_addresses` - (Optional) A list of IP Addresses which should be part of the Backend Address Pool. + +* `ip_address_list` - (Optional **Deprecated**) A list of IP Addresses which should be part of the Backend Address Pool. This field has been deprecated in favour of `ip_addresses` and will be removed in v2.0 of the AzureRM Provider. --- @@ -183,6 +194,8 @@ A `backend_http_settings` block supports the following: * `name` - (Required) The name of the Backend HTTP Settings Collection. +* `path` - (Optional) The Path which should be used as a prefix for all HTTP requests. + * `port`- (Required) The port which should be used for this Backend HTTP Settings Collection. * `probe_name` - (Required) The name of an associated HTTP Probe. @@ -195,8 +208,19 @@ A `backend_http_settings` block supports the following: * `authentication_certificate` - (Optional) One or more `authentication_certificate` blocks. +* `connection_draining` - (Optional) A `connection_draining` block as defined below. 
+ --- +A `connection_draining` block supports the following: + +* `enabled` - (Required) If connection draining is enabled or not. + +* `drain_timeout_sec` - (Required) The number of seconds connection draining is active. Acceptable values are from `1` second to `3600` seconds. + +--- + + A `frontend_ip_configuration` block supports the following: * `name` - (Required) The name of the Frontend IP Configuration. @@ -245,6 +269,8 @@ A `http_listener` block supports the following: * `ssl_certificate_name` - (Optional) The name of the associated SSL Certificate which should be used for this HTTP Listener. +* `custom_error_configuration` - (Optional) One or more `custom_error_configuration` blocks as defined below. + --- A `match` block supports the following: @@ -317,6 +343,16 @@ A `sku` block supports the following: --- +A `ssl_certificate` block supports the following: + +* `name` - (Required) The Name of the SSL certificate that is unique within this Application Gateway + +* `data` - (Required) PFX certificate. + +* `password` - (Required) Password for the pfx file specified in data. + +--- + A `url_path_map` block supports the following: * `name` - (Required) The Name of the URL Path Map. @@ -341,6 +377,14 @@ A `waf_configuration` block supports the following: * `file_upload_limit_mb` - (Optional) The File Upload Limit in MB. Accepted values are in the range `1`MB to `500`MB. Defaults to `100`MB. +--- + +A `custom_error_configuration` block supports the following: + +* `status_code` - (Required) Status code of the application gateway customer error. Possible values are `HttpStatus403` and `HttpStatus502` + +* `custom_error_page_url` - (Required) Error page URL of the application gateway customer error. + ## Attributes Reference The following attributes are exported: @@ -371,6 +415,8 @@ The following attributes are exported: * `url_path_map` - A list of `url_path_map` blocks as defined below. 
+* `custom_error_configuration` - A list of `custom_error_configuration` blocks as defined below. + --- A `authentication_certificate` block exports the following: @@ -477,6 +523,12 @@ A `url_path_map` block exports the following: * `path_rule` - A list of `path_rule` blocks as defined above. +--- + +A `custom_error_configuration` block exports the following: + +* `id` - The ID of the Custom Error Configuration. + ## Import Application Gateway's can be imported using the `resource id`, e.g. diff --git a/website/docs/r/application_security_group.html.markdown b/website/docs/r/application_security_group.html.markdown index 0f363bdc1657..dd4cd5dd9894 100644 --- a/website/docs/r/application_security_group.html.markdown +++ b/website/docs/r/application_security_group.html.markdown @@ -23,7 +23,7 @@ resource "azurerm_application_security_group" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { "Hello" = "World" } } diff --git a/website/docs/r/automation_account.html.markdown b/website/docs/r/automation_account.html.markdown index e967c3dd85b9..d3918d9bd194 100644 --- a/website/docs/r/automation_account.html.markdown +++ b/website/docs/r/automation_account.html.markdown @@ -27,7 +27,7 @@ resource "azurerm_automation_account" "example" { name = "Basic" } - tags { + tags = { environment = "development" } } diff --git a/website/docs/r/autoscale_setting.html.markdown b/website/docs/r/autoscale_setting.html.markdown index dfb47bdeff7d..1ee02d975cd0 100644 --- a/website/docs/r/autoscale_setting.html.markdown +++ b/website/docs/r/autoscale_setting.html.markdown @@ -10,6 +10,8 @@ description: |- Manages an AutoScale Setting which can be applied to Virtual Machine Scale Sets, App Services and other scalable resources. 
+~> **NOTE:** This resource has been deprecated in favour of the `azurerm_monitor_autoscale_setting` resource and will be removed in the next major version of the AzureRM Provider. The new resource shares the same fields as this one, and information on migrating across [can be found in this guide](../guides/migrating-between-renamed-resources.html). + ## Example Usage ```hcl @@ -297,13 +299,13 @@ A `profile` block supports the following: A `capacity` block supports the following: -* `default` - (Required) The number of instances that are available for scaling if metrics are not available for evaluation. The default is only used if the current instance count is lower than the default. +* `default` - (Required) The number of instances that are available for scaling if metrics are not available for evaluation. The default is only used if the current instance count is lower than the default. Valid values are between `0` and `1000`. -* `maximum` - (Required) The maximum number of instances for this resource. Valid values are between `1` and `40`. +* `maximum` - (Required) The maximum number of instances for this resource. Valid values are between `0` and `1000`. -> **NOTE:** The maximum number of instances is also limited by the amount of Cores available in the subscription. -* `minimum` - (Required) The minimum number of instances for this resource. Valid values are between `1` and `40`. +* `minimum` - (Required) The minimum number of instances for this resource. Valid values are between `0` and `1000`. --- @@ -317,7 +319,9 @@ A `rule` block supports the following: A `metric_trigger` block supports the following: -* `metric_name` - (Required) The name of the metric that defines what the rule monitors, such as `Percentage CPU`. +* `metric_name` - (Required) The name of the metric that defines what the rule monitors, such as `Percentage CPU` for `Virtual Machine Scale Sets` and `CpuPercentage` for `App Service Plan`. 
+ +-> **NOTE:** The allowed value of `metric_name` highly depends on the targeting resource type, please visit [Supported metrics with Azure Monitor](https://docs.microsoft.com/en-us/azure/azure-monitor/platform/metrics-supported) for more details. * `metric_resource_id` - (Required) The ID of the Resource which the Rule monitors. diff --git a/website/docs/r/availability_set.html.markdown b/website/docs/r/availability_set.html.markdown index 8635129c7e5b..8aad2e8e1c3e 100644 --- a/website/docs/r/availability_set.html.markdown +++ b/website/docs/r/availability_set.html.markdown @@ -24,7 +24,7 @@ resource "azurerm_availability_set" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { environment = "Production" } } diff --git a/website/docs/r/batch_account.html.markdown b/website/docs/r/batch_account.html.markdown index 17205cc6a25d..b36a1e9a796f 100644 --- a/website/docs/r/batch_account.html.markdown +++ b/website/docs/r/batch_account.html.markdown @@ -34,7 +34,7 @@ resource "azurerm_batch_account" "test" { pool_allocation_mode = "BatchService" storage_account_id = "${azurerm_storage_account.test.id}" - tags { + tags = { env = "test" } } diff --git a/website/docs/r/batch_pool.html.markdown b/website/docs/r/batch_pool.html.markdown index 39c441c33ba0..052648ec4e9d 100644 --- a/website/docs/r/batch_pool.html.markdown +++ b/website/docs/r/batch_pool.html.markdown @@ -34,7 +34,7 @@ resource "azurerm_batch_account" "test" { pool_allocation_mode = "BatchService" storage_account_id = "${azurerm_storage_account.test.id}" - tags { + tags = { env = "test" } } @@ -46,7 +46,7 @@ resource "azurerm_batch_pool" "test" { display_name = "Test Acc Pool Auto" vm_size = "Standard_A1" node_agent_sku_id = "batch.node.ubuntu 16.04" - + auto_scale { evaluation_interval = "PT15M" formula = < **NOTE:** This Resource requires that [the Network Watcher Agent Virtual Machine 
Extension](https://docs.microsoft.com/en-us/azure/network-watcher/connection-monitor) is installed on the Virtual Machine before monitoring can be started. The extension can be installed via [the `azurerm_virtual_machine_extension` resource](virtual_machine_extension.html). + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the Connection Monitor. Changing this forces a new resource to be created. + +* `network_watcher_name` - (Required) The name of the Network Watcher. Changing this forces a new resource to be created. + +* `resource_group_name` - (Required) The name of the resource group in which to create the Connection Monitor. Changing this forces a new resource to be created. + +* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + +* `auto_start` - (Optional) Specifies whether the connection monitor will start automatically once created. Defaults to `true`. Changing this forces a new resource to be created. + +* `interval_in_seconds` - (Optional) Monitoring interval in seconds. Defaults to `60`. + +* `source` - (Required) A `source` block as defined below. + +* `destination` - (Required) A `destination` block as defined below. + +* `tags` - (Optional) A mapping of tags to assign to the resource. + +--- + +A `source` block contains: + +* `virtual_machine_id` - (Required) The ID of the Virtual Machine to monitor connectivity from. + +* `port` - (Optional) The port on the Virtual Machine to monitor connectivity from. Defaults to `0` (Dynamic Port Assignment). + +A `destination` block contains: + +* `virtual_machine_id` - (Optional) The ID of the Virtual Machine to monitor connectivity to. + +* `address` - (Optional) IP address or domain name to monitor connectivity to. + +* `port` - (Required) The port on the destination to monitor connectivity to. 
+ +~> **NOTE:** One of `virtual_machine_id` or `address` must be specified. + +## Attributes Reference + +The following attributes are exported: + +* `id` - The Connection Monitor ID. + +## Import + +Connection Monitors can be imported using the `resource id`, e.g. + +```shell +terraform import azurerm_connection_monitor.monitor1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/networkWatchers/watcher1/connectionMonitors/monitor1 +``` diff --git a/website/docs/r/container_group.html.markdown b/website/docs/r/container_group.html.markdown index 307dc9be1642..2fc1f45a407f 100644 --- a/website/docs/r/container_group.html.markdown +++ b/website/docs/r/container_group.html.markdown @@ -58,11 +58,11 @@ resource "azurerm_container_group" "aci-helloworld" { protocol = "TCP" } - environment_variables { + environment_variables = { "NODE_ENV" = "testing" } - secure_environment_variables { + secure_environment_variables = { "ACCESS_KEY" = "secure_testing" } @@ -86,7 +86,7 @@ resource "azurerm_container_group" "aci-helloworld" { memory = "1.5" } - tags { + tags = { environment = "testing" } } diff --git a/website/docs/r/container_service.html.markdown b/website/docs/r/container_service.html.markdown index 3449860e5d1a..131f8c055f4c 100644 --- a/website/docs/r/container_service.html.markdown +++ b/website/docs/r/container_service.html.markdown @@ -53,7 +53,7 @@ resource "azurerm_container_service" "test" { enabled = false } - tags { + tags = { Environment = "Production" } } @@ -102,7 +102,7 @@ resource "azurerm_container_service" "test" { enabled = false } - tags { + tags = { Environment = "Production" } } @@ -146,7 +146,7 @@ resource "azurerm_container_service" "test" { enabled = false } - tags { + tags = { Environment = "Production" } } diff --git a/website/docs/r/data_lake_store.html.markdown b/website/docs/r/data_lake_store.html.markdown index 4b13e036f591..ebb1fefd5e9c 100644 --- 
a/website/docs/r/data_lake_store.html.markdown +++ b/website/docs/r/data_lake_store.html.markdown @@ -23,7 +23,7 @@ resource "azurerm_data_lake_store" "example" { resource_group_name = "${azurerm_resource_group.example.name}" location = "${azurerm_resource_group.example.location}" encryption_state = "Enabled" - encryption_type = "SystemManaged" + encryption_type = "ServiceManaged" } ``` @@ -41,7 +41,7 @@ The following arguments are supported: * `encryption_state` - (Optional) Is Encryption enabled on this Data Lake Store Account? Possible values are `Enabled` or `Disabled`. Defaults to `Enabled`. -* `encryption_type` - (Optional) The Encryption Type used for this Data Lake Store Account. Currently can be set to `SystemManaged` when `encryption_state` is `Enabled` - and must be a blank string when it's Disabled. +* `encryption_type` - (Optional) The Encryption Type used for this Data Lake Store Account. Currently can be set to `ServiceManaged` when `encryption_state` is `Enabled` - and must be a blank string when it's Disabled. -> **NOTE:** Support for User Managed encryption will be supported in the future once a bug in the API is fixed. 
diff --git a/website/docs/r/databricks_workspace.html.markdown b/website/docs/r/databricks_workspace.html.markdown index 09084693d162..c200849e8574 100644 --- a/website/docs/r/databricks_workspace.html.markdown +++ b/website/docs/r/databricks_workspace.html.markdown @@ -24,7 +24,7 @@ resource "azurerm_databricks_workspace" "test" { location = "${azurerm_resource_group.test.location}" sku = "standard" - tags { + tags = { Environment = "Production" } } diff --git a/website/docs/r/dev_test_lab.html.markdown b/website/docs/r/dev_test_lab.html.markdown index 7025cb052601..6f69cdd8faf1 100644 --- a/website/docs/r/dev_test_lab.html.markdown +++ b/website/docs/r/dev_test_lab.html.markdown @@ -23,7 +23,7 @@ resource "azurerm_dev_test_lab" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { "Sydney" = "Australia" } } diff --git a/website/docs/r/dev_test_linux_virtual_machine.html.markdown b/website/docs/r/dev_test_linux_virtual_machine.html.markdown index e62ec999e341..b2f7266ff6cb 100644 --- a/website/docs/r/dev_test_linux_virtual_machine.html.markdown +++ b/website/docs/r/dev_test_linux_virtual_machine.html.markdown @@ -23,7 +23,7 @@ resource "azurerm_dev_test_lab" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { "Sydney" = "Australia" } } diff --git a/website/docs/r/dev_test_policy.html.markdown b/website/docs/r/dev_test_policy.html.markdown index 9a4d21a03ec6..c4db2f05dbf4 100644 --- a/website/docs/r/dev_test_policy.html.markdown +++ b/website/docs/r/dev_test_policy.html.markdown @@ -23,7 +23,7 @@ resource "azurerm_dev_test_lab" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { "Sydney" = "Australia" } } @@ -37,7 +37,7 @@ resource "azurerm_dev_test_policy" "test" { threshold = "999" evaluator_type = 
"MaxValuePolicy" - tags { + tags = { "Acceptance" = "Test" } } diff --git a/website/docs/r/dev_test_virtual_network.html.markdown b/website/docs/r/dev_test_virtual_network.html.markdown index 87fb3403f69e..05cb20eead5b 100644 --- a/website/docs/r/dev_test_virtual_network.html.markdown +++ b/website/docs/r/dev_test_virtual_network.html.markdown @@ -23,7 +23,7 @@ resource "azurerm_dev_test_lab" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { "Sydney" = "Australia" } } diff --git a/website/docs/r/dev_test_windows_virtual_machine.html.markdown b/website/docs/r/dev_test_windows_virtual_machine.html.markdown index 7655f5d97ab1..27a75e9133eb 100644 --- a/website/docs/r/dev_test_windows_virtual_machine.html.markdown +++ b/website/docs/r/dev_test_windows_virtual_machine.html.markdown @@ -23,7 +23,7 @@ resource "azurerm_dev_test_lab" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { "Sydney" = "Australia" } } diff --git a/website/docs/r/devspace_controller.html.markdown b/website/docs/r/devspace_controller.html.markdown index 924cbcbd6d41..f161ca776150 100644 --- a/website/docs/r/devspace_controller.html.markdown +++ b/website/docs/r/devspace_controller.html.markdown @@ -50,7 +50,7 @@ resource "azurerm_devspace_controller" test { target_container_host_resource_id = "${azurerm_kubernetes_cluster.test.id}" target_container_host_credentials_base64 = "${base64encode(azurerm_kubernetes_cluster.test.kube_config_raw)}" - tags { + tags = { Environment = "Testing" } } diff --git a/website/docs/r/dns_caa_record.html.markdown b/website/docs/r/dns_caa_record.html.markdown index 4ab33a4f1c8e..0e8688921ed1 100644 --- a/website/docs/r/dns_caa_record.html.markdown +++ b/website/docs/r/dns_caa_record.html.markdown @@ -53,7 +53,7 @@ resource "azurerm_dns_caa_record" "test" { value = 
"mailto:terraform@nonexisting.tld" } - tags { + tags = { Environment = "Production" } } diff --git a/website/docs/r/dns_mx_record.html.markdown b/website/docs/r/dns_mx_record.html.markdown index 61a3d445bf2f..aa5f86123490 100644 --- a/website/docs/r/dns_mx_record.html.markdown +++ b/website/docs/r/dns_mx_record.html.markdown @@ -39,7 +39,7 @@ resource "azurerm_dns_mx_record" "test" { exchange = "mail2.contoso.com" } - tags { + tags = { Environment = "Production" } } diff --git a/website/docs/r/dns_ns_record.html.markdown b/website/docs/r/dns_ns_record.html.markdown index 103a8b1e3e37..655cf7278feb 100644 --- a/website/docs/r/dns_ns_record.html.markdown +++ b/website/docs/r/dns_ns_record.html.markdown @@ -31,7 +31,7 @@ resource "azurerm_dns_ns_record" "test" { records = ["ns1.contoso.com", "ns2.contoso.com"] - tags { + tags = { Environment = "Production" } } diff --git a/website/docs/r/dns_srv_record.html.markdown b/website/docs/r/dns_srv_record.html.markdown index 8b5e5b3bc64d..6c4388e65703 100644 --- a/website/docs/r/dns_srv_record.html.markdown +++ b/website/docs/r/dns_srv_record.html.markdown @@ -36,7 +36,7 @@ resource "azurerm_dns_srv_record" "test" { target = "target1.contoso.com" } - tags { + tags = { Environment = "Production" } } diff --git a/website/docs/r/dns_txt_record.html.markdown b/website/docs/r/dns_txt_record.html.markdown index b4bb632a3076..729fee670e60 100644 --- a/website/docs/r/dns_txt_record.html.markdown +++ b/website/docs/r/dns_txt_record.html.markdown @@ -37,7 +37,7 @@ resource "azurerm_dns_txt_record" "test" { value = "more site information here" } - tags { + tags = { Environment = "Production" } } diff --git a/website/docs/r/eventgrid_domain.html.markdown b/website/docs/r/eventgrid_domain.html.markdown new file mode 100644 index 000000000000..f7b25747b80e --- /dev/null +++ b/website/docs/r/eventgrid_domain.html.markdown @@ -0,0 +1,91 @@ +--- +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_eventgrid_domain" 
+sidebar_current: "docs-azurerm-resource-messaging-eventgrid-domain" +description: |- + Manages an EventGrid Domain + +--- + +# azurerm_eventgrid_domain + +Manages an EventGrid Domain + +## Example Usage + +```hcl +resource "azurerm_resource_group" "test" { + name = "resourceGroup1" + location = "West US 2" +} + +resource "azurerm_eventgrid_domain" "test" { + name = "my-eventgrid-domain" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + tags = { + environment = "Production" + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) Specifies the name of the EventGrid Domain resource. Changing this forces a new resource to be created. + +* `resource_group_name` - (Required) The name of the resource group in which the EventGrid Domain exists. Changing this forces a new resource to be created. + +* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + +* `input_schema` - (Optional) Specifies the schema in which incoming events will be published to this domain. Allowed values are `cloudeventv01schema`, `customeventschema`, or `eventgridschema`. Defaults to `eventgridschema`. Changing this forces a new resource to be created. + +* `input_mapping_fields` - (Optional) A `input_mapping_fields` block as defined below. + +* `input_mapping_default_values` - (Optional) A `input_mapping_default_values` block as defined below. + +* `tags` - (Optional) A mapping of tags to assign to the resource. + +--- + +A `input_mapping_fields` supports the following: + +* `id` - (Optional) Specifies the id of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + +* `topic` - (Optional) Specifies the topic of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. 
+ +* `event_type` - (Optional) Specifies the event type of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + +* `event_time` - (Optional) Specifies the event time of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + +* `data_version` - (Optional) Specifies the data version of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + +* `subject` - (Optional) Specifies the subject of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + +--- + +An `input_mapping_default_values` block supports the following: + +* `event_type` - (Optional) Specifies the default event type of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + +* `data_version` - (Optional) Specifies the default data version of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + +* `subject` - (Optional) Specifies the default subject of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + +## Attributes Reference + +The following attributes are exported: + +* `id` - The ID of the EventGrid Domain. + +* `endpoint` - The Endpoint associated with the EventGrid Domain. + +## Import + +EventGrid Domains can be imported using the `resource id`, e.g. 
+ +```shell +terraform import azurerm_eventgrid_domain.domain1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.EventGrid/domains/domain1 +``` diff --git a/website/docs/r/eventgrid_topic.html.markdown b/website/docs/r/eventgrid_topic.html.markdown index 585789c26fa9..293cb4a3e3e5 100644 --- a/website/docs/r/eventgrid_topic.html.markdown +++ b/website/docs/r/eventgrid_topic.html.markdown @@ -26,7 +26,7 @@ resource "azurerm_eventgrid_topic" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - tags { + tags = { environment = "Production" } } diff --git a/website/docs/r/eventhub.html.markdown b/website/docs/r/eventhub.html.markdown index 91e79ac82971..402d5b5dd2fd 100644 --- a/website/docs/r/eventhub.html.markdown +++ b/website/docs/r/eventhub.html.markdown @@ -26,7 +26,7 @@ resource "azurerm_eventhub_namespace" "test" { capacity = 1 kafka_enabled = false - tags { + tags = { environment = "Production" } } diff --git a/website/docs/r/eventhub_authorization_rule.html.markdown b/website/docs/r/eventhub_authorization_rule.html.markdown index aba16a4c836a..fb19eb708a41 100644 --- a/website/docs/r/eventhub_authorization_rule.html.markdown +++ b/website/docs/r/eventhub_authorization_rule.html.markdown @@ -25,7 +25,7 @@ resource "azurerm_eventhub_namespace" "test" { sku = "Basic" capacity = 2 - tags { + tags = { environment = "Production" } } diff --git a/website/docs/r/eventhub_consumer_group.html.markdown b/website/docs/r/eventhub_consumer_group.html.markdown index 0f71c1c6c425..20a4a3e149e0 100644 --- a/website/docs/r/eventhub_consumer_group.html.markdown +++ b/website/docs/r/eventhub_consumer_group.html.markdown @@ -25,7 +25,7 @@ resource "azurerm_eventhub_namespace" "test" { sku = "Basic" capacity = 2 - tags { + tags = { environment = "Production" } } diff --git a/website/docs/r/eventhub_namespace.html.markdown 
b/website/docs/r/eventhub_namespace.html.markdown index 29eed09fd074..823e77ecd0a5 100644 --- a/website/docs/r/eventhub_namespace.html.markdown +++ b/website/docs/r/eventhub_namespace.html.markdown @@ -25,7 +25,7 @@ resource "azurerm_eventhub_namespace" "test" { sku = "Standard" capacity = 2 - tags { + tags = { environment = "Production" } } diff --git a/website/docs/r/eventhub_namespace_authorization_rule.html.markdown b/website/docs/r/eventhub_namespace_authorization_rule.html.markdown index 751788a3a455..aefe6af559a2 100644 --- a/website/docs/r/eventhub_namespace_authorization_rule.html.markdown +++ b/website/docs/r/eventhub_namespace_authorization_rule.html.markdown @@ -25,7 +25,7 @@ resource "azurerm_eventhub_namespace" "test" { sku = "Basic" capacity = 2 - tags { + tags = { environment = "Production" } } diff --git a/website/docs/r/express_route_circuit.html.markdown b/website/docs/r/express_route_circuit.html.markdown index 4ea358349f52..e2f7649d0ea2 100644 --- a/website/docs/r/express_route_circuit.html.markdown +++ b/website/docs/r/express_route_circuit.html.markdown @@ -31,7 +31,7 @@ resource "azurerm_express_route_circuit" "test" { family = "MeteredData" } - tags { + tags = { environment = "Production" } } diff --git a/website/docs/r/express_route_circuit_authorization.html.markdown b/website/docs/r/express_route_circuit_authorization.html.markdown index e77c01152b7f..7d2d768f2792 100644 --- a/website/docs/r/express_route_circuit_authorization.html.markdown +++ b/website/docs/r/express_route_circuit_authorization.html.markdown @@ -33,7 +33,7 @@ resource "azurerm_express_route_circuit" "test" { allow_classic_operations = false - tags { + tags = { environment = "Production" } } diff --git a/website/docs/r/express_route_circuit_peering.html.markdown b/website/docs/r/express_route_circuit_peering.html.markdown index c8032f16755f..1b347459bda3 100644 --- a/website/docs/r/express_route_circuit_peering.html.markdown +++ 
b/website/docs/r/express_route_circuit_peering.html.markdown @@ -33,7 +33,7 @@ resource "azurerm_express_route_circuit" "test" { allow_classic_operations = false - tags { + tags = { environment = "Production" } } diff --git a/website/docs/r/function_app.html.markdown b/website/docs/r/function_app.html.markdown index aba312628e5b..e04c646de18d 100644 --- a/website/docs/r/function_app.html.markdown +++ b/website/docs/r/function_app.html.markdown @@ -136,6 +136,8 @@ The following arguments are supported: * `websockets_enabled` - (Optional) Should WebSockets be enabled? +* `linux_fx_version` - (Optional) Linux App Framework and version for the AppService, e.g. `DOCKER|(golang:latest)`. + --- `identity` supports the following: @@ -157,6 +159,8 @@ The following attributes are exported: * `site_credential` - A `site_credential` block as defined below, which contains the site-level credentials used to publish to this App Service. +* `kind` - The Function App kind - such as `functionapp,linux,container` + --- `identity` exports the following: diff --git a/website/docs/r/iothub.html.markdown b/website/docs/r/iothub.html.markdown index 10fdf75e58b8..f3e99e327af6 100644 --- a/website/docs/r/iothub.html.markdown +++ b/website/docs/r/iothub.html.markdown @@ -63,7 +63,11 @@ resource "azurerm_iothub" "test" { enabled = true } - tags { + fallback_route { + enabled = true + } + + tags = { "purpose" = "testing" } } @@ -85,6 +89,8 @@ The following arguments are supported: * `route` - (Optional) A `route` block as defined below. +* `fallback_route` - (Optional) A `fallback_route` block as defined below. If the fallback route is enabled, messages that don't match any of the supplied routes are automatically sent to this route. Defaults to messages/events. + * `tags` - (Optional) A mapping of tags to assign to the resource. --- @@ -123,7 +129,7 @@ An `endpoint` block supports the following: A `route` block supports the following: -* `name` - (Required) The name of the route. 
The name can only include alphanumeric characters, periods, underscores, hyphens, has a maximum length of 64 characters, and must be unique. +* `name` - (Required) The name of the route. * `source` - (Required) The source that the routing rule is to be applied to, such as `DeviceMessages`. Possible values include: `RoutingSourceInvalid`, `RoutingSourceDeviceMessages`, `RoutingSourceTwinChangeEvents`, `RoutingSourceDeviceLifecycleEvents`, `RoutingSourceDeviceJobLifecycleEvents`. @@ -133,6 +139,18 @@ A `route` block supports the following: * `enabled` - (Required) Used to specify whether a route is enabled. +--- + +A `fallback_route` block supports the following: + +* `source` - (Optional) The source that the routing rule is to be applied to, such as `DeviceMessages`. Possible values include: `RoutingSourceInvalid`, `RoutingSourceDeviceMessages`, `RoutingSourceTwinChangeEvents`, `RoutingSourceDeviceLifecycleEvents`, `RoutingSourceDeviceJobLifecycleEvents`. + +* `condition` - (Optional) The condition that is evaluated to apply the routing rule. If no condition is provided, it evaluates to true by default. For grammar, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language. + +* `endpoint_names` - (Optional) The endpoints to which messages that satisfy the condition are routed. Currently only 1 endpoint is allowed. + +* `enabled` - (Optional) Used to specify whether the fallback route is enabled. 
+ ## Attributes Reference The following attributes are exported: diff --git a/website/docs/r/iothub_consumer_group.html.markdown b/website/docs/r/iothub_consumer_group.html.markdown index dfb4fff52580..2b8d86f365d5 100644 --- a/website/docs/r/iothub_consumer_group.html.markdown +++ b/website/docs/r/iothub_consumer_group.html.markdown @@ -29,7 +29,7 @@ resource "azurerm_iothub" "test" { capacity = "1" } - tags { + tags = { "purpose" = "testing" } } diff --git a/website/docs/r/key_vault.html.markdown b/website/docs/r/key_vault.html.markdown index 5e3685b1fc84..b2b95b635fac 100644 --- a/website/docs/r/key_vault.html.markdown +++ b/website/docs/r/key_vault.html.markdown @@ -49,7 +49,7 @@ resource "azurerm_key_vault" "test" { bypass = "AzureServices" } - tags { + tags = { environment = "Production" } } @@ -93,7 +93,7 @@ A `access_policy` block supports the following: * `application_id` - (Optional) The object ID of an Application in Azure Active Directory. -* `certificate_permissions` - (Optional) List of certificate permissions, must be one or more from the following: `create`, `delete`, `deleteissuers`, `get`, `getissuers`, `import`, `list`, `listissuers`, `managecontacts`, `manageissuers`, `purge`, `recover`, `setissuers` and `update`. +* `certificate_permissions` - (Optional) List of certificate permissions, must be one or more from the following: `backup`, `create`, `delete`, `deleteissuers`, `get`, `getissuers`, `import`, `list`, `listissuers`, `managecontacts`, `manageissuers`, `purge`, `recover`, `restore`, `setissuers` and `update`. * `key_permissions` - (Required) List of key permissions, must be one or more from the following: `backup`, `create`, `decrypt`, `delete`, `encrypt`, `get`, `import`, `list`, `purge`, `recover`, `restore`, `sign`, `unwrapKey`, `update`, `verify` and `wrapKey`. @@ -116,7 +116,7 @@ A `network_acls` block supports the following: A `sku` block supports the following: -* `name` - (Required) The Name of the SKU used for this Key Vault. 
Possible values are `Standard` and `Premium`. +* `name` - (Required) The Name of the SKU used for this Key Vault. Possible values are `standard` and `premium`. ## Attributes Reference diff --git a/website/docs/r/key_vault_access_policy.html.markdown b/website/docs/r/key_vault_access_policy.html.markdown index 0d49cca64a5d..31a5ac34729a 100644 --- a/website/docs/r/key_vault_access_policy.html.markdown +++ b/website/docs/r/key_vault_access_policy.html.markdown @@ -35,7 +35,7 @@ resource "azurerm_key_vault" "test" { enabled_for_disk_encryption = true - tags { + tags = { environment = "Production" } } @@ -79,8 +79,8 @@ The following arguments are supported: * `application_id` - (Optional) The object ID of an Application in Azure Active Directory. * `certificate_permissions` - (Optional) List of certificate permissions, must be one or more from - the following: `create`, `delete`, `deleteissuers`, `get`, `getissuers`, `import`, `list`, `listissuers`, - `managecontacts`, `manageissuers`, `purge`, `recover`, `setissuers` and `update`. + the following: `backup`, `create`, `delete`, `deleteissuers`, `get`, `getissuers`, `import`, `list`, `listissuers`, + `managecontacts`, `manageissuers`, `purge`, `recover`, `restore`, `setissuers` and `update`. 
* `key_permissions` - (Required) List of key permissions, must be one or more from the following: `backup`, `create`, `decrypt`, `delete`, `encrypt`, `get`, `import`, `list`, `purge`, diff --git a/website/docs/r/key_vault_certificate.html.markdown b/website/docs/r/key_vault_certificate.html.markdown index 6c79b733702b..a4453cec20dd 100644 --- a/website/docs/r/key_vault_certificate.html.markdown +++ b/website/docs/r/key_vault_certificate.html.markdown @@ -83,14 +83,14 @@ resource "azurerm_key_vault" "test" { ] } - tags { + tags = { environment = "Production" } } resource "azurerm_key_vault_certificate" "test" { - name = "imported-cert" - vault_uri = "${azurerm_key_vault.test.vault_uri}" + name = "imported-cert" + key_vault_id = "${azurerm_key_vault.test.id}" certificate { contents = "${base64encode(file("certificate-to-import.pfx"))}" @@ -158,14 +158,14 @@ resource "azurerm_key_vault" "test" { ] } - tags { + tags = { environment = "Production" } } resource "azurerm_key_vault_certificate" "test" { - name = "generated-cert" - vault_uri = "${azurerm_key_vault.test.vault_uri}" + name = "generated-cert" + key_vault_id = "${azurerm_key_vault.test.id}" certificate_policy { issuer_parameters { @@ -225,7 +225,7 @@ The following arguments are supported: * `name` - (Required) Specifies the name of the Key Vault Certificate. Changing this forces a new resource to be created. -* `vault_uri` - (Required) Specifies the URI used to access the Key Vault instance, available on the `azurerm_key_vault` resource. +* `key_vault_id` - (Required) The ID of the Key Vault where the Certificate should be created. * `certificate` - (Optional) A `certificate` block as defined below, used to Import an existing certificate. 
diff --git a/website/docs/r/key_vault_key.html.markdown b/website/docs/r/key_vault_key.html.markdown index a256b8f7d514..022ece97dcde 100644 --- a/website/docs/r/key_vault_key.html.markdown +++ b/website/docs/r/key_vault_key.html.markdown @@ -30,7 +30,7 @@ resource "random_id" "server" { } resource "azurerm_key_vault" "test" { - name = "${format("%s%s", "kv", random_id.server.hex)}" + name = "keyvaultkeyexample" location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" tenant_id = "${data.azurerm_client_config.current.tenant_id}" @@ -53,14 +53,14 @@ resource "azurerm_key_vault" "test" { ] } - tags { + tags = { environment = "Production" } } resource "azurerm_key_vault_key" "generated" { name = "generated-certificate" - vault_uri = "${azurerm_key_vault.test.vault_uri}" + key_vault_id = "${azurerm_key_vault.test.id}" key_type = "RSA" key_size = 2048 @@ -81,7 +81,7 @@ The following arguments are supported: * `name` - (Required) Specifies the name of the Key Vault Key. Changing this forces a new resource to be created. -* `vault_uri` - (Required) Specifies the URI used to access the Key Vault instance, available on the `azurerm_key_vault` resource. +* `key_vault_id` - (Required) The ID of the Key Vault where the Key should be created. * `key_type` - (Required) Specifies the Key Type to use for this Key Vault Key. Possible values are `EC` (Elliptic Curve), `Oct` (Octet), `RSA` and `RSA-HSM`. Changing this forces a new resource to be created. 
diff --git a/website/docs/r/key_vault_secret.html.markdown b/website/docs/r/key_vault_secret.html.markdown index 1de443188fb2..152e8163045b 100644 --- a/website/docs/r/key_vault_secret.html.markdown +++ b/website/docs/r/key_vault_secret.html.markdown @@ -58,17 +58,17 @@ resource "azurerm_key_vault" "test" { ] } - tags { + tags = { environment = "Production" } } resource "azurerm_key_vault_secret" "test" { - name = "secret-sauce" - value = "szechuan" - vault_uri = "${azurerm_key_vault.test.vault_uri}" + name = "secret-sauce" + value = "szechuan" + key_vault_id = "${azurerm_key_vault.test.id}" - tags { + tags = { environment = "Production" } } @@ -82,7 +82,7 @@ The following arguments are supported: * `value` - (Required) Specifies the value of the Key Vault Secret. -* `vault_uri` - (Required) Specifies the URI used to access the Key Vault instance, available on the `azurerm_key_vault` resource. +* `key_vault_id` - (Required) The ID of the Key Vault where the Secret should be created. * `content_type` - (Optional) Specifies the content type for the Key Vault Secret. diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 0372ff3487e6..38be895ecf46 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -41,7 +41,7 @@ resource "azurerm_kubernetes_cluster" "test" { client_secret = "00000000000000000000000000000000" } - tags { + tags = { Environment = "Production" } } @@ -146,13 +146,13 @@ A `network_profile` block supports the following: -> **NOTE:** When `network_plugin` is set to `azure` - the `vnet_subnet_id` field in the `agent_pool_profile` block must be set. -* `dns_service_ip` - (Optional) IP address within the Kubernetes service address range that will be used by cluster service discovery (kube-dns). This is required when `network_plugin` is set to `kubenet`. Changing this forces a new resource to be created. 
+* `dns_service_ip` - (Optional) IP address within the Kubernetes service address range that will be used by cluster service discovery (kube-dns). This is required when `network_plugin` is set to `azure`. Changing this forces a new resource to be created. -* `docker_bridge_cidr` - (Optional) IP address (in CIDR notation) used as the Docker bridge IP address on nodes. This is required when `network_plugin` is set to `kubenet`. Changing this forces a new resource to be created. +* `docker_bridge_cidr` - (Optional) IP address (in CIDR notation) used as the Docker bridge IP address on nodes. This is required when `network_plugin` is set to `azure`. Changing this forces a new resource to be created. * `pod_cidr` - (Optional) The CIDR to use for pod IP addresses. This field can only be set when `network_plugin` is set to `kubenet`. Changing this forces a new resource to be created. -* `service_cidr` - (Optional) The Network Range used by the Kubernetes service. This is required when `network_plugin` is set to `kubenet`. Changing this forces a new resource to be created. +* `service_cidr` - (Optional) The Network Range used by the Kubernetes service. This is required when `network_plugin` is set to `azure`. Changing this forces a new resource to be created. ~> **NOTE:** This range should not be used by any network element on or connected to this VNet. Service address CIDR must be smaller than /12. diff --git a/website/docs/r/loadbalancer_outbound_rule.html.markdown b/website/docs/r/loadbalancer_outbound_rule.html.markdown new file mode 100644 index 000000000000..6052cf127f80 --- /dev/null +++ b/website/docs/r/loadbalancer_outbound_rule.html.markdown @@ -0,0 +1,92 @@ +--- +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_lb_outbound_rule" +sidebar_current: "docs-azurerm-resource-loadbalancer-outbound-rule" +description: |- + Manages a Load Balancer Outbound Rule. +--- + +# azurerm_lb_outbound_rule + +Manages a Load Balancer Outbound Rule. 
+ +~> **NOTE** When using this resource, the Load Balancer needs to have a FrontEnd IP Configuration and a Backend Address Pool Attached. + +## Example Usage + +```hcl +resource "azurerm_resource_group" "test" { + name = "LoadBalancerRG" + location = "West US" +} + +resource "azurerm_public_ip" "test" { + name = "PublicIPForLB" + location = "West US" + resource_group_name = "${azurerm_resource_group.test.name}" + allocation_method = "Static" +} + +resource "azurerm_lb" "test" { + name = "TestLoadBalancer" + location = "West US" + resource_group_name = "${azurerm_resource_group.test.name}" + + frontend_ip_configuration { + name = "PublicIPAddress" + public_ip_address_id = "${azurerm_public_ip.test.id}" + } +} + +resource "azurerm_lb_backend_address_pool" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + loadbalancer_id = "${azurerm_lb.test.id}" + name = "BackEndAddressPool" +} + +resource "azurerm_lb_outbound_rule" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + loadbalancer_id = "${azurerm_lb.test.id}" + name = "OutboundRule" + protocol = "Tcp" + backend_address_pool_id = "${azurerm_lb_backend_address_pool.test.id}" + + frontend_ip_configuration { + name = "PublicIPAddress" + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) Specifies the name of the Outbound Rule. Changing this forces a new resource to be created. +* `resource_group_name` - (Required) The name of the resource group in which to create the resource. Changing this forces a new resource to be created. +* `loadbalancer_id` - (Required) The ID of the Load Balancer in which to create the Outbound Rule. Changing this forces a new resource to be created. +* `frontend_ip_configuration` - (Required) One or more `frontend_ip_configuration` blocks as defined below. +* `backend_address_pool_id` - (Required) The ID of the Backend Address Pool. Outbound traffic is randomly load balanced across IPs in the backend IPs. 
+* `protocol` - (Required) The transport protocol for the external endpoint. Possible values are `Udp`, `Tcp` or `All`. +* `enable_tcp_reset` - (Optional) Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination. This element is only used when the protocol is set to TCP. +* `allocated_outbound_ports` - (Optional) The number of outbound ports to be used for NAT. +* `idle_timeout_in_minutes` - (Optional) The timeout for the TCP idle connection. + +--- + +A `frontend_ip_configuration` block supports the following: + +* `name` - (Required) The name of the Frontend IP Configuration. + +## Attributes Reference + +The following attributes are exported: + +* `id` - The ID of the Load Balancer to which the resource is attached. + +## Import + +Load Balancer Outbound Rules can be imported using the `resource id`, e.g. + +```shell +terraform import azurerm_lb_outbound_rule.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/loadBalancers/lb1/outboundRules/rule1 +``` diff --git a/website/docs/r/log_analytics_linked_service.html.markdown b/website/docs/r/log_analytics_linked_service.html.markdown new file mode 100644 index 000000000000..ae97126863a1 --- /dev/null +++ b/website/docs/r/log_analytics_linked_service.html.markdown @@ -0,0 +1,86 @@ +--- +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_log_analytics_linked_service" +sidebar_current: "docs-azurerm-log-analytics-linked-service" +description: |- + Manages a Log Analytics (formerly Operational Insights) Linked Service. +--- + +# azurerm_log_analytics_linked_service + +Links a Log Analytics (formerly Operational Insights) Workspace to another resource. The (currently) only linkable service is an Azure Automation Account. 
+ +## Example Usage + +```hcl +resource "azurerm_resource_group" "test" { + name = "resourcegroup-01" + location = "West Europe" +} + +resource "azurerm_automation_account" "test" { + name = "automation-01" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + sku { + name = "Basic" + } + + tags = { + environment = "development" + } +} + +resource "azurerm_log_analytics_workspace" "test" { + name = "workspace-01" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + sku = "PerGB2018" + retention_in_days = 30 +} + +resource "azurerm_log_analytics_linked_service" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + workspace_name = "${azurerm_log_analytics_workspace.test.name}" + resource_id = "${azurerm_automation_account.test.id}" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `resource_group_name` - (Required) The name of the resource group in which the Log Analytics Linked Service is created. Changing this forces a new resource to be created. + +* `workspace_name` - (Required) Name of the Log Analytics Workspace that will contain the linkedServices resource. Changing this forces a new resource to be created. + +* `linked_service_name` - (Optional) Name of the type of linkedServices resource to connect to the Log Analytics Workspace specified in `workspace_name`. Currently it defaults to and only supports `automation` as a value. Changing this forces a new resource to be created. + +* `resource_id` - (Optional) The ID of the Resource that will be linked to the workspace. + +* `linked_service_properties` - (Optional **Deprecated**) A `linked_service_properties` block as defined below. + +* `tags` - (Optional) A mapping of tags to assign to the resource. 
+ +--- + +`linked_service_properties` supports the following: + +* `resource_id` - (Optional **Deprecated**) The resource id of the resource that will be linked to the workspace. This field has been deprecated in favour of the top-level `resource_id` field and will be removed in v2.0 of the AzureRM Provider. + +## Attributes Reference + +The following attributes are exported: + +* `id` - The Log Analytics Linked Service ID. + +* `name` - The automatically generated name of the Linked Service. This cannot be specified. The format is always `<workspace name>/<linked service type>` e.g. `workspace1/Automation` + +## Import + +Log Analytics Linked Services can be imported using the `resource id`, e.g. + +```shell +terraform import azurerm_log_analytics_linked_service.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.OperationalInsights/workspaces/workspace1/linkedservices/automation +``` diff --git a/website/docs/r/log_analytics_solution.html.markdown b/website/docs/r/log_analytics_solution.html.markdown index b6d744255439..cf65a4cae2db 100644 --- a/website/docs/r/log_analytics_solution.html.markdown +++ b/website/docs/r/log_analytics_solution.html.markdown @@ -1,7 +1,7 @@ --- layout: "azurerm" page_title: "Azure Resource Manager: azurerm_log_analytics_solution" -sidebar_current: "docs-azurerm-oms-log-analytics-solution" +sidebar_current: "docs-azurerm-log-analytics-solution" description: |- Manages a Log Analytics (formally Operational Insights) Solution. 
--- diff --git a/website/docs/r/log_analytics_workspace.html.markdown b/website/docs/r/log_analytics_workspace.html.markdown index 7d2001f48cda..9dd8a7884362 100644 --- a/website/docs/r/log_analytics_workspace.html.markdown +++ b/website/docs/r/log_analytics_workspace.html.markdown @@ -1,7 +1,7 @@ --- layout: "azurerm" page_title: "Azure Resource Manager: azurerm_log_analytics_workspace" -sidebar_current: "docs-azurerm-oms-log-analytics-workspace-x" +sidebar_current: "docs-azurerm-log-analytics-workspace-x" description: |- Manages a Log Analytics (formally Operational Insights) Workspace. --- diff --git a/website/docs/r/log_analytics_workspace_linked_service.html.markdown b/website/docs/r/log_analytics_workspace_linked_service.html.markdown index fe2717fdc186..57720a9c0f02 100644 --- a/website/docs/r/log_analytics_workspace_linked_service.html.markdown +++ b/website/docs/r/log_analytics_workspace_linked_service.html.markdown @@ -1,7 +1,7 @@ --- layout: "azurerm" page_title: "Azure Resource Manager: azurerm_log_analytics_workspace_linked_service" -sidebar_current: "docs-azurerm-oms-log-analytics-workspace-linked-service" +sidebar_current: "docs-azurerm-log-analytics-workspace-linked-service" description: |- Manages a Log Analytics (formally Operational Insights) Linked Service. --- @@ -10,6 +10,9 @@ description: |- Links a Log Analytics (formally Operational Insights) Workspace to another resource. The (currently) only linkable service is an Azure Automation Account. +~> **NOTE:** This resource has been deprecated in favour of the `azurerm_log_analytics_linked_service` resource and will be removed in the next major version of the AzureRM Provider. The new resource shares the same fields as this one, and information on migrating across [can be found in this guide](../guides/migrating-between-renamed-resources.html). 
+ + ## Example Usage ```hcl @@ -27,7 +30,7 @@ resource "azurerm_automation_account" "test" { name = "Basic" } - tags { + tags = { environment = "development" } } @@ -43,10 +46,7 @@ resource "azurerm_log_analytics_workspace" "test" { resource "azurerm_log_analytics_workspace_linked_service" "test" { resource_group_name = "${azurerm_resource_group.test.name}" workspace_name = "${azurerm_log_analytics_workspace.test.name}" - - linked_service_properties { - resource_id = "${azurerm_automation_account.test.id}" - } + resource_id = "${azurerm_automation_account.test.id}" } ``` @@ -60,13 +60,17 @@ The following arguments are supported: * `linked_service_name` - (Optional) Name of the type of linkedServices resource to connect to the Log Analytics Workspace specified in `workspace_name`. Currently it defaults to and only supports `automation` as a value. Changing this forces a new resource to be created. -* `linked_service_properties` - (Required) A `linked_service_properties` block as defined below. +* `resource_id` - (Optional) The ID of the Resource that will be linked to the workspace. + +* `linked_service_properties` - (Optional **Deprecated**) A `linked_service_properties` block as defined below. * `tags` - (Optional) A mapping of tags to assign to the resource. +--- + `linked_service_properties` supports the following: -* `resource_id` - (Required) The resource id of the resource that will be linked to the workspace. +* `resource_id` - (Optional **Deprecated**) The resource id of the resource that will be linked to the workspace. This field has been deprecated in favour of the top-level `resource_id` field and will be removed in v2.0 of the AzureRM Provider. ## Attributes Reference @@ -81,5 +85,5 @@ The following attributes are exported: Log Analytics Workspaces can be imported using the `resource id`, e.g. 
```shell -terraform import azurerm_log_analytics_workspace_linked_service.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.OperationalInsights/workspaces/workspace1/linkedServices/automation +terraform import azurerm_log_analytics_workspace_linked_service.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.OperationalInsights/workspaces/workspace1/linkedservices/automation ``` diff --git a/website/docs/r/managed_disk.html.markdown b/website/docs/r/managed_disk.html.markdown index fb893b59278c..9b235d3b260f 100644 --- a/website/docs/r/managed_disk.html.markdown +++ b/website/docs/r/managed_disk.html.markdown @@ -26,7 +26,7 @@ resource "azurerm_managed_disk" "test" { create_option = "Empty" disk_size_gb = "1" - tags { + tags = { environment = "staging" } } @@ -48,7 +48,7 @@ resource "azurerm_managed_disk" "source" { create_option = "Empty" disk_size_gb = "1" - tags { + tags = { environment = "staging" } } @@ -62,7 +62,7 @@ resource "azurerm_managed_disk" "copy" { source_resource_id = "${azurerm_managed_disk.source.id}" disk_size_gb = "1" - tags { + tags = { environment = "staging" } } diff --git a/website/docs/r/management_group.html.markdown b/website/docs/r/management_group.html.markdown index dcb7251e2779..db1433c02a07 100644 --- a/website/docs/r/management_group.html.markdown +++ b/website/docs/r/management_group.html.markdown @@ -15,11 +15,22 @@ Manages a Management Group. 
```hcl data "azurerm_subscription" "current" {} -resource "azurerm_management_group" "test" { +resource "azurerm_management_group" "example_parent" { + display_name = "ParentGroup" subscription_ids = [ "${data.azurerm_subscription.current.id}", ] } + +resource "azurerm_management_group" "example_child" { + display_name = "ChildGroup" + parent_management_group_id = "${azurerm_management_group.example_parent.id}" + + subscription_ids = [ + "${data.azurerm_subscription.current.id}", + # other subscription IDs can go here + ] +} ``` ## Argument Reference diff --git a/website/docs/r/media_services_account.html.markdown b/website/docs/r/media_services_account.html.markdown new file mode 100644 index 000000000000..2da9b975e5eb --- /dev/null +++ b/website/docs/r/media_services_account.html.markdown @@ -0,0 +1,75 @@ +--- +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_media_services_account" +sidebar_current: "docs-azurerm-resource-media-media-services-account" +description: |- + Manages a Media Services Account. +--- + +# azurerm_media_services_account + +Manages a Media Services Account. + +## Example Usage + +```hcl +resource "azurerm_resource_group" "test" { + name = "media-resources" + location = "West Europe" +} + +resource "azurerm_storage_account" "test" { + name = "examplestoracc" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + account_tier = "Standard" + account_replication_type = "GRS" +} + +resource "azurerm_media_services_account" "test" { + name = "examplemediaacc" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + storage_account { + id = "${azurerm_storage_account.test.id}" + is_primary = true + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) Specifies the name of the Media Services Account. Changing this forces a new resource to be created. 
+ +* `resource_group_name` - (Required) The name of the resource group in which to create the Media Services Account. Changing this forces a new resource to be created. + +* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + +* `storage_account` - (Required) One or more `storage_account` blocks as defined below. + +--- + +A `storage_account` block supports the following: + +* `id` - (Required) Specifies the ID of the Storage Account that will be associated with the Media Services instance. + +* `is_primary` - (Required) Specifies whether the storage account should be the primary account or not. Defaults to `false`. + +~> **NOTE:** Whilst multiple `storage_account` blocks can be specified - one of them must be set to the primary + +## Attributes Reference + +The following attributes are exported: + +* `id` - The Resource ID of the Media Services Account. + +## Import + +Media Services Accounts can be imported using the `resource id`, e.g. + +```shell +terraform import azurerm_media_services_account.account /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Media/mediaservices/account1 +``` \ No newline at end of file diff --git a/website/docs/r/metric_alertrule.html.markdown b/website/docs/r/metric_alertrule.html.markdown index 83e370aba643..39ad24290b54 100644 --- a/website/docs/r/metric_alertrule.html.markdown +++ b/website/docs/r/metric_alertrule.html.markdown @@ -11,6 +11,8 @@ description: |- Manages a [metric-based alert rule](https://docs.microsoft.com/en-us/azure/monitoring-and-diagnostics/monitor-quick-resource-metric-alert-portal) in Azure Monitor. +~> **NOTE:** This resource has been deprecated in favour of the `azurerm_monitor_metric_alertrule` resource and will be removed in the next major version of the AzureRM Provider. 
The new resource shares the same fields as this one, and information on migrating across [can be found in this guide](../guides/migrating-between-renamed-resources.html). + ## Example Usage (CPU Percentage of a virtual machine) ```hcl diff --git a/website/docs/r/monitor_activity_log_alert.html.markdown b/website/docs/r/monitor_activity_log_alert.html.markdown index b01ccc1d4996..7cbca34b7da3 100644 --- a/website/docs/r/monitor_activity_log_alert.html.markdown +++ b/website/docs/r/monitor_activity_log_alert.html.markdown @@ -52,7 +52,7 @@ resource "azurerm_monitor_activity_log_alert" "main" { action { action_group_id = "${azurerm_monitor_action_group.main.id}" - webhook_properties { + webhook_properties = { from = "terraform" } } diff --git a/website/docs/r/monitor_autoscale_setting.html.markdown b/website/docs/r/monitor_autoscale_setting.html.markdown new file mode 100644 index 000000000000..bc86e18a7e15 --- /dev/null +++ b/website/docs/r/monitor_autoscale_setting.html.markdown @@ -0,0 +1,410 @@ +--- +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_monitor_autoscale_setting" +sidebar_current: "docs-azurerm-resource-monitor-autoscale-setting" +description: |- + Manages an AutoScale Setting which can be applied to Virtual Machine Scale Sets, App Services and other scalable resources. +--- + +# azurerm_monitor_autoscale_setting + +Manages a AutoScale Setting which can be applied to Virtual Machine Scale Sets, App Services and other scalable resources. + +## Example Usage + +```hcl +resource "azurerm_resource_group" "test" { + name = "autoscalingTest" + location = "West US" +} + +resource "azurerm_virtual_machine_scale_set" "test" { + # ... 
+} + +resource "azurerm_monitor_autoscale_setting" "test" { + name = "myAutoscaleSetting" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + target_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + + profile { + name = "defaultProfile" + + capacity { + default = 1 + minimum = 1 + maximum = 10 + } + + rule { + metric_trigger { + metric_name = "Percentage CPU" + metric_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + time_grain = "PT1M" + statistic = "Average" + time_window = "PT5M" + time_aggregation = "Average" + operator = "GreaterThan" + threshold = 75 + } + + scale_action { + direction = "Increase" + type = "ChangeCount" + value = "1" + cooldown = "PT1M" + } + } + + rule { + metric_trigger { + metric_name = "Percentage CPU" + metric_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + time_grain = "PT1M" + statistic = "Average" + time_window = "PT5M" + time_aggregation = "Average" + operator = "LessThan" + threshold = 25 + } + + scale_action { + direction = "Decrease" + type = "ChangeCount" + value = "1" + cooldown = "PT1M" + } + } + } + + notification { + email { + send_to_subscription_administrator = true + send_to_subscription_co_administrator = true + custom_emails = ["admin@contoso.com"] + } + } +} +``` + +## Example Usage (repeating on weekends) + +```hcl +resource "azurerm_resource_group" "test" { + name = "autoscalingTest" + location = "West US" +} + +resource "azurerm_virtual_machine_scale_set" "test" { + # ... 
+} + +resource "azurerm_monitor_autoscale_setting" "test" { + name = "myAutoscaleSetting" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + target_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + + profile { + name = "Weekends" + + capacity { + default = 1 + minimum = 1 + maximum = 10 + } + + rule { + metric_trigger { + metric_name = "Percentage CPU" + metric_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + time_grain = "PT1M" + statistic = "Average" + time_window = "PT5M" + time_aggregation = "Average" + operator = "GreaterThan" + threshold = 90 + } + + scale_action { + direction = "Increase" + type = "ChangeCount" + value = "2" + cooldown = "PT1M" + } + } + + rule { + metric_trigger { + metric_name = "Percentage CPU" + metric_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + time_grain = "PT1M" + statistic = "Average" + time_window = "PT5M" + time_aggregation = "Average" + operator = "LessThan" + threshold = 10 + } + + scale_action { + direction = "Decrease" + type = "ChangeCount" + value = "2" + cooldown = "PT1M" + } + } + + recurrence { + frequency = "Week" + timezone = "Pacific Standard Time" + days = ["Saturday", "Sunday"] + hours = [12] + minutes = [0] + } + } + + notification { + email { + send_to_subscription_administrator = true + send_to_subscription_co_administrator = true + custom_emails = ["admin@contoso.com"] + } + } +} +``` + +## Example Usage (for fixed dates) + +```hcl +resource "azurerm_resource_group" "test" { + name = "autoscalingTest" + location = "West US" +} + +resource "azurerm_virtual_machine_scale_set" "test" { + # ... 
+} + +resource "azurerm_monitor_autoscale_setting" "test" { + name = "myAutoscaleSetting" + enabled = true + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + target_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + + profile { + name = "forJuly" + + capacity { + default = 1 + minimum = 1 + maximum = 10 + } + + rule { + metric_trigger { + metric_name = "Percentage CPU" + metric_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + time_grain = "PT1M" + statistic = "Average" + time_window = "PT5M" + time_aggregation = "Average" + operator = "GreaterThan" + threshold = 90 + } + + scale_action { + direction = "Increase" + type = "ChangeCount" + value = "2" + cooldown = "PT1M" + } + } + + rule { + metric_trigger { + metric_name = "Percentage CPU" + metric_resource_id = "${azurerm_virtual_machine_scale_set.test.id}" + time_grain = "PT1M" + statistic = "Average" + time_window = "PT5M" + time_aggregation = "Average" + operator = "LessThan" + threshold = 10 + } + + scale_action { + direction = "Decrease" + type = "ChangeCount" + value = "2" + cooldown = "PT1M" + } + } + + fixed_date { + timezone = "Pacific Standard Time" + start = "2020-07-01T00:00:00Z" + end = "2020-07-31T23:59:59Z" + } + } + + notification { + email { + send_to_subscription_administrator = true + send_to_subscription_co_administrator = true + custom_emails = ["admin@contoso.com"] + } + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the AutoScale Setting. Changing this forces a new resource to be created. + +* `resource_group_name` - (Required) The name of the Resource Group in the AutoScale Setting should be created. Changing this forces a new resource to be created. + +* `location` - (Required) Specifies the supported Azure location where the AutoScale Setting should exist. Changing this forces a new resource to be created. 
+ +* `profile` - (Required) Specifies one or more (up to 20) `profile` blocks as defined below. + +* `target_resource_id` - (Required) Specifies the resource ID of the resource that the autoscale setting should be added to. + +* `enabled` - (Optional) Specifies whether automatic scaling is enabled for the target resource. Defaults to `true`. + +* `notification` - (Optional) Specifies a `notification` block as defined below. + +* `tags` - (Optional) A mapping of tags to assign to the resource. + +--- + +A `profile` block supports the following: + +* `name` - (Required) Specifies the name of the profile. + +* `capacity` - (Required) A `capacity` block as defined below. + +* `rule` - (Required) One or more (up to 10) `rule` blocks as defined below. + +* `fixed_date` - (Optional) A `fixed_date` block as defined below. This cannot be specified if a `recurrence` block is specified. + +* `recurrence` - (Optional) A `recurrence` block as defined below. This cannot be specified if a `fixed_date` block is specified. + +--- + +A `capacity` block supports the following: + +* `default` - (Required) The number of instances that are available for scaling if metrics are not available for evaluation. The default is only used if the current instance count is lower than the default. Valid values are between `0` and `1000`. + +* `maximum` - (Required) The maximum number of instances for this resource. Valid values are between `0` and `1000`. + +-> **NOTE:** The maximum number of instances is also limited by the amount of Cores available in the subscription. + +* `minimum` - (Required) The minimum number of instances for this resource. Valid values are between `0` and `1000`. + +--- + +A `rule` block supports the following: + +* `metric_trigger` - (Required) A `metric_trigger` block as defined below. + +* `scale_action` - (Required) A `scale_action` block as defined below. 
+ +--- + +A `metric_trigger` block supports the following: + +* `metric_name` - (Required) The name of the metric that defines what the rule monitors, such as `Percentage CPU` for `Virtual Machine Scale Sets` and `CpuPercentage` for `App Service Plan`. + +-> **NOTE:** The allowed value of `metric_name` highly depends on the targeting resource type, please visit [Supported metrics with Azure Monitor](https://docs.microsoft.com/en-us/azure/azure-monitor/platform/metrics-supported) for more details. + +* `metric_resource_id` - (Required) The ID of the Resource which the Rule monitors. + +* `operator` - (Required) Specifies the operator used to compare the metric data and threshold. Possible values are: `Equals`, `NotEquals`, `GreaterThan`, `GreaterThanOrEqual`, `LessThan`, `LessThanOrEqual`. + +* `statistic` - (Required) Specifies how the metrics from multiple instances are combined. Possible values are `Average`, `Min` and `Max`. + +* `time_aggregation` - (Required) Specifies how the data that's collected should be combined over time. Possible values include `Average`, `Count`, `Maximum`, `Minimum`, `Last` and `Total`. Defaults to `Average`. + +* `time_grain` - (Required) Specifies the granularity of metrics that the rule monitors, which must be one of the pre-defined values returned from the metric definitions for the metric. This value must be between 1 minute and 12 hours an be formatted as an ISO 8601 string. + +* `time_window` - (Required) Specifies the time range for which data is collected, which must be greater than the delay in metric collection (which varies from resource to resource). This value must be between 5 minutes and 12 hours and be formatted as an ISO 8601 string. + +* `threshold` - (Required) Specifies the threshold of the metric that triggers the scale action. + +--- + +A `scale_action` block supports the following: + +* `cooldown` - (Required) The amount of time to wait since the last scaling action before this action occurs. 
Must be between 1 minute and 1 week and formatted as a ISO 8601 string. + +* `direction` - (Required) The scale direction. Possible values are `Increase` and `Decrease`. + +* `type` - (Required) The type of action that should occur. Possible values are `ChangeCount`, `ExactCount` and `PercentChangeCount`. + +* `value` - (Required) The number of instances involved in the scaling action. Defaults to `1`. + +--- + +A `fixed_date` block supports the following: + +* `end` - (Required) Specifies the end date for the profile, formatted as an RFC3339 date string. + +* `start` - (Required) Specifies the start date for the profile, formatted as an RFC3339 date string. + +* `timezone` (Optional) The Time Zone of the `start` and `end` times. A list of [possible values can be found here](https://msdn.microsoft.com/en-us/library/azure/dn931928.aspx). Defaults to `UTC`. + +--- + +A `recurrence` block supports the following: + +* `timezone` - (Required) The Time Zone used for the `hours` field. A list of [possible values can be found here](https://msdn.microsoft.com/en-us/library/azure/dn931928.aspx). Defaults to `UTC`. + +* `days` - (Required) A list of days that this profile takes effect on. Possible values include `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. + +* `hours` - (Required) A list containing a single item, which specifies the Hour interval at which this recurrence should be triggered (in 24-hour time). Possible values are from `0` to `23`. + +* `minutes` - (Required) A list containing a single item which specifies the Minute interval at which this recurrence should be triggered. + +--- + +A `notification` block supports the following: + +* `email` - (Required) A `email` block as defined below. + +* `webhook` - (Optional) One or more `webhook` blocks as defined below. 
An `email` block supports the following:
+ +## Example Usage (CPU Percentage of a virtual machine) + +```hcl +resource "azurerm_monitor_metric_alertrule" "test" { + name = "${azurerm_virtual_machine.test.name}-cpu" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + + description = "An alert rule to watch the metric Percentage CPU" + + enabled = true + + resource_id = "${azurerm_virtual_machine.test.id}" + metric_name = "Percentage CPU" + operator = "GreaterThan" + threshold = 75 + aggregation = "Average" + period = "PT5M" + + email_action { + send_to_service_owners = false + + custom_emails = [ + "some.user@example.com", + ] + } + + webhook_action { + service_uri = "https://example.com/some-url" + + properties = { + severity = "incredible" + acceptance_test = "true" + } + } +} +``` + +## Example Usage (Storage usage of a SQL Database) + +```hcl +resource "azurerm_monitor_metric_alertrule" "test" { + name = "${azurerm_sql_database.test.name}-storage" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + + description = "An alert rule to watch the metric Storage" + + enabled = true + + resource_id = "${azurerm_sql_database.test.id}" + metric_name = "storage" + operator = "GreaterThan" + threshold = 1073741824 + aggregation = "Maximum" + period = "PT10M" + + email_action { + send_to_service_owners = false + + custom_emails = [ + "some.user@example.com", + ] + } + + webhook_action { + service_uri = "https://example.com/some-url" + + properties = { + severity = "incredible" + acceptance_test = "true" + } + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) Specifies the name of the alert rule. Changing this forces a new resource to be created. + +* `resource_group_name` - (Required) The name of the resource group in which to create the alert rule. Changing this forces a new resource to be created. 
+ +* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + +* `description` - (Optional) A verbose description of the alert rule that will be included in the alert email. + +* `enabled` - (Optional) If `true`, the alert rule is enabled. Defaults to `true`. + +--- + +* `resource_id` - (Required) The ID of the resource monitored by the alert rule. + +* `metric_name` - (Required) The metric that defines what the rule monitors. + +-> For a comprehensive reference of supported `metric_name` values for types of `resource` refer to [Supported metrics with Azure Monitor](https://docs.microsoft.com/en-us/azure/monitoring-and-diagnostics/monitoring-supported-metrics) in the Azure documentation. In the referred table, the column "Metric" corresponds to supported values for `metric_name`. + +* `operator` - (Required) The operator used to compare the metric data and the threshold. Possible values are `GreaterThan`, `GreaterThanOrEqual`, `LessThan`, and `LessThanOrEqual`. + +* `threshold` - (Required) The threshold value that activates the alert. + +* `period` - (Required) The period of time formatted in [ISO 8601 duration format](https://en.wikipedia.org/wiki/ISO_8601#Durations) that is used to monitor the alert activity based on the threshold. The period must be between 5 minutes and 1 day. + +* `aggregation` - (Required) Defines how the metric data is combined over time. Possible values are `Average`, `Minimum`, `Maximum`, `Total`, and `Last`. + +--- + +* `email_action` - (Optional) A `email_action` block as defined below. + +* `webhook_action` - (Optional) A `webhook_action` block as defined below. + +* `tags` - (Optional) A mapping of tags to assign to the resource. Changing this forces a new resource to be created. 
+ +--- + +`email_action` supports the following: + +* `send_to_service_owners` - (Optional) If `true`, the administrators (service and co-administrators) of the subscription are notified when the alert is triggered. Defaults to `false`. + +* `custom_emails` - (Optional) A list of email addresses to be notified when the alert is triggered. + +--- + +`webhook_action` supports the following: + +* `service_uri` - (Required) The service uri of the webhook to POST the notification when the alert is triggered. + +* `properties` - (Optional) A dictionary of custom properties to include with the webhook POST operation payload. + +## Attributes Reference + +The following attributes are exported: + +* `id` - The ID of the alert rule. + +## Import + +Metric Alert Rules can be imported using the `resource id`, e.g. + +``` +terraform import azurerm_monitor_metric_alertrule.alertrule1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/microsoft.insights/alertrules/alertrule1 +``` diff --git a/website/docs/r/mssql_elasticpool.html.markdown b/website/docs/r/mssql_elasticpool.html.markdown index e36bf783a163..6c5ab9d20aaf 100644 --- a/website/docs/r/mssql_elasticpool.html.markdown +++ b/website/docs/r/mssql_elasticpool.html.markdown @@ -32,13 +32,13 @@ resource "azurerm_mssql_elasticpool" "test" { resource_group_name = "${azurerm_resource_group.test.name}" location = "${azurerm_resource_group.test.location}" server_name = "${azurerm_sql_server.test.name}" - max_size_bytes = 5368709120 + max_size_gb = 756 sku { name = "GP_Gen5" - capacity = 4 tier = "GeneralPurpose" family = "Gen5" + capacity = 4 } per_database_settings { @@ -64,7 +64,9 @@ The following arguments are supported: * `per_database_settings` - (Required) A `per_database_settings` block as defined below. -* `max_size_bytes` - (Optional) The max data size of the elastic pool in bytes. +* `max_size_gb` - (Optional) The max data size of the elastic pool in gigabytes. 
Conflicts with `max_size_bytes`. + +* `max_size_bytes` - (Optional) The max data size of the elastic pool in bytes. Conflicts with `max_size_gb`. * `tags` - (Optional) A mapping of tags to assign to the resource. @@ -78,7 +80,7 @@ The following arguments are supported: * `tier` - (Required) The tier of the particular SKU. Possible values are `GeneralPurpose`, `BusinessCritical`, `Basic`, `Standard`, or `Premium`. For more information see the documentation for your Elasticpool configuration: [vCore-based](https://docs.microsoft.com/en-us/azure/sql-database/sql-database-vcore-resource-limits-elastic-pools) or [DTU-based](https://docs.microsoft.com/en-us/azure/sql-database/sql-database-dtu-resource-limits-elastic-pools). -* `family` - (Required) The `family` of hardware `Gen4` or `Gen5`. +* `family` - (Optional) The `family` of hardware `Gen4` or `Gen5`. --- diff --git a/website/docs/r/mysql_server.html.markdown b/website/docs/r/mysql_server.html.markdown index 3ce1ab50282e..4d9c7b28345b 100644 --- a/website/docs/r/mysql_server.html.markdown +++ b/website/docs/r/mysql_server.html.markdown @@ -64,7 +64,7 @@ The following arguments are supported: * `version` - (Required) Specifies the version of MySQL to use. Valid values are `5.6` and `5.7`. Changing this forces a new resource to be created. -* `ssl_enforcement` - (Required) Specifies if SSL should be enforced on connections. Possible values are `Enforced` and `Disabled`. +* `ssl_enforcement` - (Required) Specifies if SSL should be enforced on connections. Possible values are `Enabled` and `Disabled`. * `tags` - (Optional) A mapping of tags to assign to the resource. 
diff --git a/website/docs/r/network_interface.html.markdown b/website/docs/r/network_interface.html.markdown index e20147e480c5..997a58e1fece 100644 --- a/website/docs/r/network_interface.html.markdown +++ b/website/docs/r/network_interface.html.markdown @@ -44,7 +44,7 @@ resource "azurerm_network_interface" "test" { private_ip_address_allocation = "Dynamic" } - tags { + tags = { environment = "staging" } } @@ -92,17 +92,19 @@ The `ip_configuration` block supports: * `application_gateway_backend_address_pools_ids` - (Optional / **Deprecated**) List of Application Gateway Backend Address Pool IDs references to which this NIC belongs --> **NOTE:** At this time Network Interface <-> Application Gateway Backend Address Pool associations need to be configured both using this field (which is now Deprecated) and/or using the `azurerm_network_interface_application_gateway_backend_address_pool_association` resource. This field is deprecated and will be removed in favour of that resource in the next major version (2.0) of the AzureRM Provider. +-> **NOTE:** At this time Network Interface <-> Application Gateway Backend Address Pool associations need to be configured both using this field (which is now Deprecated) and using the `azurerm_network_interface_application_gateway_backend_address_pool_association` resource. This field is deprecated and will be removed in favour of that resource in the next major version (2.0) of the AzureRM Provider. * `load_balancer_backend_address_pools_ids` - (Optional / **Deprecated**) List of Load Balancer Backend Address Pool IDs references to which this NIC belongs --> **NOTE:** At this time Network Interface <-> Load Balancer Backend Address Pool associations need to be configured both using this field (which is now Deprecated) and/or using the `azurerm_network_interface_backend_address_pool_association` resource. This field is deprecated and will be removed in favour of that resource in the next major version (2.0) of the AzureRM Provider. 
+-> **NOTE:** At this time Network Interface <-> Load Balancer Backend Address Pool associations need to be configured both using this field (which is now Deprecated) and using the `azurerm_network_interface_backend_address_pool_association` resource. This field is deprecated and will be removed in favour of that resource in the next major version (2.0) of the AzureRM Provider. * `load_balancer_inbound_nat_rules_ids` - (Optional / **Deprecated**) List of Load Balancer Inbound Nat Rules IDs involving this NIC --> **NOTE:** At this time Network Interface <-> Load Balancer Inbound NAT Rule associations need to be configured both using this field (which is now Deprecated) and/or using the `azurerm_network_interface_nat_rule_association` resource. This field is deprecated and will be removed in favour of that resource in the next major version (2.0) of the AzureRM Provider. +-> **NOTE:** At this time Network Interface <-> Load Balancer Inbound NAT Rule associations need to be configured both using this field (which is now Deprecated) and using the `azurerm_network_interface_nat_rule_association` resource. This field is deprecated and will be removed in favour of that resource in the next major version (2.0) of the AzureRM Provider. -* `application_security_group_ids` - (Optional) List of Application Security Group IDs which should be attached to this NIC +* `application_security_group_ids` - (Optional / **Deprecated**) List of Application Security Group IDs which should be attached to this NIC + +-> **NOTE:** At this time Network Interface <-> Application Security Group associations need to be configured both using this field (which is now Deprecated) and using the `azurerm_network_interface_application_security_group_association` resource. This field is deprecated and will be removed in favour of that resource in the next major version (2.0) of the AzureRM Provider. * `primary` - (Optional) Is this the Primary Network Interface? 
If set to `true` this should be the first `ip_configuration` in the array. diff --git a/website/docs/r/network_interface_application_gateway_backend_address_pool_association.html.markdown b/website/docs/r/network_interface_application_gateway_backend_address_pool_association.html.markdown index 1b965d3b16ba..cd8bb392841d 100644 --- a/website/docs/r/network_interface_application_gateway_backend_address_pool_association.html.markdown +++ b/website/docs/r/network_interface_application_gateway_backend_address_pool_association.html.markdown @@ -144,15 +144,15 @@ The following arguments are supported: The following attributes are exported: -* `id` - The (Terraform specific) ID of the Association between the Network Interface and the Load Balancers Backend Address Pool. +* `id` - The (Terraform specific) ID of the Association between the Network Interface and the Application Gateway Backend Address Pool. ## Import -Associations between Network Interfaces and Load Balancer Backend Address Pools can be imported using the `resource id`, e.g. +Associations between Network Interfaces and Application Gateway Backend Address Pools can be imported using the `resource id`, e.g. 
```shell -terraform import azurerm_network_interface_application_gateway_backend_address_pool_association.association1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/microsoft.network/networkInterfaces/nic1/ipConfigurations/example|/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool1 +terraform import azurerm_network_interface_application_gateway_backend_address_pool_association.association1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/microsoft.network/networkInterfaces/nic1/ipConfigurations/example|/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/applicationGateways/gateway1/backendAddressPools/pool1 ``` -> **NOTE:** This ID is specific to Terraform - and is of the format `{networkInterfaceId}/ipConfigurations/{ipConfigurationName}|{backendAddressPoolId}`. diff --git a/website/docs/r/network_interface_application_security_group_association.html.markdown b/website/docs/r/network_interface_application_security_group_association.html.markdown new file mode 100644 index 000000000000..876718d64021 --- /dev/null +++ b/website/docs/r/network_interface_application_security_group_association.html.markdown @@ -0,0 +1,87 @@ +--- +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_network_interface_application_security_group_association" +sidebar_current: "docs-azurerm-resource-network-interface-application-security-group-association" +description: |- + Manages the association between a Network Interface and an Application Security Group + +--- + +# azurerm_network_interface_application_security_group_association + +Manages the association between a Network Interface and an Application Security Group.
+ +## Example Usage + +```hcl +resource "azurerm_resource_group" "test" { + name = "example-resources" + location = "West Europe" +} + +resource "azurerm_virtual_network" "test" { + name = "example-network" + address_space = ["10.0.0.0/16"] + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" +} + +resource "azurerm_subnet" "test" { + name = "internal" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.0.1.0/24" +} + +resource "azurerm_application_security_group" "test" { + name = "example-asg" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" +} + +resource "azurerm_network_interface" "test" { + name = "example-nic" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + ip_configuration { + name = "testconfiguration1" + subnet_id = "${azurerm_subnet.test.id}" + private_ip_address_allocation = "Dynamic" + application_security_group_ids = [ "${azurerm_application_security_group.test.id}" ] + } +} + +resource "azurerm_network_interface_application_security_group_association" "test" { + network_interface_id = "${azurerm_network_interface.test.id}" + ip_configuration_name = "testconfiguration1" + application_security_group_id = "${azurerm_application_security_group.test.id}" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `network_interface_id` - (Required) The ID of the Network Interface. Changing this forces a new resource to be created. + +* `ip_configuration_name` - (Required) The Name of the IP Configuration within the Network Interface which should be connected to the Application Security Group. Changing this forces a new resource to be created. 
+ +* `application_security_group_id` - (Required) The ID of the Application Security Group which this Network Interface should be connected to. Changing this forces a new resource to be created. + +## Attributes Reference + +The following attributes are exported: + +* `id` - The (Terraform specific) ID of the Association between the Network Interface and the Application Security Group. + +## Import + +Associations between Network Interfaces and Application Security Groups can be imported using the `resource id`, e.g. + + +```shell +terraform import azurerm_network_interface_application_security_group_association.association1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/microsoft.network/networkInterfaces/nic1/ipConfigurations/example|/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/applicationSecurityGroups/securityGroup1 +``` + +-> **NOTE:** This ID is specific to Terraform - and is of the format `{networkInterfaceId}/ipConfigurations/{ipConfigurationName}|{applicationSecurityGroupId}`. diff --git a/website/docs/r/network_security_group.html.markdown b/website/docs/r/network_security_group.html.markdown index 905ebdf5c207..4dace31b1538 100644 --- a/website/docs/r/network_security_group.html.markdown +++ b/website/docs/r/network_security_group.html.markdown @@ -40,7 +40,7 @@ resource "azurerm_network_security_group" "test" { destination_address_prefix = "*" } - tags { + tags = { environment = "Production" } } diff --git a/website/docs/r/postgresql_server.html.markdown b/website/docs/r/postgresql_server.html.markdown index 3e79d9e20621..a6661169bc7e 100644 --- a/website/docs/r/postgresql_server.html.markdown +++ b/website/docs/r/postgresql_server.html.markdown @@ -61,7 +61,7 @@ The following arguments are supported: * `administrator_login_password` - (Required) The Password associated with the `administrator_login` for the PostgreSQL Server.
-* `version` - (Required) Specifies the version of PostgreSQL to use. Valid values are `9.5`, `9.6`, and `10.0`. Changing this forces a new resource to be created. +* `version` - (Required) Specifies the version of PostgreSQL to use. Valid values are `9.5`, `9.6`, `10`, `10.0`, and `10.2`. Changing this forces a new resource to be created. * `ssl_enforcement` - (Required) Specifies if SSL should be enforced on connections. Possible values are `Enabled` and `Disabled`. diff --git a/website/docs/r/public_ip.html.markdown b/website/docs/r/public_ip.html.markdown index d209d9b460c7..3e6c8bfbadf4 100644 --- a/website/docs/r/public_ip.html.markdown +++ b/website/docs/r/public_ip.html.markdown @@ -24,7 +24,7 @@ resource "azurerm_public_ip" "test" { resource_group_name = "${azurerm_resource_group.test.name}" allocation_method = "Static" - tags { + tags = { environment = "Production" } } diff --git a/website/docs/r/recovery_services_protection_policy_vm.markdown b/website/docs/r/recovery_services_protection_policy_vm.markdown index 9ebe75912657..e72894465e27 100644 --- a/website/docs/r/recovery_services_protection_policy_vm.markdown +++ b/website/docs/r/recovery_services_protection_policy_vm.markdown @@ -27,8 +27,8 @@ resource "azurerm_recovery_services_vault" "example" { resource "azurerm_recovery_services_protection_policy_vm" "test" { name = "tfex-recovery-vault-policy" - resource_group_name = "${azurerm_resource_group.test.name}" - recovery_vault_name = "${azurerm_recovery_services_vault.test.name}" + resource_group_name = "${azurerm_resource_group.example.name}" + recovery_vault_name = "${azurerm_recovery_services_vault.example.name}" timezone = "UTC" diff --git a/website/docs/r/redis_cache.html.markdown b/website/docs/r/redis_cache.html.markdown index dc493d6516f1..f9c785d76134 100644 --- a/website/docs/r/redis_cache.html.markdown +++ b/website/docs/r/redis_cache.html.markdown @@ -158,6 +158,8 @@ The pricing group for the Redis Family - either "C" or "P" at present. 
* `maxmemory_delta` - (Optional) The max-memory delta for this Redis instance. Defaults are shown below. * `maxmemory_policy` - (Optional) How Redis will select what to remove when `maxmemory` is reached. Defaults are shown below. +* `maxfragmentationmemory_reserved` - (Optional) Value in megabytes reserved to accommodate for memory fragmentation. Defaults are shown below. + * `rdb_backup_enabled` - (Optional) Is Backup Enabled? Only supported on Premium SKU's. * `rdb_backup_frequency` - (Optional) The Backup Frequency in Minutes. Only supported on Premium SKU's. Possible values are: `15`, `30`, `60`, `360`, `720` and `1440`. * `rdb_backup_max_snapshot_count` - (Optional) The maximum number of snapshots to create as a backup. Only supported for Premium SKU's. @@ -183,13 +185,14 @@ redis_configuration { ``` ## Default Redis Configuration Values -| Redis Value | Basic | Standard | Premium | -| ------------------ | ------------ | ------------ | ------------ | -| maxmemory_reserved | 2 | 50 | 200 | -| maxmemory_delta | 2 | 50 | 200 | -| maxmemory_policy | volatile-lru | volatile-lru | volatile-lru | - -_*Important*: The `maxmemory_reserved` and `maxmemory_delta` settings are only available for Standard and Premium caches. More details are available in the Relevant Links section below._ +| Redis Value | Basic | Standard | Premium | +| ------------------------------- | ------------ | ------------ | ------------ | +| maxmemory_reserved | 2 | 50 | 200 | +| maxfragmentationmemory_reserved | 2 | 50 | 200 | +| maxmemory_delta | 2 | 50 | 200 | +| maxmemory_policy | volatile-lru | volatile-lru | volatile-lru | + +_*Important*: The `maxmemory_reserved`, `maxmemory_delta` and `maxfragmentationmemory_reserved` settings are only available for Standard and Premium caches.
More details are available in the Relevant Links section below._ * `patch_schedule` supports the following: diff --git a/website/docs/r/relay_namespace.html.markdown b/website/docs/r/relay_namespace.html.markdown index d2665898bcb8..1cef0b3e228a 100644 --- a/website/docs/r/relay_namespace.html.markdown +++ b/website/docs/r/relay_namespace.html.markdown @@ -28,7 +28,7 @@ resource "azurerm_relay_namespace" "test" { name = "Standard" } - tags { + tags = { source = "terraform" } } diff --git a/website/docs/r/resource_group.html.markdown b/website/docs/r/resource_group.html.markdown index 32375ee7133c..507319c77923 100644 --- a/website/docs/r/resource_group.html.markdown +++ b/website/docs/r/resource_group.html.markdown @@ -17,7 +17,7 @@ resource "azurerm_resource_group" "test" { name = "testResourceGroup1" location = "West US" - tags { + tags = { environment = "Production" } } diff --git a/website/docs/r/route_table.html.markdown b/website/docs/r/route_table.html.markdown index 991ae81697ae..43625f3320a8 100644 --- a/website/docs/r/route_table.html.markdown +++ b/website/docs/r/route_table.html.markdown @@ -31,7 +31,7 @@ resource "azurerm_route_table" "test" { next_hop_type = "vnetlocal" } - tags { + tags = { environment = "Production" } } diff --git a/website/docs/r/search_service.html.markdown b/website/docs/r/search_service.html.markdown index ed12a49efbd8..6c0ee5dc9785 100644 --- a/website/docs/r/search_service.html.markdown +++ b/website/docs/r/search_service.html.markdown @@ -24,7 +24,7 @@ resource "azurerm_search_service" "test" { location = "${azurerm_resource_group.test.location}" sku = "Standard" - tags { + tags = { environment = "staging" database = "test" } diff --git a/website/docs/r/service_fabric_cluster.html.markdown b/website/docs/r/service_fabric_cluster.html.markdown index 0f5c8c46acfb..a60e63d94128 100644 --- a/website/docs/r/service_fabric_cluster.html.markdown +++ b/website/docs/r/service_fabric_cluster.html.markdown @@ -87,9 +87,9 @@ A 
`azure_active_directory` block supports the following: * `tenant_id` - (Required) The Azure Active Directory Tenant ID. Changing this forces a new resource to be created. -* `cluster_application_id` - (Required) The Azure Active Directory Client ID which should be used for the Cluster Application. Changing this forces a new resource to be created. +* `cluster_application_id` - (Required) The Azure Active Directory Cluster Application ID. Changing this forces a new resource to be created. -* `cluster_application_id` - (Required) The Azure Active Directory Client ID which should be used for the Client Application. Changing this forces a new resource to be created. +* `client_application_id` - (Required) The Azure Active Directory Client ID which should be used for the Client Application. Changing this forces a new resource to be created. --- @@ -147,6 +147,10 @@ A `node_type` block supports the following: * `name` - (Required) The name of the Node Type. Changing this forces a new resource to be created. +* `placement_properties` - (Optional) The placement tags applied to nodes in the node type, which can be used to indicate where certain services (workload) should run. + +* `capacities` - (Optional) The capacity tags applied to the nodes in the node type, the cluster resource manager uses these tags to understand how much resource a node has. + * `instance_count` - (Required) The number of nodes for this Node Type. * `is_primary` - (Required) Is this the Primary Node Type? Changing this forces a new resource to be created. 
diff --git a/website/docs/r/servicebus_namespace.html.markdown b/website/docs/r/servicebus_namespace.html.markdown index afe072561f8c..d3e6ecf119de 100644 --- a/website/docs/r/servicebus_namespace.html.markdown +++ b/website/docs/r/servicebus_namespace.html.markdown @@ -24,7 +24,7 @@ resource "azurerm_servicebus_namespace" "example" { resource_group_name = "${azurerm_resource_group.example.name}" sku = "Standard" - tags { + tags = { source = "terraform" } } diff --git a/website/docs/r/servicebus_namespace_authorization_rule.html.markdown b/website/docs/r/servicebus_namespace_authorization_rule.html.markdown index d0c10dc2ed10..a8a8a84b79de 100644 --- a/website/docs/r/servicebus_namespace_authorization_rule.html.markdown +++ b/website/docs/r/servicebus_namespace_authorization_rule.html.markdown @@ -24,7 +24,7 @@ resource "azurerm_servicebus_namespace" "example" { resource_group_name = "${azurerm_resource_group.example.name}" sku = "Standard" - tags { + tags = { source = "terraform" } } diff --git a/website/docs/r/servicebus_queue.html.markdown b/website/docs/r/servicebus_queue.html.markdown index 5537e78622ee..b790e1df3e9c 100644 --- a/website/docs/r/servicebus_queue.html.markdown +++ b/website/docs/r/servicebus_queue.html.markdown @@ -24,7 +24,7 @@ resource "azurerm_servicebus_namespace" "example" { resource_group_name = "${azurerm_resource_group.example.name}" sku = "Standard" - tags { + tags = { source = "terraform" } } @@ -48,7 +48,7 @@ The following arguments are supported: * `namespace_name` - (Required) The name of the ServiceBus Namespace to create this queue in. Changing this forces a new resource to be created. -* `location` - (Required) Specifies the supported Azure location where the resource exists. +* `location` - (Optional / **Deprecated**) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. 
* `resource_group_name` - (Required) The name of the resource group in which to diff --git a/website/docs/r/servicebus_queue_authorization_rule.html.markdown b/website/docs/r/servicebus_queue_authorization_rule.html.markdown index 7f9bdae8cd38..fe1304efc895 100644 --- a/website/docs/r/servicebus_queue_authorization_rule.html.markdown +++ b/website/docs/r/servicebus_queue_authorization_rule.html.markdown @@ -24,7 +24,7 @@ resource "azurerm_servicebus_namespace" "example" { resource_group_name = "${azurerm_resource_group.example.name}" sku = "Standard" - tags { + tags = { source = "terraform" } } diff --git a/website/docs/r/servicebus_subscription.html.markdown b/website/docs/r/servicebus_subscription.html.markdown index 4d6373bad67b..671d5ab7d019 100644 --- a/website/docs/r/servicebus_subscription.html.markdown +++ b/website/docs/r/servicebus_subscription.html.markdown @@ -24,7 +24,7 @@ resource "azurerm_servicebus_namespace" "example" { resource_group_name = "${azurerm_resource_group.example.name}" sku = "Standard" - tags { + tags = { source = "terraform" } } diff --git a/website/docs/r/servicebus_subscription_rule.html.markdown b/website/docs/r/servicebus_subscription_rule.html.markdown index 43314e7e96bd..93d43395c681 100644 --- a/website/docs/r/servicebus_subscription_rule.html.markdown +++ b/website/docs/r/servicebus_subscription_rule.html.markdown @@ -24,7 +24,7 @@ resource "azurerm_servicebus_namespace" "example" { resource_group_name = "${azurerm_resource_group.example.name}" sku = "Standard" - tags { + tags = { source = "terraform" } } @@ -70,7 +70,7 @@ resource "azurerm_servicebus_namespace" "example" { resource_group_name = "${azurerm_resource_group.example.name}" sku = "Standard" - tags { + tags = { source = "terraform" } } @@ -99,7 +99,7 @@ resource "azurerm_servicebus_subscription_rule" "example" { subscription_name = "${azurerm_servicebus_subscription.example.name}" filter_type = "CorrelationFilter" - correlation_filter = { + correlation_filter { 
correlation_id = "high" label = "red" } diff --git a/website/docs/r/servicebus_topic.html.markdown b/website/docs/r/servicebus_topic.html.markdown index 2dc5954c6c3e..d474ee4f3f08 100644 --- a/website/docs/r/servicebus_topic.html.markdown +++ b/website/docs/r/servicebus_topic.html.markdown @@ -26,7 +26,7 @@ resource "azurerm_servicebus_namespace" "example" { resource_group_name = "${azurerm_resource_group.example.name}" sku = "Standard" - tags { + tags = { source = "terraform" } } diff --git a/website/docs/r/servicebus_topic_authorization_rule.html.markdown b/website/docs/r/servicebus_topic_authorization_rule.html.markdown index ce7af0b9171d..e8dc30cb6965 100644 --- a/website/docs/r/servicebus_topic_authorization_rule.html.markdown +++ b/website/docs/r/servicebus_topic_authorization_rule.html.markdown @@ -24,7 +24,7 @@ resource "azurerm_servicebus_namespace" "example" { resource_group_name = "${azurerm_resource_group.example.name}" sku = "Standard" - tags { + tags = { source = "terraform" } } diff --git a/website/docs/r/shared_image.html.markdown b/website/docs/r/shared_image.html.markdown index 1c017aae3142..98e02eaa3f3d 100644 --- a/website/docs/r/shared_image.html.markdown +++ b/website/docs/r/shared_image.html.markdown @@ -27,7 +27,7 @@ resource "azurerm_shared_image_gallery" "test" { location = "${azurerm_resource_group.test.location}" description = "Shared images and things." - tags { + tags = { Hello = "There" World = "Example" } diff --git a/website/docs/r/shared_image_gallery.html.markdown b/website/docs/r/shared_image_gallery.html.markdown index 61886016ee1a..e6aeeb1aa67b 100644 --- a/website/docs/r/shared_image_gallery.html.markdown +++ b/website/docs/r/shared_image_gallery.html.markdown @@ -27,7 +27,7 @@ resource "azurerm_shared_image_gallery" "test" { location = "${azurerm_resource_group.test.location}" description = "Shared images and things." 
- tags { + tags = { Hello = "There" World = "Example" } @@ -54,6 +54,8 @@ The following attributes are exported: * `id` - The ID of the Shared Image Gallery. +* `unique_name` - The Unique Name for this Shared Image Gallery. + ## Import Shared Image Galleries can be imported using the `resource id`, e.g. diff --git a/website/docs/r/sql_database.html.markdown b/website/docs/r/sql_database.html.markdown index 528d40b24ab1..032d99c0f0d5 100644 --- a/website/docs/r/sql_database.html.markdown +++ b/website/docs/r/sql_database.html.markdown @@ -33,7 +33,7 @@ resource "azurerm_sql_database" "test" { location = "West US" server_name = "${azurerm_sql_server.test.name}" - tags { + tags = { environment = "production" } } diff --git a/website/docs/r/sql_server.html.markdown b/website/docs/r/sql_server.html.markdown index 8dc8c01a2e9c..667ab98af211 100644 --- a/website/docs/r/sql_server.html.markdown +++ b/website/docs/r/sql_server.html.markdown @@ -30,7 +30,7 @@ resource "azurerm_sql_server" "test" { administrator_login = "mradministrator" administrator_login_password = "thisIsDog11" - tags { + tags = { environment = "production" } } diff --git a/website/docs/r/storage_account.html.markdown b/website/docs/r/storage_account.html.markdown index e3fb7779818d..abf8f0c020dc 100644 --- a/website/docs/r/storage_account.html.markdown +++ b/website/docs/r/storage_account.html.markdown @@ -1,3 +1,4 @@ + --- layout: "azurerm" page_title: "Azure Resource Manager: azurerm_storage_account" @@ -25,7 +26,7 @@ resource "azurerm_storage_account" "testsa" { account_tier = "Standard" account_replication_type = "GRS" - tags { + tags = { environment = "staging" } } @@ -67,7 +68,7 @@ resource "azurerm_storage_account" "testsa" { virtual_network_subnet_ids = ["${azurerm_subnet.test.id}"] } - tags { + tags = { environment = "staging" } } @@ -145,21 +146,51 @@ any combination of `Logging`, `Metrics`, `AzureServices`, or `None`. 
The following attributes are exported in addition to the arguments listed above: * `id` - The storage account Resource ID. + * `primary_location` - The primary location of the storage account. + * `secondary_location` - The secondary location of the storage account. + * `primary_blob_endpoint` - The endpoint URL for blob storage in the primary location. + +* `primary_blob_host` - The hostname with port if applicable for blob storage in the primary location. + * `secondary_blob_endpoint` - The endpoint URL for blob storage in the secondary location. + +* `secondary_blob_host` - The hostname with port if applicable for blob storage in the secondary location. + * `primary_queue_endpoint` - The endpoint URL for queue storage in the primary location. + +* `primary_queue_host` - The hostname with port if applicable for queue storage in the primary location. + * `secondary_queue_endpoint` - The endpoint URL for queue storage in the secondary location. + +* `secondary_queue_host` - The hostname with port if applicable for queue storage in the secondary location. + * `primary_table_endpoint` - The endpoint URL for table storage in the primary location. + +* `primary_table_host` - The hostname with port if applicable for table storage in the primary location. + * `secondary_table_endpoint` - The endpoint URL for table storage in the secondary location. + +* `secondary_table_host` - The hostname with port if applicable for table storage in the secondary location. + * `primary_file_endpoint` - The endpoint URL for file storage in the primary location. 
-* `primary_access_key` - The primary access key for the storage account -* `secondary_access_key` - The secondary access key for the storage account -* `primary_connection_string` - The connection string associated with the primary location -* `secondary_connection_string` - The connection string associated with the secondary location -* `primary_blob_connection_string` - The connection string associated with the primary blob location -* `secondary_blob_connection_string` - The connection string associated with the secondary blob location + +* `primary_file_host` - The hostname with port if applicable for file storage in the primary location. + +* `primary_access_key` - The primary access key for the storage account. + +* `secondary_access_key` - The secondary access key for the storage account. + +* `primary_connection_string` - The connection string associated with the primary location. + +* `secondary_connection_string` - The connection string associated with the secondary location. + +* `primary_blob_connection_string` - The connection string associated with the primary blob location. + +* `secondary_blob_connection_string` - The connection string associated with the secondary blob location. + * `identity` - An `identity` block as defined below, which contains the Identity information for this Storage Account. 
--- diff --git a/website/docs/r/storage_container.html.markdown b/website/docs/r/storage_container.html.markdown index 7a58cdcc586f..ef4eb8a95faf 100644 --- a/website/docs/r/storage_container.html.markdown +++ b/website/docs/r/storage_container.html.markdown @@ -25,7 +25,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } diff --git a/website/docs/r/traffic_manager_endpoint.html.markdown b/website/docs/r/traffic_manager_endpoint.html.markdown index a06ab32c4072..131744b775d0 100644 --- a/website/docs/r/traffic_manager_endpoint.html.markdown +++ b/website/docs/r/traffic_manager_endpoint.html.markdown @@ -43,7 +43,7 @@ resource "azurerm_traffic_manager_profile" "test" { path = "/" } - tags { + tags = { environment = "Production" } } diff --git a/website/docs/r/traffic_manager_profile.html.markdown b/website/docs/r/traffic_manager_profile.html.markdown index 87db8a8c6fe9..e8785a589e2b 100644 --- a/website/docs/r/traffic_manager_profile.html.markdown +++ b/website/docs/r/traffic_manager_profile.html.markdown @@ -44,7 +44,7 @@ resource "azurerm_traffic_manager_profile" "test" { path = "/" } - tags { + tags = { environment = "Production" } } diff --git a/website/docs/r/virtual_machine.html.markdown b/website/docs/r/virtual_machine.html.markdown index 500dfac59027..64184588878c 100644 --- a/website/docs/r/virtual_machine.html.markdown +++ b/website/docs/r/virtual_machine.html.markdown @@ -86,7 +86,7 @@ resource "azurerm_virtual_machine" "main" { os_profile_linux_config { disable_password_authentication = false } - tags { + tags = { environment = "staging" } } diff --git a/website/docs/r/virtual_machine_extension.html.markdown b/website/docs/r/virtual_machine_extension.html.markdown index 154a7e3301dc..2186212679bd 100644 --- a/website/docs/r/virtual_machine_extension.html.markdown +++ b/website/docs/r/virtual_machine_extension.html.markdown @@ -57,7 +57,7 @@ resource 
"azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } @@ -100,7 +100,7 @@ resource "azurerm_virtual_machine" "test" { disable_password_authentication = false } - tags { + tags = { environment = "staging" } } @@ -120,7 +120,7 @@ resource "azurerm_virtual_machine_extension" "test" { } SETTINGS - tags { + tags = { environment = "Production" } } diff --git a/website/docs/r/virtual_machine_scale_set.html.markdown b/website/docs/r/virtual_machine_scale_set.html.markdown index 0486ac5c7478..0b07a5207eb8 100644 --- a/website/docs/r/virtual_machine_scale_set.html.markdown +++ b/website/docs/r/virtual_machine_scale_set.html.markdown @@ -42,7 +42,7 @@ resource "azurerm_public_ip" "test" { allocation_method = "Static" domain_name_label = "${azurerm_resource_group.test.name}" - tags { + tags = { environment = "staging" } } @@ -157,7 +157,7 @@ resource "azurerm_virtual_machine_scale_set" "test" { } } - tags { + tags = { environment = "staging" } } @@ -192,7 +192,7 @@ resource "azurerm_storage_account" "test" { account_tier = "Standard" account_replication_type = "LRS" - tags { + tags = { environment = "staging" } } diff --git a/website/docs/r/virtual_network.html.markdown b/website/docs/r/virtual_network.html.markdown index 0cb830069758..c73e4c518a3e 100644 --- a/website/docs/r/virtual_network.html.markdown +++ b/website/docs/r/virtual_network.html.markdown @@ -63,7 +63,7 @@ resource "azurerm_virtual_network" "test" { security_group = "${azurerm_network_security_group.test.id}" } - tags { + tags = { environment = "Production" } }