diff --git a/CHANGELOG.md b/CHANGELOG.md index 1fdcd5824f6a..66ecc8a6f11e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,16 +1,18 @@ -## 0.2.0 (Unreleased) +## 0.2.1 (Unreleased) +## 0.2.0 (September 15, 2017) FEATURES: -* **New Resource:** `azurerm_container_group` [GH-333] [GH-311] +* **Support for authenticating using the Azure CLI** ([#316](https://github.com/terraform-providers/terraform-provider-azurerm/issues/316)) +* **New Resource:** `azurerm_container_group` ([#333](https://github.com/terraform-providers/terraform-provider-azurerm/issues/333)) ([#311](https://github.com/terraform-providers/terraform-provider-azurerm/issues/311)) ([#338](https://github.com/terraform-providers/terraform-provider-azurerm/issues/338)) IMPROVEMENTS: -* `azurerm_app_service_plan` - support for Linux App Services [GH-332] -* `azurerm_postgresql_server` - supporting additional storage sizes [GH-239] -* `azurerm_public_ip` - verifying the ID is valid before importing [GH-320] -* `azurerm_sql_server` - verifying the name is valid before creating [GH-323] -* `resource_group_name` - validation has been added to all resources that use this attribute [GH-330] +* `azurerm_app_service_plan` - support for Linux App Service Plans ([#332](https://github.com/terraform-providers/terraform-provider-azurerm/issues/332)) +* `azurerm_postgresql_server` - supporting additional storage sizes ([#239](https://github.com/terraform-providers/terraform-provider-azurerm/issues/239)) +* `azurerm_public_ip` - verifying the ID is valid before importing ([#320](https://github.com/terraform-providers/terraform-provider-azurerm/issues/320)) +* `azurerm_sql_server` - verifying the name is valid before creating ([#323](https://github.com/terraform-providers/terraform-provider-azurerm/issues/323)) +* `resource_group_name` - validation has been added to all resources that use this attribute ([#330](https://github.com/terraform-providers/terraform-provider-azurerm/issues/330)) ## 0.1.7 (September 11, 2017) diff --git a/azurerm/config.go b/azurerm/config.go index 5170d1022324..c04712b5792b 100644 --- a/azurerm/config.go +++ b/azurerm/config.go @@ -174,6 +174,36 @@ func setUserAgent(client *autorest.Client) { client.UserAgent = fmt.Sprintf("HashiCorp-Terraform-v%s", version) } +func (c *Config) getAuthorizationToken(oauthConfig *adal.OAuthConfig, endpoint string) (*autorest.BearerAuthorizer, error) { + useServicePrincipal := c.ClientSecret != "" + + if useServicePrincipal { + spt, err := adal.NewServicePrincipalToken(*oauthConfig, c.ClientID, c.ClientSecret, endpoint) + if err != nil { + return nil, err + } + + auth := autorest.NewBearerAuthorizer(spt) + return auth, nil + } + + if c.IsCloudShell { + // load the refreshed tokens from the Azure CLI + err := c.LoadTokensFromAzureCLI() + if err != nil { + return nil, fmt.Errorf("Error loading the refreshed CloudShell tokens: %+v", err) + } + } + + spt, err := adal.NewServicePrincipalTokenFromManualToken(*oauthConfig, c.ClientID, endpoint, *c.AccessToken) + if err != nil { + return nil, err + } + + auth := autorest.NewBearerAuthorizer(spt) + return auth, nil +} + // getArmClient is a helper method which returns a fully instantiated // *ArmClient based on the Config's current settings.
func (c *Config) getArmClient() (*ArmClient, error) { @@ -206,31 +236,30 @@ func (c *Config) getArmClient() (*ArmClient, error) { return nil, fmt.Errorf("Unable to configure OAuthConfig for tenant %s", c.TenantID) } + sender := autorest.CreateSender(withRequestLogging()) + // Resource Manager endpoints endpoint := env.ResourceManagerEndpoint - spt, err := adal.NewServicePrincipalToken(*oauthConfig, c.ClientID, c.ClientSecret, endpoint) + auth, err := c.getAuthorizationToken(oauthConfig, endpoint) if err != nil { return nil, err } - auth := autorest.NewBearerAuthorizer(spt) // Graph Endpoints graphEndpoint := env.GraphEndpoint - graphSpt, err := adal.NewServicePrincipalToken(*oauthConfig, c.ClientID, c.ClientSecret, graphEndpoint) + graphAuth, err := c.getAuthorizationToken(oauthConfig, graphEndpoint) if err != nil { return nil, err } - graphAuth := autorest.NewBearerAuthorizer(graphSpt) // Key Vault Endpoints - sender := autorest.CreateSender(withRequestLogging()) keyVaultAuth := autorest.NewBearerAuthorizerCallback(sender, func(tenantID, resource string) (*autorest.BearerAuthorizer, error) { - keyVaultSpt, err := adal.NewServicePrincipalToken(*oauthConfig, c.ClientID, c.ClientSecret, resource) + keyVaultSpt, err := c.getAuthorizationToken(oauthConfig, resource) if err != nil { return nil, err } - return autorest.NewBearerAuthorizer(keyVaultSpt), nil + return keyVaultSpt, nil }) // NOTE: these declarations should be left separate for clarity should the @@ -238,61 +267,61 @@ func (c *Config) getArmClient() (*ArmClient, error) { asc := compute.NewAvailabilitySetsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&asc.Client) asc.Authorizer = auth - asc.Sender = autorest.CreateSender(withRequestLogging()) + asc.Sender = sender client.availSetClient = asc uoc := compute.NewUsageClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&uoc.Client) uoc.Authorizer = auth - uoc.Sender = autorest.CreateSender(withRequestLogging()) + uoc.Sender = sender client.usageOpsClient = uoc vmeic := compute.NewVirtualMachineExtensionImagesClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&vmeic.Client) vmeic.Authorizer = auth - vmeic.Sender = autorest.CreateSender(withRequestLogging()) + vmeic.Sender = sender client.vmExtensionImageClient = vmeic vmec := compute.NewVirtualMachineExtensionsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&vmec.Client) vmec.Authorizer = auth - vmec.Sender = autorest.CreateSender(withRequestLogging()) + vmec.Sender = sender client.vmExtensionClient = vmec vmic := compute.NewVirtualMachineImagesClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&vmic.Client) vmic.Authorizer = auth - vmic.Sender = autorest.CreateSender(withRequestLogging()) + vmic.Sender = sender client.vmImageClient = vmic vmssc := compute.NewVirtualMachineScaleSetsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&vmssc.Client) vmssc.Authorizer = auth - vmssc.Sender = autorest.CreateSender(withRequestLogging()) + vmssc.Sender = sender client.vmScaleSetClient = vmssc vmc := compute.NewVirtualMachinesClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&vmc.Client) vmc.Authorizer = auth - vmc.Sender = autorest.CreateSender(withRequestLogging()) + vmc.Sender = sender client.vmClient = vmc agc := network.NewApplicationGatewaysClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&agc.Client) agc.Authorizer = auth - agc.Sender = autorest.CreateSender(withRequestLogging()) + agc.Sender = sender client.appGatewayClient = agc crc := 
containerregistry.NewRegistriesClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&crc.Client) crc.Authorizer = auth - crc.Sender = autorest.CreateSender(withRequestLogging()) + crc.Sender = sender client.containerRegistryClient = crc csc := containerservice.NewContainerServicesClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&csc.Client) csc.Authorizer = auth - csc.Sender = autorest.CreateSender(withRequestLogging()) + csc.Sender = sender client.containerServicesClient = csc cgc := containerinstance.NewContainerGroupsClientWithBaseURI(endpoint, c.SubscriptionID) @@ -304,115 +333,115 @@ func (c *Config) getArmClient() (*ArmClient, error) { cdb := cosmosdb.NewDatabaseAccountsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&cdb.Client) cdb.Authorizer = auth - cdb.Sender = autorest.CreateSender(withRequestLogging()) + cdb.Sender = sender client.cosmosDBClient = cdb dkc := disk.NewDisksClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&dkc.Client) dkc.Authorizer = auth - dkc.Sender = autorest.CreateSender(withRequestLogging()) + dkc.Sender = sender client.diskClient = dkc img := compute.NewImagesClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&img.Client) img.Authorizer = auth - img.Sender = autorest.CreateSender(withRequestLogging()) + img.Sender = sender client.imageClient = img egtc := eventgrid.NewTopicsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&egtc.Client) egtc.Authorizer = auth - egtc.Sender = autorest.CreateSender(withRequestLogging()) + egtc.Sender = sender client.eventGridTopicsClient = egtc ehc := eventhub.NewEventHubsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&ehc.Client) ehc.Authorizer = auth - ehc.Sender = autorest.CreateSender(withRequestLogging()) + ehc.Sender = sender client.eventHubClient = ehc chcgc := eventhub.NewConsumerGroupsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&chcgc.Client) chcgc.Authorizer = auth - chcgc.Sender = autorest.CreateSender(withRequestLogging()) + chcgc.Sender = sender client.eventHubConsumerGroupClient = chcgc ehnc := eventhub.NewNamespacesClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&ehnc.Client) ehnc.Authorizer = auth - ehnc.Sender = autorest.CreateSender(withRequestLogging()) + ehnc.Sender = sender client.eventHubNamespacesClient = ehnc ifc := network.NewInterfacesClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&ifc.Client) ifc.Authorizer = auth - ifc.Sender = autorest.CreateSender(withRequestLogging()) + ifc.Sender = sender client.ifaceClient = ifc erc := network.NewExpressRouteCircuitsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&erc.Client) erc.Authorizer = auth - erc.Sender = autorest.CreateSender(withRequestLogging()) + erc.Sender = sender client.expressRouteCircuitClient = erc lbc := network.NewLoadBalancersClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&lbc.Client) lbc.Authorizer = auth - lbc.Sender = autorest.CreateSender(withRequestLogging()) + lbc.Sender = sender client.loadBalancerClient = lbc lgc := network.NewLocalNetworkGatewaysClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&lgc.Client) lgc.Authorizer = auth - lgc.Sender = autorest.CreateSender(withRequestLogging()) + lgc.Sender = sender client.localNetConnClient = lgc pipc := network.NewPublicIPAddressesClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&pipc.Client) pipc.Authorizer = auth - pipc.Sender = autorest.CreateSender(withRequestLogging()) + pipc.Sender = sender client.publicIPClient = pipc sgc := 
network.NewSecurityGroupsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&sgc.Client) sgc.Authorizer = auth - sgc.Sender = autorest.CreateSender(withRequestLogging()) + sgc.Sender = sender client.secGroupClient = sgc src := network.NewSecurityRulesClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&src.Client) src.Authorizer = auth - src.Sender = autorest.CreateSender(withRequestLogging()) + src.Sender = sender client.secRuleClient = src snc := network.NewSubnetsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&snc.Client) snc.Authorizer = auth - snc.Sender = autorest.CreateSender(withRequestLogging()) + snc.Sender = sender client.subnetClient = snc vgcc := network.NewVirtualNetworkGatewayConnectionsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&vgcc.Client) vgcc.Authorizer = auth - vgcc.Sender = autorest.CreateSender(withRequestLogging()) + vgcc.Sender = sender client.vnetGatewayConnectionsClient = vgcc vgc := network.NewVirtualNetworkGatewaysClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&vgc.Client) vgc.Authorizer = auth - vgc.Sender = autorest.CreateSender(withRequestLogging()) + vgc.Sender = sender client.vnetGatewayClient = vgc vnc := network.NewVirtualNetworksClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&vnc.Client) vnc.Authorizer = auth - vnc.Sender = autorest.CreateSender(withRequestLogging()) + vnc.Sender = sender client.vnetClient = vnc vnpc := network.NewVirtualNetworkPeeringsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&vnpc.Client) vnpc.Authorizer = auth - vnpc.Sender = autorest.CreateSender(withRequestLogging()) + vnpc.Sender = sender client.vnetPeeringsClient = vnpc pcc := postgresql.NewConfigurationsClientWithBaseURI(endpoint, c.SubscriptionID) @@ -442,199 +471,199 @@ func (c *Config) getArmClient() (*ArmClient, error) { rtc := network.NewRouteTablesClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&rtc.Client) rtc.Authorizer = auth - rtc.Sender = autorest.CreateSender(withRequestLogging()) + rtc.Sender = sender client.routeTablesClient = rtc rc := network.NewRoutesClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&rc.Client) rc.Authorizer = auth - rc.Sender = autorest.CreateSender(withRequestLogging()) + rc.Sender = sender client.routesClient = rc dn := dns.NewRecordSetsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&dn.Client) dn.Authorizer = auth - dn.Sender = autorest.CreateSender(withRequestLogging()) + dn.Sender = sender client.dnsClient = dn zo := dns.NewZonesClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&zo.Client) zo.Authorizer = auth - zo.Sender = autorest.CreateSender(withRequestLogging()) + zo.Sender = sender client.zonesClient = zo rgc := resources.NewGroupsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&rgc.Client) rgc.Authorizer = auth - rgc.Sender = autorest.CreateSender(withRequestLogging()) + rgc.Sender = sender client.resourceGroupClient = rgc pc := resources.NewProvidersClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&pc.Client) pc.Authorizer = auth - pc.Sender = autorest.CreateSender(withRequestLogging()) + pc.Sender = sender client.providers = pc tc := resources.NewTagsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&tc.Client) tc.Authorizer = auth - tc.Sender = autorest.CreateSender(withRequestLogging()) + tc.Sender = sender client.tagsClient = tc rf := resources.NewGroupClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&rf.Client) rf.Authorizer = auth - rf.Sender = 
autorest.CreateSender(withRequestLogging()) + rf.Sender = sender client.resourceFindClient = rf subgc := subscriptions.NewGroupClientWithBaseURI(endpoint) setUserAgent(&subgc.Client) subgc.Authorizer = auth - subgc.Sender = autorest.CreateSender(withRequestLogging()) + subgc.Sender = sender client.subscriptionsGroupClient = subgc jc := scheduler.NewJobsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&jc.Client) jc.Authorizer = auth - jc.Sender = autorest.CreateSender(withRequestLogging()) + jc.Sender = sender client.jobsClient = jc jcc := scheduler.NewJobCollectionsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&jcc.Client) jcc.Authorizer = auth - jcc.Sender = autorest.CreateSender(withRequestLogging()) + jcc.Sender = sender client.jobsCollectionsClient = jcc ssc := storage.NewAccountsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&ssc.Client) ssc.Authorizer = auth - ssc.Sender = autorest.CreateSender(withRequestLogging()) + ssc.Sender = sender client.storageServiceClient = ssc suc := storage.NewUsageClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&suc.Client) suc.Authorizer = auth - suc.Sender = autorest.CreateSender(withRequestLogging()) + suc.Sender = sender client.storageUsageClient = suc cpc := cdn.NewProfilesClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&cpc.Client) cpc.Authorizer = auth - cpc.Sender = autorest.CreateSender(withRequestLogging()) + cpc.Sender = sender client.cdnProfilesClient = cpc cec := cdn.NewEndpointsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&cec.Client) cec.Authorizer = auth - cec.Sender = autorest.CreateSender(withRequestLogging()) + cec.Sender = sender client.cdnEndpointsClient = cec dc := resources.NewDeploymentsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&dc.Client) dc.Authorizer = auth - dc.Sender = autorest.CreateSender(withRequestLogging()) + dc.Sender = sender client.deploymentsClient = dc tmpc := trafficmanager.NewProfilesClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&tmpc.Client) tmpc.Authorizer = auth - tmpc.Sender = autorest.CreateSender(withRequestLogging()) + tmpc.Sender = sender client.trafficManagerProfilesClient = tmpc tmec := trafficmanager.NewEndpointsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&tmec.Client) tmec.Authorizer = auth - tmec.Sender = autorest.CreateSender(withRequestLogging()) + tmec.Sender = sender client.trafficManagerEndpointsClient = tmec rdc := redis.NewGroupClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&rdc.Client) rdc.Authorizer = auth - rdc.Sender = autorest.CreateSender(withRequestLogging()) + rdc.Sender = sender client.redisClient = rdc sesc := search.NewServicesClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&sesc.Client) sesc.Authorizer = auth - sesc.Sender = autorest.CreateSender(withRequestLogging()) + sesc.Sender = sender client.searchServicesClient = sesc sbnc := servicebus.NewNamespacesClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&sbnc.Client) sbnc.Authorizer = auth - sbnc.Sender = autorest.CreateSender(withRequestLogging()) + sbnc.Sender = sender client.serviceBusNamespacesClient = sbnc sbqc := servicebus.NewQueuesClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&sbqc.Client) sbqc.Authorizer = auth - sbqc.Sender = autorest.CreateSender(withRequestLogging()) + sbqc.Sender = sender client.serviceBusQueuesClient = sbqc sbtc := servicebus.NewTopicsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&sbtc.Client) sbtc.Authorizer = auth - 
sbtc.Sender = autorest.CreateSender(withRequestLogging()) + sbtc.Sender = sender client.serviceBusTopicsClient = sbtc sbsc := servicebus.NewSubscriptionsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&sbsc.Client) sbsc.Authorizer = auth - sbsc.Sender = autorest.CreateSender(withRequestLogging()) + sbsc.Sender = sender client.serviceBusSubscriptionsClient = sbsc sqldc := sql.NewDatabasesClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&sqldc.Client) sqldc.Authorizer = auth - sqldc.Sender = autorest.CreateSender(withRequestLogging()) + sqldc.Sender = sender client.sqlDatabasesClient = sqldc sqlfrc := sql.NewFirewallRulesClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&sqlfrc.Client) sqlfrc.Authorizer = auth - sqlfrc.Sender = autorest.CreateSender(withRequestLogging()) + sqlfrc.Sender = sender client.sqlFirewallRulesClient = sqlfrc sqlepc := sql.NewElasticPoolsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&sqlepc.Client) sqlepc.Authorizer = auth - sqlepc.Sender = autorest.CreateSender(withRequestLogging()) + sqlepc.Sender = sender client.sqlElasticPoolsClient = sqlepc sqlsrv := sql.NewServersClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&sqlsrv.Client) sqlsrv.Authorizer = auth - sqlsrv.Sender = autorest.CreateSender(withRequestLogging()) + sqlsrv.Sender = sender client.sqlServersClient = sqlsrv aspc := web.NewAppServicePlansClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&aspc.Client) aspc.Authorizer = auth - aspc.Sender = autorest.CreateSender(withRequestLogging()) + aspc.Sender = sender client.appServicePlansClient = aspc ai := appinsights.NewComponentsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&ai.Client) ai.Authorizer = auth - ai.Sender = autorest.CreateSender(withRequestLogging()) + ai.Sender = sender client.appInsightsClient = ai spc := graphrbac.NewServicePrincipalsClientWithBaseURI(graphEndpoint, c.TenantID) setUserAgent(&spc.Client) spc.Authorizer = graphAuth - spc.Sender = autorest.CreateSender(withRequestLogging()) + spc.Sender = sender client.servicePrincipalsClient = spc ac := web.NewAppsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&ac.Client) ac.Authorizer = auth - ac.Sender = autorest.CreateSender(withRequestLogging()) + ac.Sender = sender client.appsClient = ac kvc := keyvault.NewVaultsClientWithBaseURI(endpoint, c.SubscriptionID) setUserAgent(&kvc.Client) kvc.Authorizer = auth - kvc.Sender = autorest.CreateSender(withRequestLogging()) + kvc.Sender = sender client.keyVaultClient = kvc kvmc := keyVault.New() diff --git a/azurerm/provider.go b/azurerm/provider.go index 00be3b0184a8..f06b6e165b25 100644 --- a/azurerm/provider.go +++ b/azurerm/provider.go @@ -8,8 +8,11 @@ import ( "log" "strings" "sync" + "time" "github.com/Azure/azure-sdk-for-go/arm/resources/resources" + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/azure/cli" "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform/helper/mutexkv" "github.com/hashicorp/terraform/helper/schema" @@ -23,25 +26,25 @@ func Provider() terraform.ResourceProvider { Schema: map[string]*schema.Schema{ "subscription_id": { Type: schema.TypeString, - Required: true, + Optional: true, DefaultFunc: schema.EnvDefaultFunc("ARM_SUBSCRIPTION_ID", ""), }, "client_id": { Type: schema.TypeString, - Required: true, + Optional: true, DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_ID", ""), }, "client_secret": { Type: schema.TypeString, - Required: true, + Optional: true, DefaultFunc: 
schema.EnvDefaultFunc("ARM_CLIENT_SECRET", ""), }, "tenant_id": { Type: schema.TypeString, - Required: true, + Optional: true, DefaultFunc: schema.EnvDefaultFunc("ARM_TENANT_ID", ""), }, @@ -151,17 +154,24 @@ func Provider() terraform.ResourceProvider { type Config struct { ManagementURL string - SubscriptionID string + // Core ClientID string - ClientSecret string + SubscriptionID string TenantID string Environment string SkipProviderRegistration bool + // Service Principal Auth + ClientSecret string + + // Bearer Auth + AccessToken *adal.Token + IsCloudShell bool + validateCredentialsOnce sync.Once } -func (c *Config) validate() error { +func (c *Config) validateServicePrincipal() error { var err *multierror.Error if c.SubscriptionID == "" { @@ -183,6 +193,116 @@ func (c *Config) validate() error { return err.ErrorOrNil() } +func (c *Config) validateBearerAuth() error { + var err *multierror.Error + + if c.AccessToken == nil { + err = multierror.Append(err, fmt.Errorf("Access Token was not found in your Azure CLI Credentials.\n\nPlease login to the Azure CLI again via `az login`")) + } + + if c.ClientID == "" { + err = multierror.Append(err, fmt.Errorf("Client ID was not found in your Azure CLI Credentials.\n\nPlease login to the Azure CLI again via `az login`")) + } + + if c.SubscriptionID == "" { + err = multierror.Append(err, fmt.Errorf("Subscription ID was not found in your Azure CLI Credentials.\n\nPlease login to the Azure CLI again via `az login`")) + } + + if c.TenantID == "" { + err = multierror.Append(err, fmt.Errorf("Tenant ID was not found in your Azure CLI Credentials.\n\nPlease login to the Azure CLI again via `az login`")) + } + + return err.ErrorOrNil() +} + +func (c *Config) LoadTokensFromAzureCLI() error { + profilePath, err := cli.ProfilePath() + if err != nil { + return fmt.Errorf("Error loading the Profile Path from the Azure CLI: %+v", err) + } + + profile, err := cli.LoadProfile(profilePath) + if err != nil { + return fmt.Errorf("Azure CLI Authorization Profile was not found. Please ensure the Azure CLI is installed and then log-in with `az login`.") + } + + // pull out the TenantID and Subscription ID from the Azure Profile + for _, subscription := range profile.Subscriptions { + if subscription.IsDefault { + c.SubscriptionID = subscription.ID + c.TenantID = subscription.TenantID + c.Environment = normalizeEnvironmentName(subscription.EnvironmentName) + break + } + } + + foundToken := false + if c.TenantID != "" { + // pull out the ClientID and the AccessToken from the Azure Access Token + tokensPath, err := cli.AccessTokensPath() + if err != nil { + return fmt.Errorf("Error loading the Tokens Path from the Azure CLI: %+v", err) + } + + tokens, err := cli.LoadTokens(tokensPath) + if err != nil { + return fmt.Errorf("Azure CLI Authorization Tokens were not found. 
Please ensure the Azure CLI is installed and then log-in with `az login`.") + } + + for _, accessToken := range tokens { + token, err := accessToken.ToADALToken() + if err != nil { + return fmt.Errorf("[DEBUG] Error converting access token to token: %+v", err) + } + + expirationDate, err := cli.ParseExpirationDate(accessToken.ExpiresOn) + if err != nil { + return fmt.Errorf("Error parsing expiration date: %q", accessToken.ExpiresOn) + } + + if expirationDate.UTC().Before(time.Now().UTC()) { + log.Printf("[DEBUG] Token '%s' has expired", token.AccessToken) + continue + } + + if !strings.Contains(accessToken.Resource, "management") { + log.Printf("[DEBUG] Resource '%s' isn't a management domain", accessToken.Resource) + continue + } + + if !strings.HasSuffix(accessToken.Authority, c.TenantID) { + log.Printf("[DEBUG] Resource '%s' isn't for the correct Tenant", accessToken.Resource) + continue + } + + c.ClientID = accessToken.ClientID + c.AccessToken = &token + c.IsCloudShell = accessToken.RefreshToken == "" + foundToken = true + break + } + } + + if !foundToken { + return fmt.Errorf("No valid (unexpired) Azure CLI Auth Tokens found. Please run `az login`.") + } + + return nil +} + +func normalizeEnvironmentName(input string) string { + // Environment is stored as `Azure{Environment}Cloud` + output := strings.ToLower(input) + output = strings.TrimPrefix(output, "azure") + output = strings.TrimSuffix(output, "cloud") + + // however Azure Public is `AzureCloud` in the CLI Profile and not `AzurePublicCloud`. + if output == "" { + return "public" + } + return output +} + func providerConfigure(p *schema.Provider) schema.ConfigureFunc { return func(d *schema.ResourceData) (interface{}, error) { config := &Config{ @@ -194,8 +314,20 @@ func providerConfigure(p *schema.Provider) schema.ConfigureFunc { SkipProviderRegistration: d.Get("skip_provider_registration").(bool), } - if err := config.validate(); err != nil { - return nil, err + if config.ClientSecret != "" { + log.Printf("[DEBUG] Client Secret specified - using Service Principal for Authentication") + if err := config.validateServicePrincipal(); err != nil { + return nil, err + } + } else { + log.Printf("[DEBUG] No Client Secret specified - loading credentials from Azure CLI") + if err := config.LoadTokensFromAzureCLI(); err != nil { + return nil, err + } + + if err := config.validateBearerAuth(); err != nil { + return nil, fmt.Errorf("Please specify either a Service Principal, or log in with the Azure CLI (using `az login`)") + } } client, err := config.getArmClient() diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go b/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go new file mode 100644 index 000000000000..e87911e835d9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go @@ -0,0 +1,6 @@ +// +build !windows + +package adal + +// msiPath is the path to the MSI Extension settings file (to discover the endpoint) +var msiPath = "/var/lib/waagent/ManagedIdentity-Settings" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go b/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go new file mode 100644 index 000000000000..80f8004327f1 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go @@ -0,0 +1,11 @@ +// +build windows + +package adal + +import ( + "os" + "strings" +) + +// msiPath is the path to the MSI Extension settings file (to discover the endpoint) +var msiPath = strings.Join([]string{os.Getenv("SystemDrive"), 
"WindowsAzure/Config/ManagedIdentity-Settings"}, "/") diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go index 559fc6653583..2ac8c3c22040 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -15,12 +15,12 @@ import ( "strings" "time" + "github.com/Azure/go-autorest/autorest/date" "github.com/dgrijalva/jwt-go" ) const ( defaultRefresh = 5 * time.Minute - tokenBaseDate = "1970-01-01T00:00:00Z" // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow OAuthGrantTypeDeviceCode = "device_code" @@ -31,16 +31,10 @@ const ( // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows OAuthGrantTypeRefreshToken = "refresh_token" - // managedIdentitySettingsPath is the path to the MSI Extension settings file (to discover the endpoint) - managedIdentitySettingsPath = "/var/lib/waagent/ManagedIdentity-Settings" + // metadataHeader is the header required by MSI extension + metadataHeader = "Metadata" ) -var expirationBase time.Time - -func init() { - expirationBase, _ = time.Parse(time.RFC3339, tokenBaseDate) -} - // OAuthTokenProvider is an interface which should be implemented by an access token retriever type OAuthTokenProvider interface { OAuthToken() string @@ -76,7 +70,10 @@ func (t Token) Expires() time.Time { if err != nil { s = -3600 } - return expirationBase.Add(time.Duration(s) * time.Second).UTC() + + expiration := date.NewUnixTimeFromSeconds(float64(s)) + + return time.Time(expiration).UTC() } // IsExpired returns true if the Token is expired, false otherwise. @@ -135,9 +132,7 @@ type ServicePrincipalMSISecret struct { } // SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -// MSI extension requires the authority field to be set to the real tenant authority endpoint func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("authority", spt.oauthConfig.AuthorityEndpoint.String()) return nil } @@ -261,41 +256,43 @@ func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID s ) } -// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension. -func NewServicePrincipalTokenFromMSI(oauthConfig OAuthConfig, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - return newServicePrincipalTokenFromMSI(oauthConfig, resource, managedIdentitySettingsPath, callbacks...) +// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines. +func GetMSIVMEndpoint() (string, error) { + return getMSIVMEndpoint(msiPath) } -func newServicePrincipalTokenFromMSI(oauthConfig OAuthConfig, resource, settingsPath string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { +func getMSIVMEndpoint(path string) (string, error) { // Read MSI settings - bytes, err := ioutil.ReadFile(settingsPath) + bytes, err := ioutil.ReadFile(path) if err != nil { - return nil, err + return "", err } msiSettings := struct { URL string `json:"url"` }{} err = json.Unmarshal(bytes, &msiSettings) if err != nil { - return nil, err + return "", err } + return msiSettings.URL, nil +} + +// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension. 
+func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { // We set the oauth config token endpoint to be MSI's endpoint - // We leave the authority as-is so MSI can POST it with the token request - msiEndpointURL, err := url.Parse(msiSettings.URL) + msiEndpointURL, err := url.Parse(msiEndpoint) if err != nil { return nil, err } - msiTokenEndpointURL, err := msiEndpointURL.Parse("/oauth2/token") + oauthConfig, err := NewOAuthConfig(msiEndpointURL.String(), "") if err != nil { return nil, err } - oauthConfig.TokenEndpoint = *msiTokenEndpointURL - spt := &ServicePrincipalToken{ - oauthConfig: oauthConfig, + oauthConfig: *oauthConfig, secret: &ServicePrincipalMSISecret{}, resource: resource, autoRefresh: true, @@ -364,16 +361,24 @@ func (spt *ServicePrincipalToken) refreshInternal(resource string) error { req.ContentLength = int64(len(s)) req.Header.Set(contentType, mimeTypeFormPost) + if _, ok := spt.secret.(*ServicePrincipalMSISecret); ok { + req.Header.Set(metadataHeader, "true") + } resp, err := spt.sender.Do(req) if err != nil { return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err) } + defer resp.Body.Close() + rb, err := ioutil.ReadAll(resp.Body) + if resp.StatusCode != http.StatusOK { - return fmt.Errorf("adal: Refresh request failed. Status Code = '%d'", resp.StatusCode) + if err != nil { + return fmt.Errorf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body", resp.StatusCode) + } + return fmt.Errorf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)) } - rb, err := ioutil.ReadAll(resp.Body) if err != nil { return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err) } diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go new file mode 100644 index 000000000000..b5b897c7df9e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go @@ -0,0 +1,51 @@ +package cli + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + + "github.com/dimchansky/utfbom" + "github.com/mitchellh/go-homedir" +) + +// Profile represents a Profile from the Azure CLI +type Profile struct { + InstallationID string `json:"installationId"` + Subscriptions []Subscription `json:"subscriptions"` +} + +// Subscription represents a Subscription from the Azure CLI +type Subscription struct { + EnvironmentName string `json:"environmentName"` + ID string `json:"id"` + IsDefault bool `json:"isDefault"` + Name string `json:"name"` + State string `json:"state"` + TenantID string `json:"tenantId"` +} + +// ProfilePath returns the path where the Azure Profile is stored from the Azure CLI +func ProfilePath() (string, error) { + return homedir.Expand("~/.azure/azureProfile.json") +} + +// LoadProfile restores a Profile object from a file located at 'path'. 
+func LoadProfile(path string) (result Profile, err error) { + var contents []byte + contents, err = ioutil.ReadFile(path) + if err != nil { + err = fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + return + } + reader := utfbom.SkipOnly(bytes.NewReader(contents)) + + dec := json.NewDecoder(reader) + if err = dec.Decode(&result); err != nil { + err = fmt.Errorf("failed to decode contents of file (%s) into a Profile representation: %v", path, err) + return + } + + return +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go new file mode 100644 index 000000000000..a1f3af1517f0 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go @@ -0,0 +1,89 @@ +package cli + +import ( + "encoding/json" + "fmt" + "os" + "strconv" + "time" + + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/date" + "github.com/mitchellh/go-homedir" +) + +// Token represents an AccessToken from the Azure CLI +type Token struct { + AccessToken string `json:"accessToken"` + Authority string `json:"_authority"` + ClientID string `json:"_clientId"` + ExpiresOn string `json:"expiresOn"` + IdentityProvider string `json:"identityProvider"` + IsMRRT bool `json:"isMRRT"` + RefreshToken string `json:"refreshToken"` + Resource string `json:"resource"` + TokenType string `json:"tokenType"` + UserID string `json:"userId"` +} + +// ToADALToken converts an Azure CLI `Token`` to an `adal.Token`` +func (t Token) ToADALToken() (converted adal.Token, err error) { + tokenExpirationDate, err := ParseExpirationDate(t.ExpiresOn) + if err != nil { + err = fmt.Errorf("Error parsing Token Expiration Date %q: %+v", t.ExpiresOn, err) + return + } + + difference := tokenExpirationDate.Sub(date.UnixEpoch()) + + converted = adal.Token{ + AccessToken: t.AccessToken, + Type: t.TokenType, + ExpiresIn: "3600", + ExpiresOn: strconv.Itoa(int(difference.Seconds())), + RefreshToken: t.RefreshToken, + Resource: t.Resource, + } + return +} + +// AccessTokensPath returns the path where access tokens are stored from the Azure CLI +func AccessTokensPath() (string, error) { + return homedir.Expand("~/.azure/accessTokens.json") +} + +// ParseExpirationDate parses either a Azure CLI or CloudShell date into a time object +func ParseExpirationDate(input string) (*time.Time, error) { + // CloudShell (and potentially the Azure CLI in future) + expirationDate, cloudShellErr := time.Parse(time.RFC3339, input) + if cloudShellErr != nil { + // Azure CLI (Python) e.g. 2017-08-31 19:48:57.998857 (plus the local timezone) + const cliFormat = "2006-01-02 15:04:05.999999" + expirationDate, cliErr := time.ParseInLocation(cliFormat, input, time.Local) + if cliErr == nil { + return &expirationDate, nil + } + + return nil, fmt.Errorf("Error parsing expiration date %q.\n\nCloudShell Error: \n%+v\n\nCLI Error:\n%+v", input, cloudShellErr, cliErr) + } + + return &expirationDate, nil +} + +// LoadTokens restores a set of Token objects from a file located at 'path'. 
+func LoadTokens(path string) ([]Token, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + } + defer file.Close() + + var tokens []Token + + dec := json.NewDecoder(file) + if err = dec.Decode(&tokens); err != nil { + return nil, fmt.Errorf("failed to decode contents of file (%s) into a `cli.Token` representation: %v", path, err) + } + + return tokens, nil +} diff --git a/vendor/github.com/dimchansky/utfbom/LICENSE b/vendor/github.com/dimchansky/utfbom/LICENSE new file mode 100644 index 000000000000..8dada3edaf50 --- /dev/null +++ b/vendor/github.com/dimchansky/utfbom/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/dimchansky/utfbom/README.md b/vendor/github.com/dimchansky/utfbom/README.md new file mode 100644 index 000000000000..2f06ecacd039 --- /dev/null +++ b/vendor/github.com/dimchansky/utfbom/README.md @@ -0,0 +1,81 @@ +# utfbom [![Godoc](https://godoc.org/github.com/dimchansky/utfbom?status.png)](https://godoc.org/github.com/dimchansky/utfbom) [![License](https://img.shields.io/:license-apache-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![Build Status](https://travis-ci.org/dimchansky/utfbom.svg?branch=master)](https://travis-ci.org/dimchansky/utfbom) [![Go Report Card](https://goreportcard.com/badge/github.com/dimchansky/utfbom)](https://goreportcard.com/report/github.com/dimchansky/utfbom) [![Coverage Status](https://coveralls.io/repos/github/dimchansky/utfbom/badge.svg?branch=master)](https://coveralls.io/github/dimchansky/utfbom?branch=master) + +The package utfbom implements the detection of the BOM (Unicode Byte Order Mark) and removing as necessary. It can also return the encoding detected by the BOM. 
+ +## Installation + + go get -u github.com/dimchansky/utfbom + +## Example + +```go +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + + "github.com/dimchansky/utfbom" +) + +func main() { + trySkip([]byte("\xEF\xBB\xBFhello")) + trySkip([]byte("hello")) +} + +func trySkip(byteData []byte) { + fmt.Println("Input:", byteData) + + // just skip BOM + output, err := ioutil.ReadAll(utfbom.SkipOnly(bytes.NewReader(byteData))) + if err != nil { + fmt.Println(err) + return + } + fmt.Println("ReadAll with BOM skipping", output) + + // skip BOM and detect encoding + sr, enc := utfbom.Skip(bytes.NewReader(byteData)) + var encStr string + switch enc { + case utfbom.UTF8: + encStr = "UTF8" + case utfbom.UTF16BigEndian: + encStr = "UTF16 big endian" + case utfbom.UTF16LittleEndian: + encStr = "UTF16 little endian" + case utfbom.UTF32BigEndian: + encStr = "UTF32 big endian" + case utfbom.UTF32LittleEndian: + encStr = "UTF32 little endian" + default: + encStr = "Unknown, no byte-order mark found" + } + fmt.Println("Detected encoding:", encStr) + output, err = ioutil.ReadAll(sr) + if err != nil { + fmt.Println(err) + return + } + fmt.Println("ReadAll with BOM detection and skipping", output) + fmt.Println() +} +``` + +Output: + +``` +$ go run main.go +Input: [239 187 191 104 101 108 108 111] +ReadAll with BOM skipping [104 101 108 108 111] +Detected encoding: UTF8 +ReadAll with BOM detection and skipping [104 101 108 108 111] + +Input: [104 101 108 108 111] +ReadAll with BOM skipping [104 101 108 108 111] +Detected encoding: Unknown, no byte-order mark found +ReadAll with BOM detection and skipping [104 101 108 108 111] +``` + + diff --git a/vendor/github.com/dimchansky/utfbom/utfbom.go b/vendor/github.com/dimchansky/utfbom/utfbom.go new file mode 100644 index 000000000000..648184a12dec --- /dev/null +++ b/vendor/github.com/dimchansky/utfbom/utfbom.go @@ -0,0 +1,174 @@ +// Package utfbom implements the detection of the BOM (Unicode Byte Order Mark) and removing as necessary. +// It wraps an io.Reader object, creating another object (Reader) that also implements the io.Reader +// interface but provides automatic BOM checking and removing as necessary. +package utfbom + +import ( + "errors" + "io" +) + +// Encoding is type alias for detected UTF encoding. +type Encoding int + +// Constants to identify detected UTF encodings. +const ( + // Unknown encoding, returned when no BOM was detected + Unknown Encoding = iota + + // UTF8, BOM bytes: EF BB BF + UTF8 + + // UTF-16, big-endian, BOM bytes: FE FF + UTF16BigEndian + + // UTF-16, little-endian, BOM bytes: FF FE + UTF16LittleEndian + + // UTF-32, big-endian, BOM bytes: 00 00 FE FF + UTF32BigEndian + + // UTF-32, little-endian, BOM bytes: FF FE 00 00 + UTF32LittleEndian +) + +const maxConsecutiveEmptyReads = 100 + +// Skip creates Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary. +// It also returns the encoding detected by the BOM. +// If the detected encoding is not needed, you can call the SkipOnly function. +func Skip(rd io.Reader) (*Reader, Encoding) { + // Is it already a Reader? + b, ok := rd.(*Reader) + if ok { + return b, Unknown + } + + enc, left, err := detectUtf(rd) + return &Reader{ + rd: rd, + buf: left, + err: err, + }, enc +} + +// SkipOnly creates Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary. 
+func SkipOnly(rd io.Reader) *Reader { + r, _ := Skip(rd) + return r +} + +// Reader implements automatic BOM (Unicode Byte Order Mark) checking and +// removing as necessary for an io.Reader object. +type Reader struct { + rd io.Reader // reader provided by the client + buf []byte // buffered data + err error // last error +} + +// Read is an implementation of io.Reader interface. +// The bytes are taken from the underlying Reader, but it checks for BOMs, removing them as necessary. +func (r *Reader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, nil + } + + if r.buf == nil { + if r.err != nil { + return 0, r.readErr() + } + + return r.rd.Read(p) + } + + // copy as much as we can + n = copy(p, r.buf) + r.buf = nilIfEmpty(r.buf[n:]) + return n, nil +} + +func (r *Reader) readErr() error { + err := r.err + r.err = nil + return err +} + +var errNegativeRead = errors.New("utfbom: reader returned negative count from Read") + +func detectUtf(rd io.Reader) (enc Encoding, buf []byte, err error) { + buf, err = readBOM(rd) + + if len(buf) >= 4 { + if isUTF32BigEndianBOM4(buf) { + return UTF32BigEndian, nilIfEmpty(buf[4:]), err + } + if isUTF32LittleEndianBOM4(buf) { + return UTF32LittleEndian, nilIfEmpty(buf[4:]), err + } + } + + if len(buf) > 2 && isUTF8BOM3(buf) { + return UTF8, nilIfEmpty(buf[3:]), err + } + + if (err != nil && err != io.EOF) || (len(buf) < 2) { + return Unknown, nilIfEmpty(buf), err + } + + if isUTF16BigEndianBOM2(buf) { + return UTF16BigEndian, nilIfEmpty(buf[2:]), err + } + if isUTF16LittleEndianBOM2(buf) { + return UTF16LittleEndian, nilIfEmpty(buf[2:]), err + } + + return Unknown, nilIfEmpty(buf), err +} + +func readBOM(rd io.Reader) (buf []byte, err error) { + const maxBOMSize = 4 + var bom [maxBOMSize]byte // used to read BOM + + // read as many bytes as possible + for nEmpty, n := 0, 0; err == nil && len(buf) < maxBOMSize; buf = bom[:len(buf)+n] { + if n, err = rd.Read(bom[len(buf):]); n < 0 { + panic(errNegativeRead) + } + if n > 0 { + nEmpty = 0 + } else { + nEmpty++ + if nEmpty >= maxConsecutiveEmptyReads { + err = io.ErrNoProgress + } + } + } + return +} + +func isUTF32BigEndianBOM4(buf []byte) bool { + return buf[0] == 0x00 && buf[1] == 0x00 && buf[2] == 0xFE && buf[3] == 0xFF +} + +func isUTF32LittleEndianBOM4(buf []byte) bool { + return buf[0] == 0xFF && buf[1] == 0xFE && buf[2] == 0x00 && buf[3] == 0x00 +} + +func isUTF8BOM3(buf []byte) bool { + return buf[0] == 0xEF && buf[1] == 0xBB && buf[2] == 0xBF +} + +func isUTF16BigEndianBOM2(buf []byte) bool { + return buf[0] == 0xFE && buf[1] == 0xFF +} + +func isUTF16LittleEndianBOM2(buf []byte) bool { + return buf[0] == 0xFF && buf[1] == 0xFE +} + +func nilIfEmpty(buf []byte) (res []byte) { + if len(buf) > 0 { + res = buf + } + return +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 4247a13f0913..7f30c84b936a 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -220,57 +220,66 @@ }, { "checksumSHA1": "+4d+Y67AMKKuyR1EO33Zdt+RVx0=", - "comment": "v8.3.0", + "comment": "v8.4.0", "path": "github.com/Azure/go-autorest/autorest", - "revision": "77a52603f06947221c672f10275abc9bf2c7d557", - "revisionTime": "2017-08-16T16:57:29Z", - "version": "v8.3.0", - "versionExact": "v8.3.0" + "revision": "f6be1abbb5abd0517522f850dd785990d373da7e", + "revisionTime": "2017-09-13T23:19:17Z", + "version": "v8.4.0", + "versionExact": "v8.4.0" }, { - "checksumSHA1": "KOETWLsF6QW+lrPVPsMNHDZP+xA=", - "comment": "v8.3.0", + "checksumSHA1": "7G4HgRaIT25bgz/hPtXG6Kv8Fho=", + "comment": "v8.4.0", "path": 
"github.com/Azure/go-autorest/autorest/adal", - "revision": "77a52603f06947221c672f10275abc9bf2c7d557", - "revisionTime": "2017-08-16T16:57:29Z", - "version": "v8.3.0", - "versionExact": "v8.3.0" + "revision": "f6be1abbb5abd0517522f850dd785990d373da7e", + "revisionTime": "2017-09-13T23:19:17Z", + "version": "v8.4.0", + "versionExact": "v8.4.0" }, { "checksumSHA1": "2KdBFgT4qY+fMOkBTa5vA9V0AiM=", - "comment": "v8.3.0", + "comment": "v8.4.0", "path": "github.com/Azure/go-autorest/autorest/azure", - "revision": "77a52603f06947221c672f10275abc9bf2c7d557", - "revisionTime": "2017-08-16T16:57:29Z", - "version": "v8.3.0", - "versionExact": "v8.3.0" + "revision": "f6be1abbb5abd0517522f850dd785990d373da7e", + "revisionTime": "2017-09-13T23:19:17Z", + "version": "v8.4.0", + "versionExact": "v8.4.0" + }, + { + "checksumSHA1": "apxw17Dm1naEXMbVDCRnEDkQDQ8=", + "comment": "v8.4.0", + "path": "github.com/Azure/go-autorest/autorest/azure/cli", + "revision": "f6be1abbb5abd0517522f850dd785990d373da7e", + "revisionTime": "2017-09-13T23:19:17Z", + "version": "v8.4.0", + "versionExact": "v8.4.0" }, { "checksumSHA1": "LSF/pNrjhIxl6jiS6bKooBFCOxI=", - "comment": "v8.3.0", + "comment": "v8.4.0", "path": "github.com/Azure/go-autorest/autorest/date", - "revision": "77a52603f06947221c672f10275abc9bf2c7d557", - "revisionTime": "2017-08-16T16:57:29Z", - "version": "v8.3.0", - "versionExact": "v8.3.0" + "revision": "f6be1abbb5abd0517522f850dd785990d373da7e", + "revisionTime": "2017-09-13T23:19:17Z", + "version": "v8.4.0", + "versionExact": "v8.4.0" }, { "checksumSHA1": "Ev8qCsbFjDlMlX0N2tYAhYQFpUc=", - "comment": "v8.3.0", + "comment": "v8.4.0", "path": "github.com/Azure/go-autorest/autorest/to", - "revision": "77a52603f06947221c672f10275abc9bf2c7d557", - "revisionTime": "2017-08-16T16:57:29Z", - "version": "v8.3.0", - "versionExact": "v8.3.0" + "revision": "f6be1abbb5abd0517522f850dd785990d373da7e", + "revisionTime": "2017-09-13T23:19:17Z", + "version": "v8.4.0", + "versionExact": "v8.4.0" }, { "checksumSHA1": "rGkTfIycpeix5TAbZS74ceGAPHI=", - "comment": "v8.3.0", + "comment": "v8.4.0", "path": "github.com/Azure/go-autorest/autorest/validation", - "revision": "77a52603f06947221c672f10275abc9bf2c7d557", - "revisionTime": "2017-08-16T16:57:29Z", - "version": "v8.3.0", - "versionExact": "v8.3.0" + "revision": "f6be1abbb5abd0517522f850dd785990d373da7e", + "revisionTime": "2017-09-13T23:19:17Z", + "version": "v8.4.0", + "versionExact": "v8.4.0" }, { "checksumSHA1": "FIL83loX9V9APvGQIjJpbxq53F0=", @@ -509,6 +518,12 @@ "revision": "f0777076321ab64f6efc15a82d9d23b98539b943", "revisionTime": "2016-06-17T17:01:58Z" }, + { + "checksumSHA1": "vI06gXltt7k8zik7bOZvG2PmfYo=", + "path": "github.com/dimchansky/utfbom", + "revision": "6c6132ff69f0f6c088739067407b5d32c52e1d0f", + "revisionTime": "2017-03-28T06:13:12Z" + }, { "checksumSHA1": "BCv50o5pDkoSG3vYKOSai1Z8p3w=", "path": "github.com/fsouza/go-dockerclient", diff --git a/website/azurerm.erb b/website/azurerm.erb index dad2761b876c..77770ede3b4e 100644 --- a/website/azurerm.erb +++ b/website/azurerm.erb @@ -9,6 +9,15 @@