diff --git a/.teamcity/components/settings.kt b/.teamcity/components/settings.kt index b32bbe5c68cc..139c4ea72b79 100644 --- a/.teamcity/components/settings.kt +++ b/.teamcity/components/settings.kt @@ -25,9 +25,12 @@ var serviceTestConfigurationOverrides = mapOf( // Data Lake has a low quota "datalake" to testConfiguration(2, defaultStartHour), - //HSM has low quota and potentially slow recycle time + // HSM has low quota and potentially slow recycle time "hsm" to testConfiguration(1, defaultStartHour), + // Log Analytics Clusters have a max deployments of 2 - parallelism set to 1 or `importTest` fails + "loganalytics" to testConfiguration(1, defaultStartHour), + // servicebus quotas are limited and we experience failures if tests // execute too quickly as we run out of namespaces in the sub "servicebus" to testConfiguration(10, defaultStartHour), diff --git a/azurerm/helpers/azure/key_vault_child.go b/azurerm/helpers/azure/key_vault_child.go index d352eda26aae..59911399c18c 100644 --- a/azurerm/helpers/azure/key_vault_child.go +++ b/azurerm/helpers/azure/key_vault_child.go @@ -15,6 +15,20 @@ type KeyVaultChildID struct { Version string } +func NewKeyVaultChildResourceID(keyVaultBaseUrl, childType, name, version string) (string, error) { + fmtString := "%s/%s/%s/%s" + keyVaultUrl, err := url.Parse(keyVaultBaseUrl) + if err != nil || keyVaultBaseUrl == "" { + return "", fmt.Errorf("failed to parse Key Vault Base URL %q: %+v", keyVaultBaseUrl, err) + } + // (@jackofallops) - Log Analytics service adds the port number to the API returns, so we strip it here + if hostParts := strings.Split(keyVaultUrl.Host, ":"); len(hostParts) > 1 { + keyVaultUrl.Host = hostParts[0] + } + + return fmt.Sprintf(fmtString, keyVaultUrl.String(), childType, name, version), nil +} + func ParseKeyVaultChildID(id string) (*KeyVaultChildID, error) { // example: https://tharvey-keyvault.vault.azure.net/type/bird/fdf067c93bbb4b22bff4d8b7a9a56217 idURL, err := url.ParseRequestURI(id) diff --git 
a/azurerm/helpers/azure/key_vault_child_test.go b/azurerm/helpers/azure/key_vault_child_test.go index 2e5951cc7659..0fdbce778ad0 100644 --- a/azurerm/helpers/azure/key_vault_child_test.go +++ b/azurerm/helpers/azure/key_vault_child_test.go @@ -1,6 +1,8 @@ package azure -import "testing" +import ( + "testing" +) func TestAccAzureRMValidateKeyVaultChildID(t *testing.T) { cases := []struct { @@ -320,3 +322,46 @@ func TestAccAzureRMKeyVaultChild_validateName(t *testing.T) { } } } + +func TestNewKeyVaultChildResourceID(t *testing.T) { + childType := "keys" + childName := "test" + childVersion := "testVersionString" + cases := []struct { + Scenario string + keyVaultBaseUrl string + Expected string + ExpectError bool + }{ + { + Scenario: "empty values", + keyVaultBaseUrl: "", + Expected: "", + ExpectError: true, + }, + { + Scenario: "valid, no port", + keyVaultBaseUrl: "https://test.vault.azure.net", + Expected: "https://test.vault.azure.net/keys/test/testVersionString", + ExpectError: false, + }, + { + Scenario: "valid, with port", + keyVaultBaseUrl: "https://test.vault.azure.net:443", + Expected: "https://test.vault.azure.net/keys/test/testVersionString", + ExpectError: false, + }, + } + for _, tc := range cases { + id, err := NewKeyVaultChildResourceID(tc.keyVaultBaseUrl, childType, childName, childVersion) + if err != nil { + if !tc.ExpectError { + t.Fatalf("Got error for New Resource ID '%s': %+v", tc.keyVaultBaseUrl, err) + return + } + } + if id != tc.Expected { + t.Fatalf("Expected id for %q to be %q, got %q", tc.keyVaultBaseUrl, tc.Expected, id) + } + } +} diff --git a/azurerm/internal/services/loganalytics/client/client.go b/azurerm/internal/services/loganalytics/client/client.go index b69b64a2f061..c9ffc1f9cae3 100644 --- a/azurerm/internal/services/loganalytics/client/client.go +++ b/azurerm/internal/services/loganalytics/client/client.go @@ -7,6 +7,7 @@ import ( ) type Client struct { + ClusterClient *operationalinsights.ClustersClient DataExportClient 
*operationalinsights.DataExportsClient DataSourcesClient *operationalinsights.DataSourcesClient LinkedServicesClient *operationalinsights.LinkedServicesClient @@ -19,6 +20,9 @@ type Client struct { } func NewClient(o *common.ClientOptions) *Client { + ClusterClient := operationalinsights.NewClustersClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&ClusterClient.Client, o.ResourceManagerAuthorizer) + DataExportClient := operationalinsights.NewDataExportsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&DataExportClient.Client, o.ResourceManagerAuthorizer) @@ -47,6 +51,7 @@ func NewClient(o *common.ClientOptions) *Client { o.ConfigureClient(&LinkedStorageAccountClient.Client, o.ResourceManagerAuthorizer) return &Client{ + ClusterClient: &ClusterClient, DataExportClient: &DataExportClient, DataSourcesClient: &DataSourcesClient, LinkedServicesClient: &LinkedServicesClient, diff --git a/azurerm/internal/services/loganalytics/log_analytics_cluster.go b/azurerm/internal/services/loganalytics/log_analytics_cluster.go new file mode 100644 index 000000000000..fc5e6d21fae4 --- /dev/null +++ b/azurerm/internal/services/loganalytics/log_analytics_cluster.go @@ -0,0 +1,46 @@ +package loganalytics + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/Azure/azure-sdk-for-go/services/preview/operationalinsights/mgmt/2020-03-01-preview/operationalinsights" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" +) + +func logAnalyticsClusterWaitForState(ctx context.Context, meta interface{}, timeout time.Duration, resourceGroup string, clusterName string) *resource.StateChangeConf { + return &resource.StateChangeConf{ + Pending: []string{string(operationalinsights.Updating)}, + Target: []string{string(operationalinsights.Succeeded)}, + MinTimeout: 1 * time.Minute, + Timeout: timeout, + Refresh: 
logAnalyticsClusterRefresh(ctx, meta, resourceGroup, clusterName), + } +} + +func logAnalyticsClusterRefresh(ctx context.Context, meta interface{}, resourceGroup string, clusterName string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + client := meta.(*clients.Client).LogAnalytics.ClusterClient + + log.Printf("[INFO] checking on state of Log Analytics Cluster %q", clusterName) + + resp, err := client.Get(ctx, resourceGroup, clusterName) + if err != nil { + return nil, "nil", fmt.Errorf("polling for the status of Log Analytics Cluster %q (Resource Group %q): %v", clusterName, resourceGroup, err) + } + + if resp.ClusterProperties != nil { + if resp.ClusterProperties.ProvisioningState != operationalinsights.Updating && resp.ClusterProperties.ProvisioningState != operationalinsights.Succeeded { + return nil, "nil", fmt.Errorf("Log Analytics Cluster %q (Resource Group %q) unexpected Provisioning State encountered: %q", clusterName, resourceGroup, string(resp.ClusterProperties.ProvisioningState)) + } + + return resp, string(resp.ClusterProperties.ProvisioningState), nil + } + + // I am not returning an error here as this might have just been a bad get + return resp, "nil", nil + } +} diff --git a/azurerm/internal/services/loganalytics/log_analytics_cluster_customer_managed_key_resource.go b/azurerm/internal/services/loganalytics/log_analytics_cluster_customer_managed_key_resource.go new file mode 100644 index 000000000000..def010bce375 --- /dev/null +++ b/azurerm/internal/services/loganalytics/log_analytics_cluster_customer_managed_key_resource.go @@ -0,0 +1,204 @@ +package loganalytics + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/services/preview/operationalinsights/mgmt/2020-03-01-preview/operationalinsights" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loganalytics/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loganalytics/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceArmLogAnalyticsClusterCustomerManagedKey() *schema.Resource { + return &schema.Resource{ + Create: resourceArmLogAnalyticsClusterCustomerManagedKeyCreate, + Read: resourceArmLogAnalyticsClusterCustomerManagedKeyRead, + Update: resourceArmLogAnalyticsClusterCustomerManagedKeyUpdate, + Delete: resourceArmLogAnalyticsClusterCustomerManagedKeyDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(6 * time.Hour), + Read: schema.DefaultTimeout(5 * time.Minute), + Update: schema.DefaultTimeout(6 * time.Hour), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "log_analytics_cluster_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.LogAnalyticsClusterId, + }, + + "key_vault_key_id": { + Type: schema.TypeString, + Required: true, + ValidateFunc: azure.ValidateKeyVaultChildIdVersionOptional, + }, + }, + } +} + +func resourceArmLogAnalyticsClusterCustomerManagedKeyCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).LogAnalytics.ClusterClient + ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + clusterIdRaw := d.Get("log_analytics_cluster_id").(string) + clusterId, err := parse.LogAnalyticsClusterID(clusterIdRaw) + if err != nil { + 
return err + } + + resp, err := client.Get(ctx, clusterId.ResourceGroup, clusterId.Name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return fmt.Errorf("Log Analytics Cluster %q (resource group %q) was not found", clusterId.Name, clusterId.ResourceGroup) + } + return fmt.Errorf("failed to get details of Log Analytics Cluster %q (resource group %q): %+v", clusterId.Name, clusterId.ResourceGroup, err) + } + if resp.ClusterProperties != nil && resp.ClusterProperties.KeyVaultProperties != nil { + keyProps := *resp.ClusterProperties.KeyVaultProperties + if keyProps.KeyName != nil && *keyProps.KeyName != "" { + return tf.ImportAsExistsError("azurerm_log_analytics_cluster_customer_managed_key", fmt.Sprintf("%s/CMK", clusterIdRaw)) + } + } + + d.SetId(fmt.Sprintf("%s/CMK", clusterIdRaw)) + return resourceArmLogAnalyticsClusterCustomerManagedKeyUpdate(d, meta) +} + +func resourceArmLogAnalyticsClusterCustomerManagedKeyUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).LogAnalytics.ClusterClient + ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + keyId, err := azure.ParseKeyVaultChildIDVersionOptional(d.Get("key_vault_key_id").(string)) + if err != nil { + return fmt.Errorf("could not parse Key Vault Key ID: %+v", err) + } + + clusterId, err := parse.LogAnalyticsClusterID(d.Get("log_analytics_cluster_id").(string)) + if err != nil { + return err + } + + clusterPatch := operationalinsights.ClusterPatch{ + ClusterPatchProperties: &operationalinsights.ClusterPatchProperties{ + KeyVaultProperties: &operationalinsights.KeyVaultProperties{ + KeyVaultURI: utils.String(keyId.KeyVaultBaseUrl), + KeyName: utils.String(keyId.Name), + KeyVersion: utils.String(keyId.Version), + }, + }, + } + + if _, err := client.Update(ctx, clusterId.ResourceGroup, clusterId.Name, clusterPatch); err != nil { + return fmt.Errorf("updating Log Analytics Cluster %q (Resource Group %q): %+v", 
clusterId.Name, clusterId.ResourceGroup, err) + } + + updateWait := logAnalyticsClusterWaitForState(ctx, meta, d.Timeout(schema.TimeoutUpdate), clusterId.ResourceGroup, clusterId.Name) + + if _, err := updateWait.WaitForState(); err != nil { + return fmt.Errorf("waiting for Log Analytics Cluster to finish updating %q (Resource Group %q): %v", clusterId.Name, clusterId.ResourceGroup, err) + } + + return resourceArmLogAnalyticsClusterCustomerManagedKeyRead(d, meta) +} + +func resourceArmLogAnalyticsClusterCustomerManagedKeyRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).LogAnalytics.ClusterClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + idRaw := strings.TrimRight(d.Id(), "/CMK") + + id, err := parse.LogAnalyticsClusterID(idRaw) + if err != nil { + return err + } + + d.Set("log_analytics_cluster_id", idRaw) + + resp, err := client.Get(ctx, id.ResourceGroup, id.Name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[INFO] Log Analytics %q does not exist - removing from state", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("retrieving Log Analytics Cluster %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) + } + + if props := resp.ClusterProperties; props != nil { + if kvProps := props.KeyVaultProperties; kvProps != nil { + var keyVaultUri, keyName, keyVersion string + if kvProps.KeyVaultURI != nil && *kvProps.KeyVaultURI != "" { + keyVaultUri = *kvProps.KeyVaultURI + } else { + return fmt.Errorf("empty value returned for Key Vault URI") + } + if kvProps.KeyName != nil && *kvProps.KeyName != "" { + keyName = *kvProps.KeyName + } else { + return fmt.Errorf("empty value returned for Key Vault Key Name") + } + if kvProps.KeyVersion != nil { + keyVersion = *kvProps.KeyVersion + } + keyVaultKeyId, err := azure.NewKeyVaultChildResourceID(keyVaultUri, "keys", keyName, keyVersion) + if err != nil { + return err + } + 
d.Set("key_vault_key_id", keyVaultKeyId) + } + } + + return nil +} + +func resourceArmLogAnalyticsClusterCustomerManagedKeyDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).LogAnalytics.ClusterClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + clusterId, err := parse.LogAnalyticsClusterID(d.Get("log_analytics_cluster_id").(string)) + if err != nil { + return err + } + + clusterPatch := operationalinsights.ClusterPatch{ + ClusterPatchProperties: &operationalinsights.ClusterPatchProperties{ + KeyVaultProperties: &operationalinsights.KeyVaultProperties{ + KeyVaultURI: nil, + KeyName: nil, + KeyVersion: nil, + }, + }, + } + + if _, err = client.Update(ctx, clusterId.ResourceGroup, clusterId.Name, clusterPatch); err != nil { + return fmt.Errorf("removing Log Analytics Cluster Customer Managed Key from cluster %q (resource group %q)", clusterId.Name, clusterId.ResourceGroup) + } + + deleteWait := logAnalyticsClusterWaitForState(ctx, meta, d.Timeout(schema.TimeoutDelete), clusterId.ResourceGroup, clusterId.Name) + + if _, err := deleteWait.WaitForState(); err != nil { + return fmt.Errorf("waiting for Log Analytics Cluster to finish updating %q (Resource Group %q): %v", clusterId.Name, clusterId.ResourceGroup, err) + } + + return nil +} diff --git a/azurerm/internal/services/loganalytics/log_analytics_cluster_resource.go b/azurerm/internal/services/loganalytics/log_analytics_cluster_resource.go new file mode 100644 index 000000000000..36a7d3c4361e --- /dev/null +++ b/azurerm/internal/services/loganalytics/log_analytics_cluster_resource.go @@ -0,0 +1,297 @@ +package loganalytics + +import ( + "fmt" + "log" + "time" + + "github.com/Azure/azure-sdk-for-go/services/preview/operationalinsights/mgmt/2020-03-01-preview/operationalinsights" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loganalytics/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loganalytics/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + azSchema "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceArmLogAnalyticsCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceArmLogAnalyticsClusterCreate, + Read: resourceArmLogAnalyticsClusterRead, + Update: resourceArmLogAnalyticsClusterUpdate, + Delete: resourceArmLogAnalyticsClusterDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(6 * time.Hour), + Read: schema.DefaultTimeout(5 * time.Minute), + Update: schema.DefaultTimeout(6 * time.Hour), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Importer: azSchema.ValidateResourceIDPriorToImport(func(id string) error { + _, err := parse.LogAnalyticsClusterID(id) + return err + }), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.LogAnalyticsClustersName, + }, + + "resource_group_name": azure.SchemaResourceGroupName(), + + "location": azure.SchemaLocation(), + + "identity": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { 
+ Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + string(operationalinsights.SystemAssigned), + }, false), + }, + + "principal_id": { + Type: schema.TypeString, + Computed: true, + }, + + "tenant_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + // Per the documentation cluster capacity must start at 1000 GB and can go above 3000 GB with an exception by Microsoft + // so I am not limiting the upperbound here by design + // https://docs.microsoft.com/en-us/azure/azure-monitor/platform/manage-cost-storage#log-analytics-dedicated-clusters + "size_gb": { + Type: schema.TypeInt, + Optional: true, + Default: 1000, + ValidateFunc: validation.All( + validation.IntAtLeast(1000), + validation.IntDivisibleBy(100), + ), + }, + + "cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + + "tags": tags.Schema(), + }, + } +} + +func resourceArmLogAnalyticsClusterCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).LogAnalytics.ClusterClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) + defer cancel() + + name := d.Get("name").(string) + resourceGroup := d.Get("resource_group_name").(string) + + id := parse.NewLogAnalyticsClusterId(name, resourceGroup) + + existing, err := client.Get(ctx, resourceGroup, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("checking for presence of existing Log Analytics Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + } + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_log_analytics_cluster", *existing.ID) + } + + parameters := operationalinsights.Cluster{ + Location: utils.String(location.Normalize(d.Get("location").(string))), + Identity: expandArmLogAnalyticsClusterIdentity(d.Get("identity").([]interface{})), 
+ Sku: &operationalinsights.ClusterSku{ + Capacity: utils.Int64(int64(d.Get("size_gb").(int))), + Name: operationalinsights.CapacityReservation, + }, + Tags: tags.Expand(d.Get("tags").(map[string]interface{})), + } + + future, err := client.CreateOrUpdate(ctx, resourceGroup, name, parameters) + if err != nil { + return fmt.Errorf("creating Log Analytics Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting on creating future for Log Analytics Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + if _, err = client.Get(ctx, resourceGroup, name); err != nil { + return fmt.Errorf("retrieving Log Analytics Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + createWait := logAnalyticsClusterWaitForState(ctx, meta, d.Timeout(schema.TimeoutCreate), id.ResourceGroup, id.Name) + + if _, err := createWait.WaitForState(); err != nil { + return fmt.Errorf("waiting for Log Analytics Cluster to finish updating %q (Resource Group %q): %v", id.Name, id.ResourceGroup, err) + } + + d.SetId(id.ID(subscriptionId)) + + return resourceArmLogAnalyticsClusterRead(d, meta) +} + +func resourceArmLogAnalyticsClusterRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).LogAnalytics.ClusterClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.LogAnalyticsClusterID(d.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, id.ResourceGroup, id.Name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[INFO] Log Analytics %q does not exist - removing from state", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("retrieving Log Analytics Cluster %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) + } + d.Set("name", id.Name) + d.Set("resource_group_name", id.ResourceGroup) + 
d.Set("location", location.NormalizeNilable(resp.Location)) + if err := d.Set("identity", flattenArmLogAnalyticsIdentity(resp.Identity)); err != nil { + return fmt.Errorf("setting `identity`: %+v", err) + } + if props := resp.ClusterProperties; props != nil { + d.Set("cluster_id", props.ClusterID) + } + + capacity := 0 + if sku := resp.Sku; sku != nil { + if sku.Capacity != nil { + capacity = int(*sku.Capacity) + } + } + d.Set("size_gb", capacity) + + return tags.FlattenAndSet(d, resp.Tags) +} + +func resourceArmLogAnalyticsClusterUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).LogAnalytics.ClusterClient + ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.LogAnalyticsClusterID(d.Id()) + if err != nil { + return err + } + + parameters := operationalinsights.ClusterPatch{} + + if d.HasChange("size_gb") { + parameters.Sku = &operationalinsights.ClusterSku{ + Capacity: utils.Int64(int64(d.Get("size_gb").(int))), + Name: operationalinsights.CapacityReservation, + } + } + + if d.HasChange("tags") { + parameters.Tags = tags.Expand(d.Get("tags").(map[string]interface{})) + } + + if _, err := client.Update(ctx, id.ResourceGroup, id.Name, parameters); err != nil { + return fmt.Errorf("updating Log Analytics Cluster %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) + } + + // Need to wait for the cluster to actually finish updating the resource before continuing + // since the service returns a 200 instantly while it's still updating in the background + log.Printf("[INFO] Checking for Log Analytics Cluster provisioning state") + + updateWait := logAnalyticsClusterWaitForState(ctx, meta, d.Timeout(schema.TimeoutUpdate), id.ResourceGroup, id.Name) + + if _, err := updateWait.WaitForState(); err != nil { + return fmt.Errorf("waiting for Log Analytics Cluster to finish updating %q (Resource Group %q): %v", id.Name, id.ResourceGroup, err) + } + + return 
resourceArmLogAnalyticsClusterRead(d, meta) +} + +func resourceArmLogAnalyticsClusterDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).LogAnalytics.ClusterClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.LogAnalyticsClusterID(d.Id()) + if err != nil { + return err + } + + future, err := client.Delete(ctx, id.ResourceGroup, id.Name) + if err != nil { + return fmt.Errorf("deleting Log Analytics Cluster %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) + } + + if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting on deleting future for Log Analytics Cluster %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) + } + + return nil +} + +func expandArmLogAnalyticsClusterIdentity(input []interface{}) *operationalinsights.Identity { + if len(input) == 0 { + return nil + } + v := input[0].(map[string]interface{}) + return &operationalinsights.Identity{ + Type: operationalinsights.IdentityType(v["type"].(string)), + } +} + +func flattenArmLogAnalyticsIdentity(input *operationalinsights.Identity) []interface{} { + if input == nil { + return make([]interface{}, 0) + } + + var t operationalinsights.IdentityType + if input.Type != "" { + t = input.Type + } + var principalId string + if input.PrincipalID != nil { + principalId = *input.PrincipalID + } + var tenantId string + if input.TenantID != nil { + tenantId = *input.TenantID + } + return []interface{}{ + map[string]interface{}{ + "type": t, + "principal_id": principalId, + "tenant_id": tenantId, + }, + } +} diff --git a/azurerm/internal/services/loganalytics/parse/log_analytics_cluster.go b/azurerm/internal/services/loganalytics/parse/log_analytics_cluster.go new file mode 100644 index 000000000000..84db7d322792 --- /dev/null +++ b/azurerm/internal/services/loganalytics/parse/log_analytics_cluster.go @@ -0,0 +1,45 @@ +package parse + +import ( + 
"fmt" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +) + +type LogAnalyticsClusterId struct { + ResourceGroup string + Name string +} + +func NewLogAnalyticsClusterId(name, resourceGroup string) LogAnalyticsClusterId { + return LogAnalyticsClusterId{ + ResourceGroup: resourceGroup, + Name: name, + } +} + +func (id LogAnalyticsClusterId) ID(subscriptionId string) string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.OperationalInsights/clusters/%s" + return fmt.Sprintf(fmtString, subscriptionId, id.ResourceGroup, id.Name) +} + +func LogAnalyticsClusterID(input string) (*LogAnalyticsClusterId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, fmt.Errorf("parsing operationalinsightsCluster ID %q: %+v", input, err) + } + + logAnalyticsCluster := LogAnalyticsClusterId{ + ResourceGroup: id.ResourceGroup, + } + + if logAnalyticsCluster.Name, err = id.PopSegment("clusters"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &logAnalyticsCluster, nil +} diff --git a/azurerm/internal/services/loganalytics/parse/log_analytics_cluster_test.go b/azurerm/internal/services/loganalytics/parse/log_analytics_cluster_test.go new file mode 100644 index 000000000000..02eae7e4ad71 --- /dev/null +++ b/azurerm/internal/services/loganalytics/parse/log_analytics_cluster_test.go @@ -0,0 +1,72 @@ +package parse + +import ( + "testing" +) + +func TestLogAnalyticsClusterID(t *testing.T) { + testData := []struct { + Name string + Input string + Expected *LogAnalyticsClusterId + }{ + { + Name: "Empty", + Input: "", + Expected: nil, + }, + { + Name: "No Resource Groups Segment", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000", + Expected: nil, + }, + { + Name: "No Resource Groups Value", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/", + Expected: nil, + }, + { + Name: 
"Resource Group ID", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/foo/", + Expected: nil, + }, + { + Name: "Missing Cluster Value", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resourceGroup1/providers/Microsoft.OperationalInsights/clusters", + Expected: nil, + }, + { + Name: "Log Analytics Cluster ID", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resourceGroup1/providers/Microsoft.OperationalInsights/clusters/cluster1", + Expected: &LogAnalyticsClusterId{ + ResourceGroup: "resourceGroup1", + Name: "cluster1", + }, + }, + { + Name: "Wrong Casing", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resourceGroup1/providers/Microsoft.OperationalInsights/Clusters/cluster1", + Expected: nil, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q..", v.Name) + + actual, err := LogAnalyticsClusterID(v.Input) + if err != nil { + if v.Expected == nil { + continue + } + t.Fatalf("Expected a value but got an error: %s", err) + } + + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/loganalytics/registration.go b/azurerm/internal/services/loganalytics/registration.go index 41e329acf8dc..c151a47dd346 100644 --- a/azurerm/internal/services/loganalytics/registration.go +++ b/azurerm/internal/services/loganalytics/registration.go @@ -27,14 +27,16 @@ func (r Registration) SupportedDataSources() map[string]*schema.Resource { // SupportedResources returns the supported Resources supported by this Service func (r Registration) SupportedResources() map[string]*schema.Resource { return map[string]*schema.Resource{ + "azurerm_log_analytics_cluster": 
resourceArmLogAnalyticsCluster(), + "azurerm_log_analytics_cluster_customer_managed_key": resourceArmLogAnalyticsClusterCustomerManagedKey(), + "azurerm_log_analytics_datasource_windows_event": resourceArmLogAnalyticsDataSourceWindowsEvent(), + "azurerm_log_analytics_datasource_windows_performance_counter": resourceArmLogAnalyticsDataSourceWindowsPerformanceCounter(), "azurerm_log_analytics_data_export_rule": resourceArmLogAnalyticsDataExport(), "azurerm_log_analytics_linked_service": resourceArmLogAnalyticsLinkedService(), "azurerm_log_analytics_linked_storage_account": resourceArmLogAnalyticsLinkedStorageAccount(), "azurerm_log_analytics_saved_search": resourceArmLogAnalyticsSavedSearch(), "azurerm_log_analytics_solution": resourceArmLogAnalyticsSolution(), - "azurerm_log_analytics_workspace": resourceArmLogAnalyticsWorkspace(), - "azurerm_log_analytics_datasource_windows_event": resourceArmLogAnalyticsDataSourceWindowsEvent(), "azurerm_log_analytics_storage_insights": resourceArmLogAnalyticsStorageInsights(), - "azurerm_log_analytics_datasource_windows_performance_counter": resourceArmLogAnalyticsDataSourceWindowsPerformanceCounter(), + "azurerm_log_analytics_workspace": resourceArmLogAnalyticsWorkspace(), } } diff --git a/azurerm/internal/services/loganalytics/suppress/log_analytics_cluster.go b/azurerm/internal/services/loganalytics/suppress/log_analytics_cluster.go new file mode 100644 index 000000000000..5feda882b175 --- /dev/null +++ b/azurerm/internal/services/loganalytics/suppress/log_analytics_cluster.go @@ -0,0 +1,27 @@ +package suppress + +import ( + "fmt" + "net" + "net/url" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func LogAnalyticsClusterUrl(_, old, new string, _ *schema.ResourceData) bool { + u, err := url.ParseRequestURI(old) + if err != nil || u.Host == "" { + return false + } + + host, _, err := net.SplitHostPort(u.Host) + if err != nil { + host = u.Host + } + + if new == fmt.Sprintf("%s://%s/", u.Scheme, host) { + 
return true + } + + return false +} diff --git a/azurerm/internal/services/loganalytics/suppress/log_analytics_cluster_test.go b/azurerm/internal/services/loganalytics/suppress/log_analytics_cluster_test.go new file mode 100644 index 000000000000..9d160b7f3767 --- /dev/null +++ b/azurerm/internal/services/loganalytics/suppress/log_analytics_cluster_test.go @@ -0,0 +1,87 @@ +package suppress + +import "testing" + +func TestCaseClusterUrl(t *testing.T) { + cases := []struct { + Name string + ClusterURL string + KeyVaultURL string + Suppress bool + }{ + { + Name: "empty URL", + ClusterURL: "", + KeyVaultURL: "https://flynns.arcade.com/", + Suppress: false, + }, + { + Name: "URL with port and wrong scheme", + ClusterURL: "http://flynns.arcade.com:443", + KeyVaultURL: "https://flynns.arcade.com/", + Suppress: false, + }, + { + Name: "invalid URL scheme", + ClusterURL: "https//flynns.arcade.com", + KeyVaultURL: "https://flynns.arcade.com/", + Suppress: false, + }, + { + Name: "invalid URL character", + ClusterURL: "https://flynns^arcade.com/", + KeyVaultURL: "https://flynns.arcade.com/", + Suppress: false, + }, + { + Name: "invalid URL missing scheme", + ClusterURL: "//flynns.arcade.com/", + KeyVaultURL: "https://flynns.arcade.com/", + Suppress: false, + }, + { + Name: "URL with wrong scheme no port", + ClusterURL: "http://flynns.arcade.com", + KeyVaultURL: "https://flynns.arcade.com/", + Suppress: false, + }, + { + Name: "same URL different case", + ClusterURL: "https://Flynns.Arcade.com/", + KeyVaultURL: "https://flynns.arcade.com/", + Suppress: false, + }, + { + Name: "full URL with username@host/path?query#fragment", + ClusterURL: "https://Creator4983@flynns.arcade.com/ENCOM?games=MatrixBlaster#MCP", + KeyVaultURL: "https://flynns.arcade.com/", + Suppress: true, + }, + { + Name: "full URL with username:password@host/path?query#fragment", + ClusterURL: "https://Creator4983:7898@flynns.arcade.com/ENCOM?games=SpaceParanoids&developer=KevinFlynn#MCP", + KeyVaultURL: 
"https://flynns.arcade.com/", + Suppress: true, + }, + { + Name: "URL missing path separator", + ClusterURL: "https://flynns.arcade.com", + KeyVaultURL: "https://flynns.arcade.com/", + Suppress: true, + }, + { + Name: "same URL", + ClusterURL: "https://flynns.arcade.com/", + KeyVaultURL: "https://flynns.arcade.com/", + Suppress: true, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + if LogAnalyticsClusterUrl("test", tc.ClusterURL, tc.KeyVaultURL, nil) != tc.Suppress { + t.Fatalf("Expected LogAnalyticsClusterUrl to return %t for '%q' == '%q'", tc.Suppress, tc.ClusterURL, tc.KeyVaultURL) + } + }) + } +} diff --git a/azurerm/internal/services/loganalytics/tests/log_analytics_cluster_customer_managed_key_resource_test.go b/azurerm/internal/services/loganalytics/tests/log_analytics_cluster_customer_managed_key_resource_test.go new file mode 100644 index 000000000000..7e5ba44c7d2b --- /dev/null +++ b/azurerm/internal/services/loganalytics/tests/log_analytics_cluster_customer_managed_key_resource_test.go @@ -0,0 +1,209 @@ +package tests + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loganalytics/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func TestAccAzureRMLogAnalyticsClusterCustomerManagedKey_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_log_analytics_cluster_customer_managed_key", "test") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMLogAnalyticsClusterCustomerManagedKeyDestroy, + 
Steps: []resource.TestStep{ + { + Config: testAccAzureRMLogAnalyticsClusterCustomerManagedKey_complete(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMLogAnalyticsClusterCustomerManagedKeyExists(data.ResourceName), + ), + }, + data.ImportStep(), + }, + }) +} + +func testCheckAzureRMLogAnalyticsClusterCustomerManagedKeyExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + client := acceptance.AzureProvider.Meta().(*clients.Client).LogAnalytics.ClusterClient + ctx := acceptance.AzureProvider.Meta().(*clients.Client).StopContext + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Log Analytics Cluster Customer Managed Key not found: %s", resourceName) + } + + id, err := parse.LogAnalyticsClusterID(strings.TrimSuffix(rs.Primary.ID, "/CMK")) + if err != nil { + return err + } + resp, err := client.Get(ctx, id.ResourceGroup, id.Name) + if err != nil { + if !utils.ResponseWasNotFound(resp.Response) { + return fmt.Errorf("bad: get on Log Analytics Cluster for CMK: %+v", err) + } + } + if resp.ClusterProperties == nil || resp.ClusterProperties.KeyVaultProperties == nil { + return fmt.Errorf("bad: Log Analytics Cluster has no Customer Managed Key Configured") + } + if resp.ClusterProperties.KeyVaultProperties.KeyVaultURI == nil || *resp.ClusterProperties.KeyVaultProperties.KeyVaultURI == "" { + return fmt.Errorf("bad: Log Analytics Cluster Customer Managed Key is not configured") + } + if resp.ClusterProperties.KeyVaultProperties.KeyName == nil || *resp.ClusterProperties.KeyVaultProperties.KeyName == "" { + return fmt.Errorf("bad: Log Analytics Cluster Customer Managed Key is not configured") + } + + return nil + } +} + +func testCheckAzureRMLogAnalyticsClusterCustomerManagedKeyDestroy(s *terraform.State) error { + client := acceptance.AzureProvider.Meta().(*clients.Client).LogAnalytics.ClusterClient + ctx := acceptance.AzureProvider.Meta().(*clients.Client).StopContext + + for _, rs :=
range s.RootModule().Resources { + if rs.Type != "azurerm_log_analytics_cluster_customer_managed_key" { + continue + } + id, err := parse.LogAnalyticsClusterID(strings.TrimSuffix(rs.Primary.ID, "/CMK")) + if err != nil { + return err + } + resp, err := client.Get(ctx, id.ResourceGroup, id.Name) + if err != nil { + if !utils.ResponseWasNotFound(resp.Response) { + return fmt.Errorf("bad: get on Log Analytics Cluster for CMK: %+v", err) + } + } + if resp.ClusterProperties != nil && resp.ClusterProperties.KeyVaultProperties != nil { + if resp.ClusterProperties.KeyVaultProperties.KeyName != nil && *resp.ClusterProperties.KeyVaultProperties.KeyName != "" { + return fmt.Errorf("Bad: Log Analytics Cluster Customer Managed Key %q still present", *resp.ClusterProperties.KeyVaultProperties.KeyName) + } + if resp.ClusterProperties.KeyVaultProperties.KeyVaultURI != nil && *resp.ClusterProperties.KeyVaultProperties.KeyVaultURI != "" { + return fmt.Errorf("Bad: Log Analytics Cluster Customer Managed Key Vault URI %q still present", *resp.ClusterProperties.KeyVaultProperties.KeyVaultURI) + } + if resp.ClusterProperties.KeyVaultProperties.KeyVersion != nil && *resp.ClusterProperties.KeyVaultProperties.KeyVersion != "" { + return fmt.Errorf("Bad: Log Analytics Cluster Customer Managed Key Version %q still present", *resp.ClusterProperties.KeyVaultProperties.KeyVersion) + } + } + return nil + } + return nil +} + +func testAccAzureRMLogAnalyticsClusterCustomerManagedKey_template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-la-%[1]d" + location = "%[2]s" +} + +data "azurerm_client_config" "current" {} + +resource "azurerm_log_analytics_cluster" "test" { + name = "acctest-LA-%[1]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + + identity { + type = "SystemAssigned" + } +} + + +resource "azurerm_key_vault"
"test" { + name = "vault%[3]s" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + tenant_id = data.azurerm_client_config.current.tenant_id + + sku_name = "premium" + + soft_delete_enabled = true + soft_delete_retention_days = 7 + purge_protection_enabled = true +} + + +resource "azurerm_key_vault_access_policy" "terraform" { + key_vault_id = azurerm_key_vault.test.id + + key_permissions = [ + "create", + "delete", + "get", + "update", + "list", + ] + + secret_permissions = [ + "get", + "delete", + "set", + ] + + tenant_id = data.azurerm_client_config.current.tenant_id + object_id = data.azurerm_client_config.current.object_id +} + +resource "azurerm_key_vault_key" "test" { + name = "key-%[3]s" + key_vault_id = azurerm_key_vault.test.id + key_type = "RSA" + key_size = 2048 + + key_opts = [ + "decrypt", + "encrypt", + "sign", + "unwrapKey", + "verify", + "wrapKey", + ] + + depends_on = [azurerm_key_vault_access_policy.terraform] +} + +resource "azurerm_key_vault_access_policy" "test" { + key_vault_id = azurerm_key_vault.test.id + + key_permissions = [ + "get", + "unwrapkey", + "wrapkey" + ] + + tenant_id = azurerm_log_analytics_cluster.test.identity.0.tenant_id + object_id = azurerm_log_analytics_cluster.test.identity.0.principal_id + + depends_on = [azurerm_key_vault_access_policy.terraform] +} +`, data.RandomInteger, data.Locations.Primary, data.RandomString) +} + +func testAccAzureRMLogAnalyticsClusterCustomerManagedKey_complete(data acceptance.TestData) string { + template := testAccAzureRMLogAnalyticsClusterCustomerManagedKey_template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_log_analytics_cluster_customer_managed_key" "test" { + log_analytics_cluster_id = azurerm_log_analytics_cluster.test.id + key_vault_key_id = azurerm_key_vault_key.test.id + + depends_on = [azurerm_key_vault_access_policy.test] +} + +`, template) +} diff --git 
a/azurerm/internal/services/loganalytics/tests/log_analytics_cluster_resource_test.go b/azurerm/internal/services/loganalytics/tests/log_analytics_cluster_resource_test.go new file mode 100644 index 000000000000..fe2743ad8224 --- /dev/null +++ b/azurerm/internal/services/loganalytics/tests/log_analytics_cluster_resource_test.go @@ -0,0 +1,183 @@ +package tests + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loganalytics/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func TestAccAzureRMLogAnalyticsCluster_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_log_analytics_cluster", "test") + resource.Test(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMLogAnalyticsClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMLogAnalyticsCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMLogAnalyticsClusterExists(data.ResourceName), + ), + }, + data.ImportStep(), + }, + }) +} + +func TestAccAzureRMLogAnalyticsCluster_resize(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_log_analytics_cluster", "test") + resource.Test(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMLogAnalyticsClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMLogAnalyticsCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMLogAnalyticsClusterExists(data.ResourceName), + ), + }, + 
data.ImportStep(), + { + Config: testAccAzureRMLogAnalyticsCluster_resize(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMLogAnalyticsClusterExists(data.ResourceName), + ), + }, + data.ImportStep(), + }, + }) +} + +func TestAccAzureRMLogAnalyticsCluster_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_log_analytics_cluster", "test") + resource.Test(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMLogAnalyticsClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMLogAnalyticsCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMLogAnalyticsClusterExists(data.ResourceName), + ), + }, + data.RequiresImportErrorStep(testAccAzureRMLogAnalyticsCluster_requiresImport), + }, + }) +} + +func testCheckAzureRMLogAnalyticsClusterExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + client := acceptance.AzureProvider.Meta().(*clients.Client).LogAnalytics.ClusterClient + ctx := acceptance.AzureProvider.Meta().(*clients.Client).StopContext + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("log analytics Cluster not found: %s", resourceName) + } + id, err := parse.LogAnalyticsClusterID(rs.Primary.ID) + if err != nil { + return err + } + if resp, err := client.Get(ctx, id.ResourceGroup, id.Name); err != nil { + if !utils.ResponseWasNotFound(resp.Response) { + return fmt.Errorf("bad: log analytics Cluster %q does not exist", id.Name) + } + return fmt.Errorf("bad: Get on LogAnalytics.ClusterClient: %+v", err) + } + return nil + } +} + +func testCheckAzureRMLogAnalyticsClusterDestroy(s *terraform.State) error { + client := acceptance.AzureProvider.Meta().(*clients.Client).LogAnalytics.ClusterClient + ctx := acceptance.AzureProvider.Meta().(*clients.Client).StopContext + + for _, rs := range s.RootModule().Resources { + if 
rs.Type != "azurerm_log_analytics_cluster" { + continue + } + id, err := parse.LogAnalyticsClusterID(rs.Primary.ID) + if err != nil { + return err + } + if resp, err := client.Get(ctx, id.ResourceGroup, id.Name); err != nil { + if !utils.ResponseWasNotFound(resp.Response) { + return fmt.Errorf("bad: Get on LogAnalytics.ClusterClient: %+v", err) + } + } + return nil + } + return nil +} + +func testAccAzureRMLogAnalyticsCluster_template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-la-%d" + location = "%s" +} +`, data.RandomInteger, data.Locations.Primary) +} + +func testAccAzureRMLogAnalyticsCluster_basic(data acceptance.TestData) string { + template := testAccAzureRMLogAnalyticsCluster_template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_log_analytics_cluster" "test" { + name = "acctest-LA-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + + identity { + type = "SystemAssigned" + } +} +`, template, data.RandomInteger) +} + +func testAccAzureRMLogAnalyticsCluster_resize(data acceptance.TestData) string { + template := testAccAzureRMLogAnalyticsCluster_template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_log_analytics_cluster" "test" { + name = "acctest-LA-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + size_gb = 1100 + + identity { + type = "SystemAssigned" + } +} +`, template, data.RandomInteger) +} + +func testAccAzureRMLogAnalyticsCluster_requiresImport(data acceptance.TestData) string { + config := testAccAzureRMLogAnalyticsCluster_basic(data) + return fmt.Sprintf(` +%s + +resource "azurerm_log_analytics_cluster" "import" { + name = azurerm_log_analytics_cluster.test.name + resource_group_name = azurerm_log_analytics_cluster.test.resource_group_name + location = 
azurerm_log_analytics_cluster.test.location + + identity { + type = "SystemAssigned" + } +} +`, config) +} diff --git a/azurerm/internal/services/loganalytics/validate/log_analytics_cluster.go b/azurerm/internal/services/loganalytics/validate/log_analytics_cluster.go new file mode 100644 index 000000000000..a1d27573b598 --- /dev/null +++ b/azurerm/internal/services/loganalytics/validate/log_analytics_cluster.go @@ -0,0 +1,26 @@ +package validate + +import ( + "fmt" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loganalytics/parse" +) + +func LogAnalyticsClustersName(i interface{}, k string) (warnings []string, errors []error) { + return logAnalyticsGenericName(i, k) +} + +func LogAnalyticsClusterId(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) + return + } + + _, err := parse.LogAnalyticsClusterID(v) + if err != nil { + errors = append(errors, fmt.Errorf("expected %s to be a Log Analytics Cluster ID: %+v", k, err)) + } + + return warnings, errors +} diff --git a/azurerm/internal/services/loganalytics/validate/log_analytics_cluster_test.go b/azurerm/internal/services/loganalytics/validate/log_analytics_cluster_test.go new file mode 100644 index 000000000000..c7d531596c65 --- /dev/null +++ b/azurerm/internal/services/loganalytics/validate/log_analytics_cluster_test.go @@ -0,0 +1,73 @@ +package validate + +import ( + "testing" +) + +func TestLogAnalyticsClustersName(t *testing.T) { + testCases := []struct { + Name string + Input string + Expected bool + }{ + { + Name: "Too short", + Input: "inv", + Expected: false, + }, + { + Name: "Invalid characters underscores", + Input: "invalid_Clusters_Name", + Expected: false, + }, + { + Name: "Invalid characters space", + Input: "invalid Clusters Name", + Expected: false, + }, + { + Name: "Invalid name starts with hyphen", + Input: "-invalidClustersName",
Expected: false, + }, + { + Name: "Invalid name ends with hyphen", + Input: "invalidClustersName-", + Expected: false, + }, + { + Name: "Invalid name too long", + Input: "thisIsToLoooooooooooooooooooooooooooooooooooooongForAClusterName", + Expected: false, + }, + { + Name: "Valid name", + Input: "validClustersName", + Expected: true, + }, + { + Name: "Valid name with hyphen", + Input: "validClustersName-2", + Expected: true, + }, + { + Name: "Valid name max length", + Input: "thisIsTheLooooooooooooooooooooooooongestValidClusterNameThereIs", + Expected: true, + }, + { + Name: "Valid name min length", + Input: "vali", + Expected: true, + }, + } + for _, v := range testCases { + t.Logf("[DEBUG] Testing %q..", v.Name) + + _, errors := LogAnalyticsClustersName(v.Input, "name") + result := len(errors) == 0 + if result != v.Expected { + t.Fatalf("Expected the result to be %t but got %t (and %d errors)", v.Expected, result, len(errors)) + } + } +} diff --git a/azurerm/internal/services/loganalytics/validate/log_analytics_name.go b/azurerm/internal/services/loganalytics/validate/log_analytics_name.go index 390aaa86200a..98f6effcfe28 100644 --- a/azurerm/internal/services/loganalytics/validate/log_analytics_name.go +++ b/azurerm/internal/services/loganalytics/validate/log_analytics_name.go @@ -14,11 +14,11 @@ func logAnalyticsGenericName(i interface{}, k string) (warnings []string, errors return } if len(v) < 4 { - errors = append(errors, fmt.Errorf("length should be greater than %d", 4)) + errors = append(errors, fmt.Errorf("length should be greater than %d, got %q", 4, v)) return } if len(v) > 63 { - errors = append(errors, fmt.Errorf("length should be less than %d", 63)) + errors = append(errors, fmt.Errorf("length should be less than %d, got %q", 63, v)) return } if !regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$`).MatchString(v) { diff --git a/website/azurerm.erb b/website/azurerm.erb index 749622023b6a..1cce10f2e1b7 100644 --- a/website/azurerm.erb +++ 
b/website/azurerm.erb @@ -1999,6 +1999,14 @@