From c15831e5a43919590a98e9ca42a9b812d9995eea Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Tue, 5 Nov 2019 10:45:43 +0100 Subject: [PATCH 01/45] r/kubernetes_cluster: documenting the new `enable_node_public_ip` field --- website/docs/r/kubernetes_cluster.html.markdown | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 876f9e3f027c..9b248c3887c9 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -163,12 +163,16 @@ A `agent_pool_profile` block supports the following: * `enable_auto_scaling` - (Optional) Whether to enable [auto-scaler](https://docs.microsoft.com/en-us/azure/aks/cluster-autoscaler). Note that auto scaling feature requires the that the `type` is set to `VirtualMachineScaleSets` +* `enable_node_public_ip` - (Optional) Should each node have a Public IP Address? + * `min_count` - (Optional) Minimum number of nodes for auto-scaling * `max_count` - (Optional) Maximum number of nodes for auto-scaling * `max_pods` - (Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created. +* `node_taints` - (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g `key=value:NoSchedule`) + * `os_disk_size_gb` - (Optional) The Agent Operating System disk size in GB. Changing this forces a new resource to be created. * `os_type` - (Optional) The Operating System used for the Agents. Possible values are `Linux` and `Windows`. Changing this forces a new resource to be created. Defaults to `Linux`. @@ -179,8 +183,6 @@ A `agent_pool_profile` block supports the following: ~> **NOTE:** A route table should be configured on this Subnet. 
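As a rough illustration of the two newly documented fields above, an `agent_pool_profile` block using them might look like the sketch below; the pool name, node count, VM size and taint value are hypothetical and the cluster's other required arguments are omitted.

```hcl
resource "azurerm_kubernetes_cluster" "example" {
  # name, location, resource_group_name, dns_prefix,
  # service_principal etc. omitted for brevity

  agent_pool_profile {
    name                  = "default"
    count                 = 2
    vm_size               = "Standard_DS2_v2"
    type                  = "VirtualMachineScaleSets"
    enable_node_public_ip = true
    node_taints           = ["key=value:NoSchedule"]
  }
}
```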
-* `node_taints` - (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g `key=value:NoSchedule`) - --- A `azure_active_directory` block supports the following: From cf947f86e66b245972eab9b7435fae7065ae3a01 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Tue, 5 Nov 2019 11:46:39 +0100 Subject: [PATCH 02/45] r/kubernetes_cluster: conditionally updating the cluster if fields have changed --- azurerm/resource_arm_kubernetes_cluster.go | 196 +++++++++++++-------- 1 file changed, 124 insertions(+), 72 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index f62e2fbc6ce4..793d87b4430d 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -20,6 +20,9 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) +// TODO: more granular update tests +// TODO: 4046 - splitting agent_pool_profile out into it's own resource + func resourceArmKubernetesCluster() *schema.Resource { return &schema.Resource{ Create: resourceArmKubernetesClusterCreate, @@ -347,6 +350,21 @@ func resourceArmKubernetesCluster() *schema.Resource { }, }, + "api_server_authorized_ip_ranges": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validate.CIDR, + }, + }, + + "enable_pod_security_policy": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "linux_profile": { Type: schema.TypeList, Optional: true, @@ -476,6 +494,13 @@ func resourceArmKubernetesCluster() *schema.Resource { }, }, + "node_resource_group": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "role_based_access_control": { Type: schema.TypeList, Optional: true, @@ -535,12 +560,12 @@ func resourceArmKubernetesCluster() *schema.Resource { "tags": tags.Schema(), + // Computed "fqdn": { Type: schema.TypeString, Computed: true, }, - // Computed "kube_admin_config": { Type: schema.TypeList, Computed: true, @@ -624,28 +649,6 @@ func resourceArmKubernetesCluster() *schema.Resource { Computed: true, Sensitive: true, }, - - "node_resource_group": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "api_server_authorized_ip_ranges": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validate.CIDR, - }, - }, - - "enable_pod_security_policy": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, }, } } @@ -760,6 +763,8 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} resourceGroup := id.ResourceGroup name := id.Path["managedClusters"] + d.Partial(true) + if d.HasChange("service_principal") { log.Printf("[DEBUG] Updating the Service Principal for Kubernetes Cluster %q (Resource Group %q)..", name, resourceGroup) servicePrincipals := d.Get("service_principal").([]interface{}) @@ -783,73 +788,120 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} log.Printf("[DEBUG] Updated the Service Principal for Kubernetes Cluster %q (Resource Group %q).", name, resourceGroup) } - location := azure.NormalizeLocation(d.Get("location").(string)) - dnsPrefix := d.Get("dns_prefix").(string) - kubernetesVersion := d.Get("kubernetes_version").(string) - - linuxProfile := expandKubernetesClusterLinuxProfile(d) - agentProfiles, err := expandKubernetesClusterAgentPoolProfiles(d) + // we need to conditionally update the 
cluster + existing, err := client.Get(ctx, resourceGroup, name) if err != nil { - return err + return fmt.Errorf("Error retrieving existing Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + if existing.ManagedClusterProperties == nil { + return fmt.Errorf("Error retrieving existing Kubernetes Cluster %q (Resource Group %q): `properties` was nil", name, resourceGroup) } - windowsProfile := expandKubernetesClusterWindowsProfile(d) - networkProfile := expandKubernetesClusterNetworkProfile(d) - servicePrincipalProfile := expandAzureRmKubernetesClusterServicePrincipal(d) - addonProfiles := expandKubernetesClusterAddonProfiles(d) - t := d.Get("tags").(map[string]interface{}) + // since there's multiple reasons why we could be called into Update, we use this to only update if something's changed that's not SP/Version + updateCluster := false - rbacRaw := d.Get("role_based_access_control").([]interface{}) - rbacEnabled, azureADProfile := expandKubernetesClusterRoleBasedAccessControl(rbacRaw, tenantId) + // TODO: update the expand functions so we pass in the array + if d.HasChange("addon_profile") { + updateCluster = true + addonProfiles := expandKubernetesClusterAddonProfiles(d) + existing.ManagedClusterProperties.AddonProfiles = addonProfiles + } - apiServerAuthorizedIPRangesRaw := d.Get("api_server_authorized_ip_ranges").(*schema.Set).List() - apiServerAuthorizedIPRanges := utils.ExpandStringSlice(apiServerAuthorizedIPRangesRaw) + if d.HasChange("agent_pool_profile") { + updateCluster = true + agentProfiles, err := expandKubernetesClusterAgentPoolProfiles(d) + if err != nil { + return err + } - nodeResourceGroup := d.Get("node_resource_group").(string) + existing.ManagedClusterProperties.AgentPoolProfiles = &agentProfiles + } - enablePodSecurityPolicy := d.Get("enable_pod_security_policy").(bool) + if d.HasChange("api_server_authorized_ip_ranges") { + updateCluster = true + apiServerAuthorizedIPRangesRaw := d.Get("api_server_authorized_ip_ranges").(*schema.Set).List() + existing.APIServerAuthorizedIPRanges = utils.ExpandStringSlice(apiServerAuthorizedIPRangesRaw) + } - // TODO: should these values be conditionally updated? 
- parameters := containerservice.ManagedCluster{ - Name: &name, - Location: &location, - ManagedClusterProperties: &containerservice.ManagedClusterProperties{ - APIServerAuthorizedIPRanges: apiServerAuthorizedIPRanges, - AadProfile: azureADProfile, - AddonProfiles: addonProfiles, - AgentPoolProfiles: &agentProfiles, - DNSPrefix: utils.String(dnsPrefix), - EnableRBAC: utils.Bool(rbacEnabled), - KubernetesVersion: utils.String(kubernetesVersion), - LinuxProfile: linuxProfile, - WindowsProfile: windowsProfile, - NetworkProfile: networkProfile, - ServicePrincipalProfile: servicePrincipalProfile, - NodeResourceGroup: utils.String(nodeResourceGroup), - EnablePodSecurityPolicy: utils.Bool(enablePodSecurityPolicy), - }, - Tags: tags.Expand(t), + if d.HasChange("enable_pod_security_policy") { + updateCluster = true + enablePodSecurityPolicy := d.Get("enable_pod_security_policy").(bool) + existing.ManagedClusterProperties.EnablePodSecurityPolicy = utils.Bool(enablePodSecurityPolicy) } - future, err := client.CreateOrUpdate(ctx, resourceGroup, name, parameters) - if err != nil { - return fmt.Errorf("Error updating Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + if d.HasChange("linux_profile") { + updateCluster = true + linuxProfile := expandKubernetesClusterLinuxProfile(d) + existing.ManagedClusterProperties.LinuxProfile = linuxProfile } - if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for update of Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + // TODO: does this want to be split out + if d.HasChange("network_profile") { + updateCluster = true + networkProfile := expandKubernetesClusterNetworkProfile(d) + existing.ManagedClusterProperties.NetworkProfile = networkProfile } - read, err := client.Get(ctx, resourceGroup, name) - if err != nil { - return fmt.Errorf("Error retrieving Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + if d.HasChange("role_based_access_control") { + updateCluster = true + rbacRaw := d.Get("role_based_access_control").([]interface{}) + rbacEnabled, azureADProfile := expandKubernetesClusterRoleBasedAccessControl(rbacRaw, tenantId) + existing.ManagedClusterProperties.AadProfile = azureADProfile + existing.ManagedClusterProperties.EnableRBAC = utils.Bool(rbacEnabled) } - if read.ID == nil { - return fmt.Errorf("Cannot read ID for Managed Kubernetes Cluster %q (Resource Group %q)", name, resourceGroup) + if d.HasChange("tags") { + updateCluster = true + t := d.Get("tags").(map[string]interface{}) + existing.Tags = tags.Expand(t) } - d.SetId(*read.ID) + if d.HasChange("windows_profile") { + updateCluster = true + windowsProfile := expandKubernetesClusterWindowsProfile(d) + existing.ManagedClusterProperties.WindowsProfile = windowsProfile + } + + if updateCluster { + log.Printf("[DEBUG] Updating the Kubernetes Cluster %q (Resource Group %q)..", name, resourceGroup) + future, err := client.CreateOrUpdate(ctx, resourceGroup, name, existing) + if err != nil { + return fmt.Errorf("Error updating Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("Error waiting for update of Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + log.Printf("[DEBUG] Updated the Kubernetes Cluster %q (Resource Group %q)..", name, resourceGroup) + } + + // then roll the version 
of Kubernetes if necessary + if d.HasChange("kubernetes_version") { + existing, err = client.Get(ctx, resourceGroup, name) + if err != nil { + return fmt.Errorf("Error retrieving existing Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + if existing.ManagedClusterProperties == nil { + return fmt.Errorf("Error retrieving existing Kubernetes Cluster %q (Resource Group %q): `properties` was nil", name, resourceGroup) + } + + kubernetesVersion := d.Get("kubernetes_version").(string) + log.Printf("[DEBUG] Upgrading the version of Kubernetes to %q..", kubernetesVersion) + existing.ManagedClusterProperties.KubernetesVersion = utils.String(kubernetesVersion) + + future, err := client.CreateOrUpdate(ctx, resourceGroup, name, existing) + if err != nil { + return fmt.Errorf("Error updating Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("Error waiting for update of Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + log.Printf("[DEBUG] Upgraded the version of Kubernetes to %q..", kubernetesVersion) + } + + d.Partial(false) return resourceArmKubernetesClusterRead(d, meta) } From 5f8b8877f9f5cd2ba400733d6ecaffddec47037f Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Tue, 5 Nov 2019 11:53:39 +0100 Subject: [PATCH 03/45] r/kubernetes_cluster: sorting the documentation Also adding a note about using ignore_changes with auto-scaling --- .../docs/r/kubernetes_cluster.html.markdown | 82 +++++++++++-------- 1 file changed, 46 insertions(+), 36 deletions(-) diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 9b248c3887c9..d99bce267efa 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -109,46 +109,52 @@ resource "azurerm_subnet" "virtual" { --- -* `addon_profile` - (Optional) A `addon_profile` block. +* `addon_profile` - (Optional) A `addon_profile` block as defined below. * `api_server_authorized_ip_ranges` - (Optional) The IP ranges to whitelist for incoming traffic to the masters. -> **NOTE:** `api_server_authorized_ip_ranges` Is currently in Preview on an opt-in basis. To use it, enable feature `APIServerSecurityPreview` for `namespace Microsoft.ContainerService`. For an example of how to enable a Preview feature, please visit [How to enable the Azure Firewall Public Preview](https://docs.microsoft.com/en-us/azure/firewall/public-preview) -* `kubernetes_version` - (Optional) Version of Kubernetes specified when creating the AKS managed cluster. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). - -* `linux_profile` - (Optional) A `linux_profile` block. +* `enable_pod_security_policy` - (Optional) Whether Pod Security Policies are enabled. Note that this also requires role based access control to be enabled. -* `windows_profile` - (Optional) A `windows_profile` block. +-> **NOTE:** Support for `enable_pod_security_policy` is currently in Preview on an opt-in basis. To use it, enable feature `PodSecurityPolicyPreview` for `namespace Microsoft.ContainerService`. For an example of how to enable a Preview feature, please visit [Register scale set feature provider](https://docs.microsoft.com/en-us/azure/aks/cluster-autoscaler#register-scale-set-feature-provider). 
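Taken together, a hedged sketch of how the two preview-related arguments above sit at the top level of the resource could look like the following; the CIDR range is a placeholder and the feature registrations described in the notes are assumed to already be in place.

```hcl
resource "azurerm_kubernetes_cluster" "example" {
  # other required arguments omitted for brevity

  api_server_authorized_ip_ranges = ["203.0.113.0/24"]
  enable_pod_security_policy      = true
}
```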
-* `network_profile` - (Optional) A `network_profile` block. +* `kubernetes_version` - (Optional) Version of Kubernetes specified when creating the AKS managed cluster. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). --> **NOTE:** If `network_profile` is not defined, `kubenet` profile will be used by default. +-> **NOTE:** Upgrading your cluster may take up to 10 minutes per node. -* `role_based_access_control` - (Optional) A `role_based_access_control` block. Changing this forces a new resource to be created. +* `linux_profile` - (Optional) A `linux_profile` block as defined below. -* `enable_pod_security_policy` - (Optional) Whether Pod Security Policies are enabled. Note that this also requires role based access control to be enabled. +* `network_profile` - (Optional) A `network_profile` block as defined below. --> **NOTE:** Support for `enable_pod_security_policy` is currently in Preview on an opt-in basis. To use it, enable feature `PodSecurityPolicyPreview` for `namespace Microsoft.ContainerService`. For an example of how to enable a Preview feature, please visit [Register scale set feature provider](https://docs.microsoft.com/en-us/azure/aks/cluster-autoscaler#register-scale-set-feature-provider). +-> **NOTE:** If `network_profile` is not defined, `kubenet` profile will be used by default. * `node_resource_group` - (Optional) The name of the Resource Group where the the Kubernetes Nodes should exist. Changing this forces a new resource to be created. -> **NOTE:** Azure requires that a new, non-existent Resource Group is used, as otherwise the provisioning of the Kubernetes Service will fail. +* `role_based_access_control` - (Optional) A `role_based_access_control` block. Changing this forces a new resource to be created. + * `tags` - (Optional) A mapping of tags to assign to the resource. +* `windows_profile` - (Optional) A `windows_profile` block as defined below. + --- A `addon_profile` block supports the following: * `aci_connector_linux` - (Optional) A `aci_connector_linux` block. For more details, please visit [Create and configure an AKS cluster to use virtual nodes](https://docs.microsoft.com/en-us/azure/aks/virtual-nodes-portal). -* `http_application_routing` - (Optional) A `http_application_routing` block. -* `oms_agent` - (Optional) A `oms_agent` block. For more details, please visit [How to onboard Azure Monitor for containers](https://docs.microsoft.com/en-us/azure/monitoring/monitoring-container-insights-onboard). -* `kube_dashboard` - (Optional) A `kube_dashboard` block. -* `azure_policy` - (Optional) A `azure_policy` block. For more details please visit [Understand Azure Policy for Azure Kubernetes Service](https://docs.microsoft.com/en-ie/azure/governance/policy/concepts/rego-for-aks) + +* `azure_policy` - (Optional) A `azure_policy` block as defined below. For more details please visit [Understand Azure Policy for Azure Kubernetes Service](https://docs.microsoft.com/en-ie/azure/governance/policy/concepts/rego-for-aks) -> **NOTE**: Azure Policy for Azure Kubernetes Service is currently in preview and not available to subscriptions that have not [opted-in](https://docs.microsoft.com/en-us/azure/governance/policy/concepts/rego-for-aks?toc=/azure/aks/toc.json) to join `Azure Policy` preview. +* `http_application_routing` - (Optional) A `http_application_routing` block as defined below. + +* `kube_dashboard` - (Optional) A `kube_dashboard` block as defined below. 
+ +* `oms_agent` - (Optional) A `oms_agent` block as defined below. For more details, please visit [How to onboard Azure Monitor for containers](https://docs.microsoft.com/en-us/azure/monitoring/monitoring-container-insights-onboard). + --- A `agent_pool_profile` block supports the following: @@ -157,6 +163,8 @@ A `agent_pool_profile` block supports the following: * `count` - (Optional) Number of Agents (VMs) in the Pool. Possible values must be in the range of 1 to 100 (inclusive). Defaults to `1`. +-> **NOTE:** If you're using AutoScaling, you may wish to use [Terraform's `ignore_changes` functionality](https://www.terraform.io/docs/configuration/resources.html#ignore_changes) to ignore changes to this field. + * `vm_size` - (Required) The size of each VM in the Agent Pool (e.g. `Standard_F1`). Changing this forces a new resource to be created. * `availability_zones` - (Optional) Availability zones for nodes. The property `type` of the `agent_pool_profile` must be set to `VirtualMachineScaleSets` in order to use availability zones. @@ -165,9 +173,9 @@ A `agent_pool_profile` block supports the following: * `enable_node_public_ip` - (Optional) Should each node have a Public IP Address? -* `min_count` - (Optional) Minimum number of nodes for auto-scaling +* `min_count` - (Optional) Minimum number of nodes for auto-scaling. -* `max_count` - (Optional) Maximum number of nodes for auto-scaling +* `max_count` - (Optional) Maximum number of nodes for auto-scaling. * `max_pods` - (Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created. @@ -181,7 +189,7 @@ A `agent_pool_profile` block supports the following: * `vnet_subnet_id` - (Optional) The ID of the Subnet where the Agents in the Pool should be provisioned. Changing this forces a new resource to be created. -~> **NOTE:** A route table should be configured on this Subnet. +~> **NOTE:** A route table must be configured on this Subnet. --- @@ -195,6 +203,13 @@ A `azure_active_directory` block supports the following: * `tenant_id` - (Optional) The Tenant ID used for Azure Active Directory Application. If this isn't specified the Tenant ID of the current Subscription is used. Changing this forces a new resource to be created. + +--- + +A `azure_policy` block supports the following: + +* `enabled` - (Required) Is the Azure Policy for Kubernetes Add On enabled? + --- A `http_application_routing` block supports the following: @@ -203,19 +218,17 @@ A `http_application_routing` block supports the following: --- -A `linux_profile` block supports the following: - -* `admin_username` - (Required) The Admin Username for the Cluster. Changing this forces a new resource to be created. +A `kube_dashboard` block supports the following: -* `ssh_key` - (Required) An `ssh_key` block. Only one is currently allowed. Changing this forces a new resource to be created. +* `enabled` - (Required) Is the Kubernetes Dashboard enabled? --- -A `windows_profile` block supports the following: +A `linux_profile` block supports the following: -* `admin_username` - (Required) The Admin Username for Windows VMs. +* `admin_username` - (Required) The Admin Username for the Cluster. Changing this forces a new resource to be created. -* `admin_password` - (Required) The Admin Password for Windows VMs. +* `ssh_key` - (Required) An `ssh_key` block. Only one is currently allowed. Changing this forces a new resource to be created. 
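To make the `linux_profile` and `ssh_key` arguments above concrete, a minimal sketch might look like this; the admin username and the public key path are assumptions for illustration only.

```hcl
resource "azurerm_kubernetes_cluster" "example" {
  # other required arguments omitted for brevity

  linux_profile {
    admin_username = "azureuser"

    ssh_key {
      key_data = file("~/.ssh/id_rsa.pub")
    }
  }
}
```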
--- @@ -251,18 +264,6 @@ A `oms_agent` block supports the following: --- -A `kube_dashboard` block supports the following: - -* `enabled` - (Required) Is the Kubernetes Dashboard enabled? - ---- - -A `azure_policy` block supports the following: - -* `enabled` - (Required) Is the Azure Policy for Kubernetes Add On enabled? - ---- - A `role_based_access_control` block supports the following: * `azure_active_directory` - (Optional) An `azure_active_directory` block. Changing this forces a new resource to be created. @@ -283,6 +284,15 @@ A `ssh_key` block supports the following: * `key_data` - (Required) The Public SSH Key used to access the cluster. Changing this forces a new resource to be created. +--- + +A `windows_profile` block supports the following: + +* `admin_username` - (Required) The Admin Username for Windows VMs. + +* `admin_password` - (Required) The Admin Password for Windows VMs. + + ## Attributes Reference The following attributes are exported: From f3d22cc79e8b0b396a034a8f535844b29b3524fa Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Tue, 5 Nov 2019 12:00:52 +0100 Subject: [PATCH 04/45] r/kubernetes_cluster: switching to pass in the objects rather than the ResourceData --- azurerm/resource_arm_kubernetes_cluster.go | 83 ++++++++++++---------- 1 file changed, 45 insertions(+), 38 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index 793d87b4430d..2a29cb96efd7 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -681,18 +681,27 @@ func resourceArmKubernetesClusterCreate(d *schema.ResourceData, meta interface{} dnsPrefix := d.Get("dns_prefix").(string) kubernetesVersion := d.Get("kubernetes_version").(string) - linuxProfile := expandKubernetesClusterLinuxProfile(d) - agentProfiles, err := expandKubernetesClusterAgentPoolProfiles(d) + linuxProfileRaw := d.Get("linux_profile").([]interface{}) + linuxProfile := expandKubernetesClusterLinuxProfile(linuxProfileRaw) + + agentProfilesRaw := d.Get("agent_pool_profile").([]interface{}) + agentProfiles, err := expandKubernetesClusterAgentPoolProfiles(agentProfilesRaw, true) if err != nil { return err } - windowsProfile := expandKubernetesClusterWindowsProfile(d) - servicePrincipalProfile := expandAzureRmKubernetesClusterServicePrincipal(d) - networkProfile := expandKubernetesClusterNetworkProfile(d) - addonProfiles := expandKubernetesClusterAddonProfiles(d) + addOnProfilesRaw := d.Get("addon_profile").([]interface{}) + addonProfiles := expandKubernetesClusterAddonProfiles(addOnProfilesRaw) + + networkProfileRaw := d.Get("network_profile").([]interface{}) + networkProfile := expandKubernetesClusterNetworkProfile(networkProfileRaw) + + servicePrincipalProfile := expandAzureRmKubernetesClusterServicePrincipal(d) t := d.Get("tags").(map[string]interface{}) + windowsProfileRaw := d.Get("windows_profile").([]interface{}) + windowsProfile := expandKubernetesClusterWindowsProfile(windowsProfileRaw) + rbacRaw := d.Get("role_based_access_control").([]interface{}) rbacEnabled, azureADProfile := expandKubernetesClusterRoleBasedAccessControl(rbacRaw, tenantId) @@ -800,16 +809,17 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} // since there's multiple reasons why we could be called into Update, we use this to only update if something's changed that's not SP/Version updateCluster := false - // TODO: update the expand functions so we pass in the array if d.HasChange("addon_profile") { 
updateCluster = true - addonProfiles := expandKubernetesClusterAddonProfiles(d) + addOnProfilesRaw := d.Get("addon_profile").([]interface{}) + addonProfiles := expandKubernetesClusterAddonProfiles(addOnProfilesRaw) existing.ManagedClusterProperties.AddonProfiles = addonProfiles } if d.HasChange("agent_pool_profile") { updateCluster = true - agentProfiles, err := expandKubernetesClusterAgentPoolProfiles(d) + agentProfilesRaw := d.Get("agent_pool_profile").([]interface{}) + agentProfiles, err := expandKubernetesClusterAgentPoolProfiles(agentProfilesRaw, false) if err != nil { return err } @@ -831,14 +841,16 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} if d.HasChange("linux_profile") { updateCluster = true - linuxProfile := expandKubernetesClusterLinuxProfile(d) + linuxProfileRaw := d.Get("linux_profile").([]interface{}) + linuxProfile := expandKubernetesClusterLinuxProfile(linuxProfileRaw) existing.ManagedClusterProperties.LinuxProfile = linuxProfile } // TODO: does this want to be split out if d.HasChange("network_profile") { updateCluster = true - networkProfile := expandKubernetesClusterNetworkProfile(d) + networkProfileRaw := d.Get("network_profile").([]interface{}) + networkProfile := expandKubernetesClusterNetworkProfile(networkProfileRaw) existing.ManagedClusterProperties.NetworkProfile = networkProfile } @@ -858,7 +870,8 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} if d.HasChange("windows_profile") { updateCluster = true - windowsProfile := expandKubernetesClusterWindowsProfile(d) + windowsProfileRaw := d.Get("windows_profile").([]interface{}) + windowsProfile := expandKubernetesClusterWindowsProfile(windowsProfileRaw) existing.ManagedClusterProperties.WindowsProfile = windowsProfile } @@ -1066,13 +1079,12 @@ func flattenKubernetesClusterAccessProfile(profile containerservice.ManagedClust return nil, []interface{}{} } -func expandKubernetesClusterAddonProfiles(d *schema.ResourceData) map[string]*containerservice.ManagedClusterAddonProfile { - profiles := d.Get("addon_profile").([]interface{}) - if len(profiles) == 0 { +func expandKubernetesClusterAddonProfiles(input []interface{}) map[string]*containerservice.ManagedClusterAddonProfile { + if len(input) == 0 { return nil } - profile := profiles[0].(map[string]interface{}) + profile := input[0].(map[string]interface{}) addonProfiles := map[string]*containerservice.ManagedClusterAddonProfile{} httpApplicationRouting := profile["http_application_routing"].([]interface{}) @@ -1235,12 +1247,12 @@ func flattenKubernetesClusterAddonProfiles(profile map[string]*containerservice. 
return []interface{}{values} } -func expandKubernetesClusterAgentPoolProfiles(d *schema.ResourceData) ([]containerservice.ManagedClusterAgentPoolProfile, error) { - configs := d.Get("agent_pool_profile").([]interface{}) - +func expandKubernetesClusterAgentPoolProfiles(input []interface{}, isNewResource bool) ([]containerservice.ManagedClusterAgentPoolProfile, error) { profiles := make([]containerservice.ManagedClusterAgentPoolProfile, 0) - for config_id := range configs { - config := configs[config_id].(map[string]interface{}) + + // TODO: fix this + for config_id := range input { + config := input[config_id].(map[string]interface{}) name := config["name"].(string) poolType := config["type"].(string) @@ -1280,13 +1292,13 @@ func expandKubernetesClusterAgentPoolProfiles(d *schema.ResourceData) ([]contain // Auto scaling will change the number of nodes, but the original count number should not be sent again. // This avoid the cluster being resized after creation. - if *profile.EnableAutoScaling && !d.IsNewResource() { + if *profile.EnableAutoScaling && !isNewResource { profile.Count = nil } } - if availavilityZones := utils.ExpandStringSlice(config["availability_zones"].([]interface{})); len(*availavilityZones) > 0 { - profile.AvailabilityZones = availavilityZones + if availabilityZones := utils.ExpandStringSlice(config["availability_zones"].([]interface{})); len(*availabilityZones) > 0 { + profile.AvailabilityZones = availabilityZones } if *profile.EnableAutoScaling && (profile.MinCount == nil || profile.MaxCount == nil) { @@ -1392,14 +1404,12 @@ func flattenKubernetesClusterAgentPoolProfiles(profiles *[]containerservice.Mana return agentPoolProfiles } -func expandKubernetesClusterLinuxProfile(d *schema.ResourceData) *containerservice.LinuxProfile { - profiles := d.Get("linux_profile").([]interface{}) - - if len(profiles) == 0 { +func expandKubernetesClusterLinuxProfile(input []interface{}) *containerservice.LinuxProfile { + if len(input) == 0 { return nil } - config := profiles[0].(map[string]interface{}) + config := input[0].(map[string]interface{}) adminUsername := config["admin_username"].(string) linuxKeys := config["ssh_key"].([]interface{}) @@ -1454,14 +1464,12 @@ func flattenKubernetesClusterLinuxProfile(profile *containerservice.LinuxProfile } } -func expandKubernetesClusterWindowsProfile(d *schema.ResourceData) *containerservice.ManagedClusterWindowsProfile { - profiles := d.Get("windows_profile").([]interface{}) - - if len(profiles) == 0 { +func expandKubernetesClusterWindowsProfile(input []interface{}) *containerservice.ManagedClusterWindowsProfile { + if len(input) == 0 { return nil } - config := profiles[0].(map[string]interface{}) + config := input[0].(map[string]interface{}) adminUsername := config["admin_username"].(string) adminPassword := config["admin_password"].(string) @@ -1498,13 +1506,12 @@ func flattenKubernetesClusterWindowsProfile(profile *containerservice.ManagedClu } } -func expandKubernetesClusterNetworkProfile(d *schema.ResourceData) *containerservice.NetworkProfileType { - configs := d.Get("network_profile").([]interface{}) - if len(configs) == 0 { +func expandKubernetesClusterNetworkProfile(input []interface{}) *containerservice.NetworkProfileType { + if len(input) == 0 { return nil } - config := configs[0].(map[string]interface{}) + config := input[0].(map[string]interface{}) networkPlugin := config["network_plugin"].(string) networkPolicy := config["network_policy"].(string) From 2e74e7d858ad642f86271a2a237eaae9667b0412 Mon Sep 17 00:00:00 2001 From: 
tombuildsstuff Date: Tue, 5 Nov 2019 12:10:58 +0100 Subject: [PATCH 05/45] r/kubernetes_cluster: inlining the `expandServicePrincipalProfile` method since it's different Create&Update --- azurerm/resource_arm_kubernetes_cluster.go | 31 +++++++++------------- 1 file changed, 13 insertions(+), 18 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index 2a29cb96efd7..6b30ebff5e23 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -696,15 +696,22 @@ func resourceArmKubernetesClusterCreate(d *schema.ResourceData, meta interface{} networkProfileRaw := d.Get("network_profile").([]interface{}) networkProfile := expandKubernetesClusterNetworkProfile(networkProfileRaw) - servicePrincipalProfile := expandAzureRmKubernetesClusterServicePrincipal(d) + rbacRaw := d.Get("role_based_access_control").([]interface{}) + rbacEnabled, azureADProfile := expandKubernetesClusterRoleBasedAccessControl(rbacRaw, tenantId) + + // since the Create and Update use separate methods, there's no point extracting this out + servicePrincipalProfileRaw := d.Get("service_principal").([]interface{}) + servicePrincipalProfileVal := servicePrincipalProfileRaw[0].(map[string]interface{}) + servicePrincipalProfile := &containerservice.ManagedClusterServicePrincipalProfile{ + ClientID: utils.String(servicePrincipalProfileVal["client_id"].(string)), + Secret: utils.String(servicePrincipalProfileVal["client_secret"].(string)), + } + t := d.Get("tags").(map[string]interface{}) windowsProfileRaw := d.Get("windows_profile").([]interface{}) windowsProfile := expandKubernetesClusterWindowsProfile(windowsProfileRaw) - rbacRaw := d.Get("role_based_access_control").([]interface{}) - rbacEnabled, azureADProfile := expandKubernetesClusterRoleBasedAccessControl(rbacRaw, tenantId) - apiServerAuthorizedIPRangesRaw := d.Get("api_server_authorized_ip_ranges").(*schema.Set).List() apiServerAuthorizedIPRanges := utils.ExpandStringSlice(apiServerAuthorizedIPRangesRaw) @@ -1674,23 +1681,11 @@ func flattenKubernetesClusterRoleBasedAccessControl(input *containerservice.Mana } } -func expandAzureRmKubernetesClusterServicePrincipal(d *schema.ResourceData) *containerservice.ManagedClusterServicePrincipalProfile { - value, exists := d.GetOk("service_principal") - configs := value.([]interface{}) - - if !exists || len(configs) == 0 { +func expandAzureRmKubernetesClusterServicePrincipal(input []interface{}) *containerservice.ManagedClusterServicePrincipalProfile { + if len(input) == 0 { return nil } - config := configs[0].(map[string]interface{}) - - clientId := config["client_id"].(string) - clientSecret := config["client_secret"].(string) - - return &containerservice.ManagedClusterServicePrincipalProfile{ - ClientID: &clientId, - Secret: &clientSecret, - } } func flattenAzureRmKubernetesClusterServicePrincipalProfile(profile *containerservice.ManagedClusterServicePrincipalProfile, d *schema.ResourceData) []interface{} { From 6814035428a0e1a15a4aa12c09a73c03b0818cc4 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Tue, 5 Nov 2019 12:19:12 +0100 Subject: [PATCH 06/45] r/kubernetes_cluster: updating the versions of kubernetes being used --- azurerm/resource_arm_kubernetes_cluster.go | 7 ------- azurerm/resource_arm_kubernetes_cluster_test.go | 4 ++-- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index 6b30ebff5e23..4f0255656363 100644 --- 
a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -1681,13 +1681,6 @@ func flattenKubernetesClusterRoleBasedAccessControl(input *containerservice.Mana } } -func expandAzureRmKubernetesClusterServicePrincipal(input []interface{}) *containerservice.ManagedClusterServicePrincipalProfile { - if len(input) == 0 { - return nil - } - -} - func flattenAzureRmKubernetesClusterServicePrincipalProfile(profile *containerservice.ManagedClusterServicePrincipalProfile, d *schema.ResourceData) []interface{} { if profile == nil { return []interface{}{} diff --git a/azurerm/resource_arm_kubernetes_cluster_test.go b/azurerm/resource_arm_kubernetes_cluster_test.go index 1ca9942f136d..9187b7b4485d 100644 --- a/azurerm/resource_arm_kubernetes_cluster_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_test.go @@ -12,8 +12,8 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features" ) -var olderKubernetesVersion = "1.13.10" -var currentKubernetesVersion = "1.14.6" +var olderKubernetesVersion = "1.14.8" +var currentKubernetesVersion = "1.15.5" func TestAccAzureRMKubernetesCluster_basic(t *testing.T) { resourceName := "azurerm_kubernetes_cluster.test" From 657aceda010091413b306dd851e7afbf9bf9c514 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 6 Nov 2019 12:57:40 +0100 Subject: [PATCH 07/45] r/kubernetes_cluster: making the addon_profile elements computed but the top level object not --- azurerm/resource_arm_kubernetes_cluster.go | 125 ++++++++++++--------- 1 file changed, 71 insertions(+), 54 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index 4f0255656363..8616682b8591 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -258,7 +258,6 @@ func resourceArmKubernetesCluster() *schema.Resource { Type: schema.TypeList, MaxItems: 1, Optional: true, - Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "http_application_routing": { @@ -266,6 +265,7 @@ func resourceArmKubernetesCluster() *schema.Resource { MaxItems: 1, ForceNew: true, Optional: true, + Computed: true, // TODO: remove in 2.0 Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enabled": { @@ -285,6 +285,7 @@ func resourceArmKubernetesCluster() *schema.Resource { Type: schema.TypeList, MaxItems: 1, Optional: true, + Computed: true, // TODO: remove in 2.0 Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enabled": { @@ -304,6 +305,7 @@ func resourceArmKubernetesCluster() *schema.Resource { Type: schema.TypeList, MaxItems: 1, Optional: true, + Computed: true, // TODO: remove in 2.0 Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enabled": { @@ -323,6 +325,7 @@ func resourceArmKubernetesCluster() *schema.Resource { Type: schema.TypeList, MaxItems: 1, Optional: true, + Computed: true, // TODO: remove in 2.0 Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enabled": { @@ -337,6 +340,7 @@ func resourceArmKubernetesCluster() *schema.Resource { Type: schema.TypeList, MaxItems: 1, Optional: true, + Computed: true, // TODO: remove in 2.0 Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enabled": { @@ -1087,8 +1091,20 @@ func flattenKubernetesClusterAccessProfile(profile containerservice.ManagedClust } func expandKubernetesClusterAddonProfiles(input []interface{}) map[string]*containerservice.ManagedClusterAddonProfile { + disabled := 
containerservice.ManagedClusterAddonProfile{ + Enabled: utils.Bool(false), + } + + profiles := map[string]*containerservice.ManagedClusterAddonProfile{ + // note: the casing on these keys is important + "aciConnectorLinux": &disabled, + "azurepolicy": &disabled, + "kubeDashboard": &disabled, + "httpApplicationRouting": &disabled, + "omsagent": &disabled, + } if len(input) == 0 { - return nil + return profiles } profile := input[0].(map[string]interface{}) @@ -1161,67 +1177,53 @@ func expandKubernetesClusterAddonProfiles(input []interface{}) map[string]*conta } func flattenKubernetesClusterAddonProfiles(profile map[string]*containerservice.ManagedClusterAddonProfile) []interface{} { - values := make(map[string]interface{}) - - routes := make([]interface{}, 0) - if httpApplicationRouting := profile["httpApplicationRouting"]; httpApplicationRouting != nil { + aciConnectors := make([]interface{}, 0) + if aciConnector := profile["aciConnectorLinux"]; aciConnector != nil { enabled := false - if enabledVal := httpApplicationRouting.Enabled; enabledVal != nil { + if enabledVal := aciConnector.Enabled; enabledVal != nil { enabled = *enabledVal } - zoneName := "" - if v := httpApplicationRouting.Config["HTTPApplicationRoutingZoneName"]; v != nil { - zoneName = *v + subnetName := "" + if v := aciConnector.Config["SubnetName"]; v != nil { + subnetName = *v } - output := map[string]interface{}{ - "enabled": enabled, - "http_application_routing_zone_name": zoneName, - } - routes = append(routes, output) + aciConnectors = append(aciConnectors, map[string]interface{}{ + "enabled": enabled, + "subnet_name": subnetName, + }) } - values["http_application_routing"] = routes - agents := make([]interface{}, 0) - if omsAgent := profile["omsagent"]; omsAgent != nil { + azurePolicies := make([]interface{}, 0) + if azurePolicy := profile["azurepolicy"]; azurePolicy != nil { enabled := false - if enabledVal := omsAgent.Enabled; enabledVal != nil { + if enabledVal := azurePolicy.Enabled; enabledVal != nil { enabled = *enabledVal } - workspaceId := "" - if workspaceResourceID := omsAgent.Config["logAnalyticsWorkspaceResourceID"]; workspaceResourceID != nil { - workspaceId = *workspaceResourceID - } - - output := map[string]interface{}{ - "enabled": enabled, - "log_analytics_workspace_id": workspaceId, - } - agents = append(agents, output) + azurePolicies = append(azurePolicies, map[string]interface{}{ + "enabled": enabled, + }) } - values["oms_agent"] = agents - aciConnectors := make([]interface{}, 0) - if aciConnector := profile["aciConnectorLinux"]; aciConnector != nil { + httpApplicationRoutes := make([]interface{}, 0) + if httpApplicationRouting := profile["httpApplicationRouting"]; httpApplicationRouting != nil { enabled := false - if enabledVal := aciConnector.Enabled; enabledVal != nil { + if enabledVal := httpApplicationRouting.Enabled; enabledVal != nil { enabled = *enabledVal } - subnetName := "" - if v := aciConnector.Config["SubnetName"]; v != nil { - subnetName = *v + zoneName := "" + if v := httpApplicationRouting.Config["HTTPApplicationRoutingZoneName"]; v != nil { + zoneName = *v } - output := map[string]interface{}{ - "enabled": enabled, - "subnet_name": subnetName, - } - aciConnectors = append(aciConnectors, output) + httpApplicationRoutes = append(httpApplicationRoutes, map[string]interface{}{ + "enabled": enabled, + "http_application_routing_zone_name": zoneName, + }) } - values["aci_connector_linux"] = aciConnectors kubeDashboards := make([]interface{}, 0) if kubeDashboard := 
profile["kubeDashboard"]; kubeDashboard != nil { @@ -1230,28 +1232,43 @@ func flattenKubernetesClusterAddonProfiles(profile map[string]*containerservice. enabled = *enabledVal } - output := map[string]interface{}{ + kubeDashboards = append(kubeDashboards, map[string]interface{}{ "enabled": enabled, - } - kubeDashboards = append(kubeDashboards, output) + }) } - values["kube_dashboard"] = kubeDashboards - azurePolicies := make([]interface{}, 0) - if azurePolicy := profile["azurepolicy"]; azurePolicy != nil { + omsAgents := make([]interface{}, 0) + if omsAgent := profile["omsagent"]; omsAgent != nil { enabled := false - if enabledVal := azurePolicy.Enabled; enabledVal != nil { + if enabledVal := omsAgent.Enabled; enabledVal != nil { enabled = *enabledVal } - output := map[string]interface{}{ - "enabled": enabled, + workspaceId := "" + if workspaceResourceID := omsAgent.Config["logAnalyticsWorkspaceResourceID"]; workspaceResourceID != nil { + workspaceId = *workspaceResourceID } - azurePolicies = append(azurePolicies, output) + + omsAgents = append(omsAgents, map[string]interface{}{ + "enabled": enabled, + "log_analytics_workspace_id": workspaceId, + }) + } + + // this is a UX hack, since if the top level block isn't defined everything should be turned off + if len(aciConnectors) == 0 && len(azurePolicies) == 0 && len(httpApplicationRoutes) == 0 && len(kubeDashboards) == 0 && len(omsAgents) == 0 { + return []interface{}{} } - values["azure_policy"] = azurePolicies - return []interface{}{values} + return []interface{}{ + map[string]interface{}{ + "aci_connector_linux": aciConnectors + "azure_policy": azurePolicies, + "http_application_routing": httpApplicationRoutes, + "kube_dashboard": kubeDashboards, + "oms_agent": omsAgents, + }, + } } func expandKubernetesClusterAgentPoolProfiles(input []interface{}, isNewResource bool) ([]containerservice.ManagedClusterAgentPoolProfile, error) { From 0f8d8eb8fd860f58effa5e5e8b0e3408893b4b18 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 6 Nov 2019 13:07:29 +0100 Subject: [PATCH 08/45] r/kubernetes_cluster: refactoring the addon helpers out --- .../services/containers/kubernetes_addons.go | 296 ++++++++++++++++++ azurerm/resource_arm_kubernetes_cluster.go | 288 +---------------- 2 files changed, 301 insertions(+), 283 deletions(-) create mode 100644 azurerm/internal/services/containers/kubernetes_addons.go diff --git a/azurerm/internal/services/containers/kubernetes_addons.go b/azurerm/internal/services/containers/kubernetes_addons.go new file mode 100644 index 000000000000..520cd4c0d735 --- /dev/null +++ b/azurerm/internal/services/containers/kubernetes_addons.go @@ -0,0 +1,296 @@ +package containers + +import ( + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-06-01/containerservice" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +const ( + aciConnectorKey = "aciConnectorLinux" + azurePolicyKey = "azurepolicy" + kubernetesDashboardKey = "kubeDashboard" + httpApplicationRoutingKey = "httpApplicationRouting" + omsAgentKey = "omsagent" +) + +func SchemaKubernetesAddOnProfiles() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "aci_connector_linux": { + 
Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + + "subnet_name": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validate.NoEmptyStrings, + }, + }, + }, + }, + + "azure_policy": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + + "kube_dashboard": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + + "http_application_routing": { + Type: schema.TypeList, + MaxItems: 1, + ForceNew: true, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + ForceNew: true, + Required: true, + }, + "http_application_routing_zone_name": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "oms_agent": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + "log_analytics_workspace_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: azure.ValidateResourceID, + }, + }, + }, + }, + }, + }, + } +} + +func ExpandKubernetesAddOnProfiles(input []interface{}) map[string]*containerservice.ManagedClusterAddonProfile { + disabled := containerservice.ManagedClusterAddonProfile{ + Enabled: utils.Bool(false), + } + + profiles := map[string]*containerservice.ManagedClusterAddonProfile{ + // note: the casing on these keys is important + aciConnectorKey: &disabled, + azurePolicyKey: &disabled, + kubernetesDashboardKey: &disabled, + httpApplicationRoutingKey: &disabled, + omsAgentKey: &disabled, + } + if len(input) == 0 { + return profiles + } + + profile := input[0].(map[string]interface{}) + addonProfiles := map[string]*containerservice.ManagedClusterAddonProfile{} + + httpApplicationRouting := profile["http_application_routing"].([]interface{}) + if len(httpApplicationRouting) > 0 { + value := httpApplicationRouting[0].(map[string]interface{}) + enabled := value["enabled"].(bool) + addonProfiles["httpApplicationRouting"] = &containerservice.ManagedClusterAddonProfile{ + Enabled: utils.Bool(enabled), + } + } + + omsAgent := profile["oms_agent"].([]interface{}) + if len(omsAgent) > 0 { + value := omsAgent[0].(map[string]interface{}) + config := make(map[string]*string) + enabled := value["enabled"].(bool) + + if workspaceId, ok := value["log_analytics_workspace_id"]; ok && workspaceId != "" { + config["logAnalyticsWorkspaceResourceID"] = utils.String(workspaceId.(string)) + } + + addonProfiles["omsagent"] = &containerservice.ManagedClusterAddonProfile{ + Enabled: utils.Bool(enabled), + Config: config, + } + } + + aciConnector := profile["aci_connector_linux"].([]interface{}) + if len(aciConnector) > 0 { + value := aciConnector[0].(map[string]interface{}) + config := make(map[string]*string) + enabled := value["enabled"].(bool) + + if subnetName, ok := value["subnet_name"]; ok && subnetName != "" { + config["SubnetName"] = utils.String(subnetName.(string)) + } + + addonProfiles["aciConnectorLinux"] = &containerservice.ManagedClusterAddonProfile{ + Enabled: utils.Bool(enabled), + Config: config, + } + } + + kubeDashboard := 
profile["kube_dashboard"].([]interface{}) + if len(kubeDashboard) > 0 { + value := kubeDashboard[0].(map[string]interface{}) + enabled := value["enabled"].(bool) + + addonProfiles["kubeDashboard"] = &containerservice.ManagedClusterAddonProfile{ + Enabled: utils.Bool(enabled), + Config: nil, + } + } + + azurePolicy := profile["azure_policy"].([]interface{}) + if len(azurePolicy) > 0 && azurePolicy[0] != nil { + value := azurePolicy[0].(map[string]interface{}) + enabled := value["enabled"].(bool) + + addonProfiles["azurepolicy"] = &containerservice.ManagedClusterAddonProfile{ + Enabled: utils.Bool(enabled), + Config: nil, + } + } + + return addonProfiles +} + +func FlattenKubernetesAddOnProfiles(profile map[string]*containerservice.ManagedClusterAddonProfile) []interface{} { + aciConnectors := make([]interface{}, 0) + if aciConnector := profile[aciConnectorKey]; aciConnector != nil { + enabled := false + if enabledVal := aciConnector.Enabled; enabledVal != nil { + enabled = *enabledVal + } + + subnetName := "" + if v := aciConnector.Config["SubnetName"]; v != nil { + subnetName = *v + } + + aciConnectors = append(aciConnectors, map[string]interface{}{ + "enabled": enabled, + "subnet_name": subnetName, + }) + } + + azurePolicies := make([]interface{}, 0) + if azurePolicy := profile[azurePolicyKey]; azurePolicy != nil { + enabled := false + if enabledVal := azurePolicy.Enabled; enabledVal != nil { + enabled = *enabledVal + } + + azurePolicies = append(azurePolicies, map[string]interface{}{ + "enabled": enabled, + }) + } + + httpApplicationRoutes := make([]interface{}, 0) + if httpApplicationRouting := profile[httpApplicationRoutingKey]; httpApplicationRouting != nil { + enabled := false + if enabledVal := httpApplicationRouting.Enabled; enabledVal != nil { + enabled = *enabledVal + } + + zoneName := "" + if v := httpApplicationRouting.Config["HTTPApplicationRoutingZoneName"]; v != nil { + zoneName = *v + } + + httpApplicationRoutes = append(httpApplicationRoutes, map[string]interface{}{ + "enabled": enabled, + "http_application_routing_zone_name": zoneName, + }) + } + + kubeDashboards := make([]interface{}, 0) + if kubeDashboard := profile[kubernetesDashboardKey]; kubeDashboard != nil { + enabled := false + if enabledVal := kubeDashboard.Enabled; enabledVal != nil { + enabled = *enabledVal + } + + kubeDashboards = append(kubeDashboards, map[string]interface{}{ + "enabled": enabled, + }) + } + + omsAgents := make([]interface{}, 0) + if omsAgent := profile[omsAgentKey]; omsAgent != nil { + enabled := false + if enabledVal := omsAgent.Enabled; enabledVal != nil { + enabled = *enabledVal + } + + workspaceId := "" + if workspaceResourceID := omsAgent.Config["logAnalyticsWorkspaceResourceID"]; workspaceResourceID != nil { + workspaceId = *workspaceResourceID + } + + omsAgents = append(omsAgents, map[string]interface{}{ + "enabled": enabled, + "log_analytics_workspace_id": workspaceId, + }) + } + + // this is a UX hack, since if the top level block isn't defined everything should be turned off + if len(aciConnectors) == 0 && len(azurePolicies) == 0 && len(httpApplicationRoutes) == 0 && len(kubeDashboards) == 0 && len(omsAgents) == 0 { + return []interface{}{} + } + + return []interface{}{ + map[string]interface{}{ + "aci_connector_linux": aciConnectors, + "azure_policy": azurePolicies, + "http_application_routing": httpApplicationRoutes, + "kube_dashboard": kubeDashboards, + "oms_agent": omsAgents, + }, + } +} diff --git a/azurerm/resource_arm_kubernetes_cluster.go 
b/azurerm/resource_arm_kubernetes_cluster.go index 8616682b8591..4365c5dba7fe 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -15,6 +15,7 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" @@ -254,105 +255,7 @@ func resourceArmKubernetesCluster() *schema.Resource { }, // Optional - "addon_profile": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "http_application_routing": { - Type: schema.TypeList, - MaxItems: 1, - ForceNew: true, - Optional: true, - Computed: true, // TODO: remove in 2.0 - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - ForceNew: true, - Required: true, - }, - "http_application_routing_zone_name": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - - "oms_agent": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Computed: true, // TODO: remove in 2.0 - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Required: true, - }, - "log_analytics_workspace_id": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: azure.ValidateResourceID, - }, - }, - }, - }, - - "aci_connector_linux": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Computed: true, // TODO: remove in 2.0 - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Required: true, - }, - "subnet_name": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validate.NoEmptyStrings, - }, - }, - }, - }, - - "kube_dashboard": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Computed: true, // TODO: remove in 2.0 - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Required: true, - }, - }, - }, - }, - - "azure_policy": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Computed: true, // TODO: remove in 2.0 - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Required: true, - }, - }, - }, - }, - }, - }, - }, + "addon_profile": containers.SchemaKubernetesAddOnProfiles(), "api_server_authorized_ip_ranges": { Type: schema.TypeSet, @@ -695,7 +598,7 @@ func resourceArmKubernetesClusterCreate(d *schema.ResourceData, meta interface{} } addOnProfilesRaw := d.Get("addon_profile").([]interface{}) - addonProfiles := expandKubernetesClusterAddonProfiles(addOnProfilesRaw) + addonProfiles := containers.ExpandKubernetesAddOnProfiles(addOnProfilesRaw) networkProfileRaw := d.Get("network_profile").([]interface{}) networkProfile := expandKubernetesClusterNetworkProfile(networkProfileRaw) @@ -823,7 +726,7 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} if d.HasChange("addon_profile") { updateCluster = true addOnProfilesRaw := d.Get("addon_profile").([]interface{}) - addonProfiles := 
expandKubernetesClusterAddonProfiles(addOnProfilesRaw) + addonProfiles := containers.ExpandKubernetesAddOnProfiles(addOnProfilesRaw) existing.ManagedClusterProperties.AddonProfiles = addonProfiles } @@ -976,7 +879,7 @@ func resourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error setting `api_server_authorized_ip_ranges`: %+v", err) } - addonProfiles := flattenKubernetesClusterAddonProfiles(props.AddonProfiles) + addonProfiles := containers.FlattenKubernetesAddOnProfiles(props.AddonProfiles) if err := d.Set("addon_profile", addonProfiles); err != nil { return fmt.Errorf("Error setting `addon_profile`: %+v", err) } @@ -1090,187 +993,6 @@ func flattenKubernetesClusterAccessProfile(profile containerservice.ManagedClust return nil, []interface{}{} } -func expandKubernetesClusterAddonProfiles(input []interface{}) map[string]*containerservice.ManagedClusterAddonProfile { - disabled := containerservice.ManagedClusterAddonProfile{ - Enabled: utils.Bool(false), - } - - profiles := map[string]*containerservice.ManagedClusterAddonProfile{ - // note: the casing on these keys is important - "aciConnectorLinux": &disabled, - "azurepolicy": &disabled, - "kubeDashboard": &disabled, - "httpApplicationRouting": &disabled, - "omsagent": &disabled, - } - if len(input) == 0 { - return profiles - } - - profile := input[0].(map[string]interface{}) - addonProfiles := map[string]*containerservice.ManagedClusterAddonProfile{} - - httpApplicationRouting := profile["http_application_routing"].([]interface{}) - if len(httpApplicationRouting) > 0 { - value := httpApplicationRouting[0].(map[string]interface{}) - enabled := value["enabled"].(bool) - addonProfiles["httpApplicationRouting"] = &containerservice.ManagedClusterAddonProfile{ - Enabled: utils.Bool(enabled), - } - } - - omsAgent := profile["oms_agent"].([]interface{}) - if len(omsAgent) > 0 { - value := omsAgent[0].(map[string]interface{}) - config := make(map[string]*string) - enabled := value["enabled"].(bool) - - if workspaceId, ok := value["log_analytics_workspace_id"]; ok && workspaceId != "" { - config["logAnalyticsWorkspaceResourceID"] = utils.String(workspaceId.(string)) - } - - addonProfiles["omsagent"] = &containerservice.ManagedClusterAddonProfile{ - Enabled: utils.Bool(enabled), - Config: config, - } - } - - aciConnector := profile["aci_connector_linux"].([]interface{}) - if len(aciConnector) > 0 { - value := aciConnector[0].(map[string]interface{}) - config := make(map[string]*string) - enabled := value["enabled"].(bool) - - if subnetName, ok := value["subnet_name"]; ok && subnetName != "" { - config["SubnetName"] = utils.String(subnetName.(string)) - } - - addonProfiles["aciConnectorLinux"] = &containerservice.ManagedClusterAddonProfile{ - Enabled: utils.Bool(enabled), - Config: config, - } - } - - kubeDashboard := profile["kube_dashboard"].([]interface{}) - if len(kubeDashboard) > 0 { - value := kubeDashboard[0].(map[string]interface{}) - enabled := value["enabled"].(bool) - - addonProfiles["kubeDashboard"] = &containerservice.ManagedClusterAddonProfile{ - Enabled: utils.Bool(enabled), - Config: nil, - } - } - - azurePolicy := profile["azure_policy"].([]interface{}) - if len(azurePolicy) > 0 && azurePolicy[0] != nil { - value := azurePolicy[0].(map[string]interface{}) - enabled := value["enabled"].(bool) - - addonProfiles["azurepolicy"] = &containerservice.ManagedClusterAddonProfile{ - Enabled: utils.Bool(enabled), - Config: nil, - } - } - - return addonProfiles -} - -func 
flattenKubernetesClusterAddonProfiles(profile map[string]*containerservice.ManagedClusterAddonProfile) []interface{} { - aciConnectors := make([]interface{}, 0) - if aciConnector := profile["aciConnectorLinux"]; aciConnector != nil { - enabled := false - if enabledVal := aciConnector.Enabled; enabledVal != nil { - enabled = *enabledVal - } - - subnetName := "" - if v := aciConnector.Config["SubnetName"]; v != nil { - subnetName = *v - } - - aciConnectors = append(aciConnectors, map[string]interface{}{ - "enabled": enabled, - "subnet_name": subnetName, - }) - } - - azurePolicies := make([]interface{}, 0) - if azurePolicy := profile["azurepolicy"]; azurePolicy != nil { - enabled := false - if enabledVal := azurePolicy.Enabled; enabledVal != nil { - enabled = *enabledVal - } - - azurePolicies = append(azurePolicies, map[string]interface{}{ - "enabled": enabled, - }) - } - - httpApplicationRoutes := make([]interface{}, 0) - if httpApplicationRouting := profile["httpApplicationRouting"]; httpApplicationRouting != nil { - enabled := false - if enabledVal := httpApplicationRouting.Enabled; enabledVal != nil { - enabled = *enabledVal - } - - zoneName := "" - if v := httpApplicationRouting.Config["HTTPApplicationRoutingZoneName"]; v != nil { - zoneName = *v - } - - httpApplicationRoutes = append(httpApplicationRoutes, map[string]interface{}{ - "enabled": enabled, - "http_application_routing_zone_name": zoneName, - }) - } - - kubeDashboards := make([]interface{}, 0) - if kubeDashboard := profile["kubeDashboard"]; kubeDashboard != nil { - enabled := false - if enabledVal := kubeDashboard.Enabled; enabledVal != nil { - enabled = *enabledVal - } - - kubeDashboards = append(kubeDashboards, map[string]interface{}{ - "enabled": enabled, - }) - } - - omsAgents := make([]interface{}, 0) - if omsAgent := profile["omsagent"]; omsAgent != nil { - enabled := false - if enabledVal := omsAgent.Enabled; enabledVal != nil { - enabled = *enabledVal - } - - workspaceId := "" - if workspaceResourceID := omsAgent.Config["logAnalyticsWorkspaceResourceID"]; workspaceResourceID != nil { - workspaceId = *workspaceResourceID - } - - omsAgents = append(omsAgents, map[string]interface{}{ - "enabled": enabled, - "log_analytics_workspace_id": workspaceId, - }) - } - - // this is a UX hack, since if the top level block isn't defined everything should be turned off - if len(aciConnectors) == 0 && len(azurePolicies) == 0 && len(httpApplicationRoutes) == 0 && len(kubeDashboards) == 0 && len(omsAgents) == 0 { - return []interface{}{} - } - - return []interface{}{ - map[string]interface{}{ - "aci_connector_linux": aciConnectors - "azure_policy": azurePolicies, - "http_application_routing": httpApplicationRoutes, - "kube_dashboard": kubeDashboards, - "oms_agent": omsAgents, - }, - } -} - func expandKubernetesClusterAgentPoolProfiles(input []interface{}, isNewResource bool) ([]containerservice.ManagedClusterAgentPoolProfile, error) { profiles := make([]containerservice.ManagedClusterAgentPoolProfile, 0) From 12283cfee4480300f6aa251d509ec7f873c7b231 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 6 Nov 2019 14:48:37 +0100 Subject: [PATCH 09/45] r/kubernetes_cluster: making the top level addon_profile block computed --- azurerm/internal/services/containers/kubernetes_addons.go | 1 + 1 file changed, 1 insertion(+) diff --git a/azurerm/internal/services/containers/kubernetes_addons.go b/azurerm/internal/services/containers/kubernetes_addons.go index 520cd4c0d735..2db94d74f042 100644 --- 
a/azurerm/internal/services/containers/kubernetes_addons.go +++ b/azurerm/internal/services/containers/kubernetes_addons.go @@ -21,6 +21,7 @@ func SchemaKubernetesAddOnProfiles() *schema.Schema { Type: schema.TypeList, MaxItems: 1, Optional: true, + Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "aci_connector_linux": { From 659816ff3c3d629aab7f471092d73190f8d83e03 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 6 Nov 2019 14:49:01 +0100 Subject: [PATCH 10/45] r/kubernetes_cluster: introducing a new block `default_node_pool` --- .../containers/kubernetes_nodepool.go | 338 ++++++++++++++++++ azurerm/resource_arm_kubernetes_cluster.go | 56 ++- 2 files changed, 383 insertions(+), 11 deletions(-) create mode 100644 azurerm/internal/services/containers/kubernetes_nodepool.go diff --git a/azurerm/internal/services/containers/kubernetes_nodepool.go b/azurerm/internal/services/containers/kubernetes_nodepool.go new file mode 100644 index 000000000000..fab8d16cc1d9 --- /dev/null +++ b/azurerm/internal/services/containers/kubernetes_nodepool.go @@ -0,0 +1,338 @@ +package containers + +import ( + "fmt" + + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-06-01/containerservice" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func SchemaDefaultNodePool() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // Required + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.KubernetesAgentPoolName, + }, + + "type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: string(containerservice.VirtualMachineScaleSets), + ValidateFunc: validation.StringInSlice([]string{ + string(containerservice.AvailabilitySet), + string(containerservice.VirtualMachineScaleSets), + }, false), + }, + + "vm_size": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + // TODO: can we remove this? 
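+    // note: case differences are suppressed below, presumably because the API may
+    // return the VM size in a different casing to the config, which would otherwise
+    // surface as a spurious diff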
+ DiffSuppressFunc: suppress.CaseDifference, + ValidateFunc: validate.NoEmptyStrings, + }, + + // Optional + "availability_zones": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "count": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + ValidateFunc: validation.IntBetween(1, 100), + }, + + "enable_auto_scaling": { + Type: schema.TypeBool, + Optional: true, + }, + + "enable_node_public_ip": { + Type: schema.TypeBool, + Optional: true, + }, + + "max_count": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 100), + }, + + "max_pods": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "min_count": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 100), + }, + + "node_taints": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "os_disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(1), + }, + + "os_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: string(containerservice.Linux), + ValidateFunc: validation.StringInSlice([]string{ + string(containerservice.Linux), + string(containerservice.Windows), + }, false), + }, + + "vnet_subnet_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + }, + }, + }, + } +} + +func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedClusterAgentPoolProfile, error) { + input := d.Get("default_node_pool").([]interface{}) + // TODO: in 2.0 make this Required + // this exists to allow users to migrate to default_node_pool + if len(input) == 0 { + return nil, nil + } + + raw := input[0].(map[string]interface{}) + + enableAutoScaling := raw["enable_auto_scaling"].(bool) + profile := containerservice.ManagedClusterAgentPoolProfile{ + EnableAutoScaling: utils.Bool(enableAutoScaling), + EnableNodePublicIP: utils.Bool(raw["enable_node_public_ip"].(bool)), + Name: utils.String(raw["name"].(string)), + OsType: containerservice.OSType(raw["os_type"].(string)), + Type: containerservice.AgentPoolType(raw["type"].(string)), + VMSize: containerservice.VMSizeTypes(raw["vm_size"].(string)), + + //// TODO: support these in time + // OrchestratorVersion: nil, + // ScaleSetEvictionPolicy: "", + // ScaleSetPriority: "", + } + + availabilityZonesRaw := raw["availability_zones"].([]interface{}) + // TODO: can we remove the `if > 0` here? + if availabilityZones := utils.ExpandStringSlice(availabilityZonesRaw); len(*availabilityZones) > 0 { + profile.AvailabilityZones = availabilityZones + } + if maxPods := int32(raw["max_pods"].(int)); maxPods > 0 { + profile.MaxPods = utils.Int32(maxPods) + } + + nodeTaintsRaw := raw["node_taints"].([]interface{}) + // TODO: can we remove the `if > 0` here? 
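+  // node taints are only set on the profile when at least one is configured -
+  // an empty list is left as nil rather than being sent to the API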
+ if nodeTaints := utils.ExpandStringSlice(nodeTaintsRaw); len(*nodeTaints) > 0 { + profile.NodeTaints = nodeTaints + } + + if osDiskSizeGB := int32(raw["os_disk_size_gb"].(int)); osDiskSizeGB > 0 { + profile.OsDiskSizeGB = utils.Int32(osDiskSizeGB) + } + + if vnetSubnetID := raw["vnet_subnet_id"].(string); vnetSubnetID != "" { + profile.VnetSubnetID = utils.String(vnetSubnetID) + } + + count := raw["count"].(int) + maxCount := raw["max_count"].(int) + minCount := raw["min_count"].(int) + + // Count must be set for the initial creation when using AutoScaling but cannot be updated + autoScaledCluster := enableAutoScaling && d.IsNewResource() + + // however it must always be sent for manually scaled clusters + manuallyScaledCluster := !enableAutoScaling + + if autoScaledCluster || manuallyScaledCluster { + profile.Count = utils.Int32(int32(count)) + } + + if enableAutoScaling { + if maxCount > 0 { + profile.MaxCount = utils.Int32(int32(maxCount)) + } else { + return nil, fmt.Errorf("`max_count` must be configured when `enable_auto_scaling` is set to `true`") + } + + if minCount > 0 { + profile.MinCount = utils.Int32(int32(minCount)) + } else { + return nil, fmt.Errorf("`min_count` must be configured when `enable_auto_scaling` is set to `true`") + } + + if minCount > maxCount { + return nil, fmt.Errorf("`max_count` must be >= `min_count`") + } + } else if minCount > 0 || maxCount > 0 { + return nil, fmt.Errorf("`max_count` and `min_count` must be set to `0` when enable_auto_scaling is set to `false`") + } + + return &[]containerservice.ManagedClusterAgentPoolProfile{ + profile, + }, nil +} + +func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolProfile, d *schema.ResourceData) (*[]interface{}, error) { + if input == nil { + return &[]interface{}{}, nil + } + + agentPool, err := findDefaultNodePool(input, d) + if err != nil { + return nil, err + } + + var availabilityZones []string + if agentPool.AvailabilityZones != nil { + availabilityZones = *agentPool.AvailabilityZones + } + + count := 0 + if agentPool.Count != nil { + count = int(*agentPool.Count) + } + + enableAutoScaling := false + if agentPool.EnableAutoScaling != nil { + enableAutoScaling = *agentPool.EnableAutoScaling + } + + enableNodePublicIP := false + if agentPool.EnableNodePublicIP != nil { + enableNodePublicIP = *agentPool.EnableNodePublicIP + } + + maxCount := 0 + if agentPool.MaxCount != nil { + maxCount = int(*agentPool.MaxCount) + } + + maxPods := 0 + if agentPool.MaxPods != nil { + maxPods = int(*agentPool.MaxPods) + } + + minCount := 0 + if agentPool.MinCount != nil { + minCount = int(*agentPool.MinCount) + } + + name := "" + if agentPool.Name != nil { + name = *agentPool.Name + } + + var nodeTaints []string + if agentPool.NodeTaints != nil { + nodeTaints = *agentPool.NodeTaints + } + + osDiskSizeGB := 0 + if agentPool.OsDiskSizeGB != nil { + osDiskSizeGB = int(*agentPool.OsDiskSizeGB) + } + + vnetSubnetId := "" + if agentPool.VnetSubnetID != nil { + vnetSubnetId = *agentPool.VnetSubnetID + } + + return &[]interface{}{ + map[string]interface{}{ + "availability_zones": availabilityZones, + "count": count, + "enable_auto_scaling": enableAutoScaling, + "enable_node_public_ip": enableNodePublicIP, + "max_count": maxCount, + "max_pods": maxPods, + "min_count": minCount, + "name": name, + "node_taints": nodeTaints, + "os_disk_size_gb": osDiskSizeGB, + "os_type": string(agentPool.OsType), + "type": string(agentPool.Type), + "vm_size": string(agentPool.VMSize), + "vnet_subnet_id": vnetSubnetId, + }, + }, 
nil +} + +func findDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolProfile, d *schema.ResourceData) (*containerservice.ManagedClusterAgentPoolProfile, error) { + // first try loading this from the Resource Data if possible (e.g. when Created) + defaultNodePoolName := d.Get("default_node_pool.0.name") + + var agentPool *containerservice.ManagedClusterAgentPoolProfile + if defaultNodePoolName != "" { + // find it + for _, v := range *input { + if v.Name != nil && *v.Name == defaultNodePoolName { + agentPool = &v + break + } + } + } else { + // otherwise we need to fall back to the name of the first agent pool + for _, v := range *input { + if v.Name == nil { + continue + } + + defaultNodePoolName = *v.Name + agentPool = &v + break + } + + if defaultNodePoolName == nil { + return nil, fmt.Errorf("Unable to Determine Default Agent Pool") + } + } + + if agentPool == nil { + return nil, fmt.Errorf("The Default Agent Pool %q was not found", defaultNodePoolName) + } + + return agentPool, nil +} diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index 4365c5dba7fe..f2bbb7a118d1 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -23,6 +23,7 @@ import ( // TODO: more granular update tests // TODO: 4046 - splitting agent_pool_profile out into it's own resource +// TODO: document default_node_pool func resourceArmKubernetesCluster() *schema.Resource { return &schema.Resource{ @@ -108,9 +109,13 @@ func resourceArmKubernetesCluster() *schema.Resource { ValidateFunc: validate.NoEmptyStrings, }, + "default_node_pool": containers.SchemaDefaultNodePool(), + "agent_pool_profile": { - Type: schema.TypeList, - Required: true, + Type: schema.TypeList, + Optional: true, + Computed: true, + Deprecated: "This has been replaced by `default_node_pool` and will be removed in version 2.0 of the AzureRM Provider", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { @@ -591,10 +596,20 @@ func resourceArmKubernetesClusterCreate(d *schema.ResourceData, meta interface{} linuxProfileRaw := d.Get("linux_profile").([]interface{}) linuxProfile := expandKubernetesClusterLinuxProfile(linuxProfileRaw) - agentProfilesRaw := d.Get("agent_pool_profile").([]interface{}) - agentProfiles, err := expandKubernetesClusterAgentPoolProfiles(agentProfilesRaw, true) + agentProfiles, err := containers.ExpandDefaultNodePool(d) if err != nil { - return err + return fmt.Errorf("Error expanding `default_node_pool`: %+v", err) + } + + // TODO: remove me in 2.0 + if agentProfiles == nil { + agentProfilesRaw := d.Get("agent_pool_profile").([]interface{}) + agentProfilesLegacy, err := expandKubernetesClusterAgentPoolProfiles(agentProfilesRaw, true) + if err != nil { + return err + } + + agentProfiles = &agentProfilesLegacy } addOnProfilesRaw := d.Get("addon_profile").([]interface{}) @@ -633,7 +648,7 @@ func resourceArmKubernetesClusterCreate(d *schema.ResourceData, meta interface{} APIServerAuthorizedIPRanges: apiServerAuthorizedIPRanges, AadProfile: azureADProfile, AddonProfiles: addonProfiles, - AgentPoolProfiles: &agentProfiles, + AgentPoolProfiles: agentProfiles, DNSPrefix: utils.String(dnsPrefix), EnableRBAC: utils.Bool(rbacEnabled), KubernetesVersion: utils.String(kubernetesVersion), @@ -730,15 +745,25 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} existing.ManagedClusterProperties.AddonProfiles = addonProfiles } - if d.HasChange("agent_pool_profile") { + if 
d.HasChange("default_node_pool") || d.HasChange("agent_pool_profile") { updateCluster = true - agentProfilesRaw := d.Get("agent_pool_profile").([]interface{}) - agentProfiles, err := expandKubernetesClusterAgentPoolProfiles(agentProfilesRaw, false) + agentProfiles, err := containers.ExpandDefaultNodePool(d) if err != nil { - return err + return fmt.Errorf("Error expanding `default_node_pool`: %+v", err) } - existing.ManagedClusterProperties.AgentPoolProfiles = &agentProfiles + // TODO: remove me in 2.0 + if agentProfiles == nil { + agentProfilesRaw := d.Get("agent_pool_profile").([]interface{}) + agentProfilesLegacy, err := expandKubernetesClusterAgentPoolProfiles(agentProfilesRaw, false) + if err != nil { + return err + } + + agentProfiles = &agentProfilesLegacy + } + + existing.ManagedClusterProperties.AgentPoolProfiles = agentProfiles } if d.HasChange("api_server_authorized_ip_ranges") { @@ -884,11 +909,20 @@ func resourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error setting `addon_profile`: %+v", err) } + // TODO: remove me in 2.0 agentPoolProfiles := flattenKubernetesClusterAgentPoolProfiles(props.AgentPoolProfiles, resp.Fqdn) if err := d.Set("agent_pool_profile", agentPoolProfiles); err != nil { return fmt.Errorf("Error setting `agent_pool_profile`: %+v", err) } + flattenedDefaultNodePool, err := containers.FlattenDefaultNodePool(props.AgentPoolProfiles, d) + if err != nil { + return fmt.Errorf("Error flattening `default_node_pool`: %+v", err) + } + if err := d.Set("default_node_pool", flattenedDefaultNodePool); err != nil { + return fmt.Errorf("Error setting `default_node_pool`: %+v", err) + } + linuxProfile := flattenKubernetesClusterLinuxProfile(props.LinuxProfile) if err := d.Set("linux_profile", linuxProfile); err != nil { return fmt.Errorf("Error setting `linux_profile`: %+v", err) From ff6292344a9206d09c195d01a571f539527412da Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 6 Nov 2019 15:35:43 +0100 Subject: [PATCH 11/45] r/kubernetes_cluster: sending the count unless the users opted out --- azurerm/internal/services/containers/kubernetes_nodepool.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azurerm/internal/services/containers/kubernetes_nodepool.go b/azurerm/internal/services/containers/kubernetes_nodepool.go index fab8d16cc1d9..8aec153602ad 100644 --- a/azurerm/internal/services/containers/kubernetes_nodepool.go +++ b/azurerm/internal/services/containers/kubernetes_nodepool.go @@ -184,7 +184,7 @@ func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedC autoScaledCluster := enableAutoScaling && d.IsNewResource() // however it must always be sent for manually scaled clusters - manuallyScaledCluster := !enableAutoScaling + manuallyScaledCluster := !enableAutoScaling && (d.IsNewResource() || d.HasChange("default_node_pool.0.count")) if autoScaledCluster || manuallyScaledCluster { profile.Count = utils.Int32(int32(count)) From 310c5f77bd168fb5fb152e1b3b65ff7dcb32480a Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 6 Nov 2019 15:37:30 +0100 Subject: [PATCH 12/45] r/kubernetes_cluster: documenting the agent_pool_profile deprecation --- website/docs/guides/2.0-upgrade-guide.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/guides/2.0-upgrade-guide.html.markdown b/website/docs/guides/2.0-upgrade-guide.html.markdown index b2015379ea68..a90a51990537 100644 --- a/website/docs/guides/2.0-upgrade-guide.html.markdown +++ 
b/website/docs/guides/2.0-upgrade-guide.html.markdown @@ -158,7 +158,7 @@ The deprecated field `zone_type` will be removed. Private DNS Zones are now a se ### Data Source: `azurerm_kubernetes_cluster` -The deprecated field `dns_prefix` within the `agent_pool_profile` block will be removed. +The deprecated block `agent_pool_profile` will be removed. This has been replaced by the `default_node_pool` block. ### Data Source: `azurerm_network_interface` From 3b305e9f4cffeb7a482d8677978f66ff729d7d39 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 6 Nov 2019 15:51:09 +0100 Subject: [PATCH 13/45] r/kubernetes_cluster: splitting the tests out --- azurerm/resource_arm_kubernetes_cluster.go | 1 + ...urce_arm_kubernetes_cluster_addons_test.go | 622 ++++ ...source_arm_kubernetes_cluster_auth_test.go | 325 +++ ...rce_arm_kubernetes_cluster_network_test.go | 805 ++++++ ...ource_arm_kubernetes_cluster_other_test.go | 562 ++++ ...rce_arm_kubernetes_cluster_scaling_test.go | 257 ++ .../resource_arm_kubernetes_cluster_test.go | 2524 ----------------- 7 files changed, 2572 insertions(+), 2524 deletions(-) create mode 100644 azurerm/resource_arm_kubernetes_cluster_addons_test.go create mode 100644 azurerm/resource_arm_kubernetes_cluster_auth_test.go create mode 100644 azurerm/resource_arm_kubernetes_cluster_network_test.go create mode 100644 azurerm/resource_arm_kubernetes_cluster_other_test.go create mode 100644 azurerm/resource_arm_kubernetes_cluster_scaling_test.go diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index f2bbb7a118d1..79ef21d9804a 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -111,6 +111,7 @@ func resourceArmKubernetesCluster() *schema.Resource { "default_node_pool": containers.SchemaDefaultNodePool(), + // TODO: remove in 2.0 "agent_pool_profile": { Type: schema.TypeList, Optional: true, diff --git a/azurerm/resource_arm_kubernetes_cluster_addons_test.go b/azurerm/resource_arm_kubernetes_cluster_addons_test.go new file mode 100644 index 000000000000..ea483eecf557 --- /dev/null +++ b/azurerm/resource_arm_kubernetes_cluster_addons_test.go @@ -0,0 +1,622 @@ +package azurerm + +import ( + "fmt" + "os" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" +) + +func TestAccAzureRMKubernetesCluster_addonProfileAciConnectorLinux(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_addonProfileAciConnectorLinux(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.http_application_routing.#", "0"), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.aci_connector_linux.#", "1"), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.aci_connector_linux.0.enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.aci_connector_linux.0.subnet_name", fmt.Sprintf("acctestsubnet-aci%d", 
ri)), + ), + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_addonProfileAciConnectorLinuxDisabled(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + disablingConfig := testAccAzureRMKubernetesCluster_addonProfileAciConnectorLinuxDisabled(ri, clientId, clientSecret, location) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: disablingConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.http_application_routing.#", "0"), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.count", "1"), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.aci_connector_linux.#", "1"), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.aci_connector_linux.0.enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.aci_connector_linux.0.subnet_name", ""), + ), + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_addonProfileAzurePolicy(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_addonProfileAzurePolicy(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.azure_policy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.azure_policy.0.enabled", "true"), + ), + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_addonProfileKubeDashboard(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_addonProfileKubeDashboard(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.kube_dashboard.#", "1"), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.kube_dashboard.0.enabled", "false"), + ), + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_addonProfileOMS(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_addonProfileOMS(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: 
testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.http_application_routing.#", "0"), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.#", "1"), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.0.enabled", "true"), + resource.TestCheckResourceAttrSet(resourceName, "addon_profile.0.oms_agent.0.log_analytics_workspace_id"), + ), + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_addonProfileOMSToggle(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + enablingConfig := testAccAzureRMKubernetesCluster_addonProfileOMS(ri, clientId, clientSecret, location) + disablingConfig := testAccAzureRMKubernetesCluster_addonProfileOMSDisabled(ri, clientId, clientSecret, location) + scaleDownConfig := testAccAzureRMKubernetesCluster_addonProfileOMSScaleWithoutBlock(ri, clientId, clientSecret, location) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: enablingConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.http_application_routing.#", "0"), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.count", "1"), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.#", "1"), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.0.enabled", "true"), + resource.TestCheckResourceAttrSet(resourceName, "addon_profile.0.oms_agent.0.log_analytics_workspace_id"), + ), + }, + { + Config: disablingConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.http_application_routing.#", "0"), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.count", "1"), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.#", "1"), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.0.enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.0.log_analytics_workspace_id", ""), + ), + }, + { + Config: scaleDownConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.http_application_routing.#", "0"), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.count", "2"), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.#", "1"), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.0.enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.0.log_analytics_workspace_id", ""), + ), + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_addonProfileRouting(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config 
:= testAccAzureRMKubernetesCluster_addonProfileRouting(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.http_application_routing.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "addon_profile.0.http_application_routing.0.enabled"), + resource.TestCheckResourceAttrSet(resourceName, "addon_profile.0.http_application_routing.0.http_application_routing_zone_name"), + resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.#", "0"), + ), + }, + }, + }) +} + +func testAccAzureRMKubernetesCluster_addonProfileAciConnectorLinux(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_virtual_network" "test" { + name = "acctestvirtnet%d" + address_space = ["172.0.0.0/16"] + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + tags = { + environment = "Testing" + } +} + +resource "azurerm_subnet" "test" { + name = "acctestsubnet%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "172.0.2.0/24" +} + +resource "azurerm_subnet" "test-aci" { + name = "acctestsubnet-aci%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "172.0.3.0/24" + + delegation { + name = "aciDelegation" + + service_delegation { + name = "Microsoft.ContainerInstance/containerGroups" + actions = ["Microsoft.Network/virtualNetworks/subnets/action"] + } + } +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + agent_pool_profile { + name = "default" + count = "1" + vm_size = "Standard_DS2_v2" + vnet_subnet_id = "${azurerm_subnet.test.id}" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } + + addon_profile { + aci_connector_linux { + enabled = true + subnet_name = "${azurerm_subnet.test-aci.name}" + } + } + + network_profile { + network_plugin = "azure" + } +} +`, rInt, location, rInt, rInt, rInt, rInt, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_addonProfileAciConnectorLinuxDisabled(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_virtual_network" "test" { + 
name = "acctestvirtnet%d" + address_space = ["172.0.0.0/16"] + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + tags = { + environment = "Testing" + } +} + +resource "azurerm_subnet" "test" { + name = "acctestsubnet%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "172.0.2.0/24" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + agent_pool_profile { + name = "default" + count = "1" + vm_size = "Standard_DS2_v2" + vnet_subnet_id = "${azurerm_subnet.test.id}" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } + + addon_profile { + aci_connector_linux { + enabled = false + } + } + + network_profile { + network_plugin = "azure" + } +} +`, rInt, location, rInt, rInt, rInt, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_addonProfileAzurePolicy(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + agent_pool_profile { + name = "default" + count = "1" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } + + addon_profile { + azure_policy { + enabled = true + } + } +} +`, rInt, location, rInt, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_addonProfileKubeDashboard(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + agent_pool_profile { + name = "default" + count = "1" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } + + addon_profile { + kube_dashboard { + enabled = false + } + } +} +`, rInt, location, rInt, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_addonProfileOMS(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_log_analytics_workspace" "test" { + name = "acctest-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + sku = "PerGB2018" +} + +resource "azurerm_log_analytics_solution" "test" { + solution_name = "ContainerInsights" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + workspace_resource_id = "${azurerm_log_analytics_workspace.test.id}" + workspace_name = "${azurerm_log_analytics_workspace.test.name}" + + plan { + publisher = "Microsoft" + product = "OMSGallery/ContainerInsights" + } +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + agent_pool_profile { + name = "default" + count = "1" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } + + addon_profile { + oms_agent { + enabled = true + log_analytics_workspace_id = "${azurerm_log_analytics_workspace.test.id}" + } + } +} +`, rInt, location, rInt, rInt, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_addonProfileOMSDisabled(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + agent_pool_profile { + name = "default" + count = "1" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } + + addon_profile { + oms_agent { + enabled = false + } + } +} +`, rInt, location, rInt, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_addonProfileOMSScaleWithoutBlock(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + agent_pool_profile { + name = "default" + count = "2" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } +} +`, rInt, location, rInt, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_addonProfileRouting(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + agent_pool_profile { + name = "default" + count = "1" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } + + addon_profile { + http_application_routing { + enabled = true + } + } +} +`, rInt, location, rInt, rInt, rInt, clientId, clientSecret) +} diff --git a/azurerm/resource_arm_kubernetes_cluster_auth_test.go b/azurerm/resource_arm_kubernetes_cluster_auth_test.go new file mode 100644 index 000000000000..c851e3528396 --- /dev/null +++ b/azurerm/resource_arm_kubernetes_cluster_auth_test.go @@ -0,0 +1,325 @@ +package azurerm + +import ( + "fmt" + "os" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" +) + +func TestAccAzureRMKubernetesCluster_apiServerAuthorizedIPRanges(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_apiServerAuthorizedIPRanges(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.#", "1"), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.azure_active_directory.#", "0"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_key"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_certificate"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.cluster_ca_certificate"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.host"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.username"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.password"), + resource.TestCheckResourceAttr(resourceName, "kube_admin_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "kube_admin_config_raw", ""), + resource.TestCheckResourceAttrSet(resourceName, "agent_pool_profile.0.max_pods"), + resource.TestCheckResourceAttr(resourceName, "api_server_authorized_ip_ranges.#", "3"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_enablePodSecurityPolicy(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_enablePodSecurityPolicy(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "enable_pod_security_policy", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_roleBasedAccessControl(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + location := testLocation() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: 
testAccAzureRMKubernetesCluster_roleBasedAccessControl(ri, location, clientId, clientSecret), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.#", "1"), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.azure_active_directory.#", "0"), + resource.TestCheckResourceAttr(resourceName, "kube_admin_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "kube_admin_config_raw", ""), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_roleBasedAccessControlAAD(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + location := testLocation() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + tenantId := os.Getenv("ARM_TENANT_ID") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesCluster_roleBasedAccessControlAAD(ri, location, clientId, clientSecret, ""), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.#", "1"), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.azure_active_directory.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "role_based_access_control.0.azure_active_directory.0.client_app_id"), + resource.TestCheckResourceAttrSet(resourceName, "role_based_access_control.0.azure_active_directory.0.server_app_id"), + resource.TestCheckResourceAttrSet(resourceName, "role_based_access_control.0.azure_active_directory.0.server_app_secret"), + resource.TestCheckResourceAttrSet(resourceName, "role_based_access_control.0.azure_active_directory.0.tenant_id"), + resource.TestCheckResourceAttr(resourceName, "kube_admin_config.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "kube_admin_config_raw"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "service_principal.0.client_secret", + "role_based_access_control.0.azure_active_directory.0.server_app_secret", + }, + }, + { + // should be no changes since the default for Tenant ID comes from the Provider block + Config: testAccAzureRMKubernetesCluster_roleBasedAccessControlAAD(ri, location, clientId, clientSecret, tenantId), + PlanOnly: true, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "service_principal.0.client_secret", + "role_based_access_control.0.azure_active_directory.0.server_app_secret", + }, + }, + }, + }) +} + +func testAccAzureRMKubernetesCluster_apiServerAuthorizedIPRanges(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = 
"acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + agent_pool_profile { + name = "default" + count = "1" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } + + api_server_authorized_ip_ranges = [ + "8.8.8.8/32", + "8.8.4.4/32", + "8.8.2.0/24", + ] +} +`, rInt, location, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_enablePodSecurityPolicy(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + enable_pod_security_policy = true + + role_based_access_control { + enabled = true + } + + agent_pool_profile { + name = "default" + count = "1" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } +} +`, rInt, location, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_roleBasedAccessControl(rInt int, location, clientId, clientSecret string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + agent_pool_profile { + name = "default" + count = "1" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } + + role_based_access_control { + enabled = true + } +} +`, rInt, location, rInt, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_roleBasedAccessControlAAD(rInt int, location, clientId, clientSecret, tenantId string) string { + return fmt.Sprintf(` +variable "tenant_id" { + default = "%s" +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + agent_pool_profile { + name = "default" + count = "1" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } + + role_based_access_control { + enabled = true + + azure_active_directory { + server_app_id = "%s" + server_app_secret = "%s" + client_app_id = "%s" + tenant_id = "${var.tenant_id}" + } + } +} +`, tenantId, rInt, location, rInt, rInt, rInt, clientId, clientSecret, clientId, clientSecret, clientId) +} diff --git a/azurerm/resource_arm_kubernetes_cluster_network_test.go b/azurerm/resource_arm_kubernetes_cluster_network_test.go new file mode 100644 index 000000000000..7ac14c624eaa --- /dev/null +++ b/azurerm/resource_arm_kubernetes_cluster_network_test.go @@ -0,0 +1,805 @@ +package azurerm + +import ( + "fmt" + "os" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" +) + +func TestAccAzureRMKubernetesCluster_advancedNetworkingKubenet(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_advancedNetworking(ri, clientId, clientSecret, testLocation(), "kubenet") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_plugin", "kubenet"), + ), + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_advancedNetworkingKubenetComplete(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_advancedNetworkingComplete(ri, clientId, clientSecret, testLocation(), "kubenet") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_plugin", "kubenet"), + ), + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_advancedNetworkingAzure(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_advancedNetworking(ri, clientId, clientSecret, testLocation(), "azure") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: 
[]resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_plugin", "azure"), + ), + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_advancedNetworkingAzureComplete(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_advancedNetworkingComplete(ri, clientId, clientSecret, testLocation(), "azure") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_plugin", "azure"), + ), + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_advancedNetworkingAzureCalicoPolicy(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_advancedNetworkingWithPolicy(ri, clientId, clientSecret, testLocation(), "azure", "calico") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_plugin", "azure"), + resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_policy", "calico"), + ), + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_advancedNetworkingAzureCalicoPolicyComplete(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_advancedNetworkingWithPolicyComplete(ri, clientId, clientSecret, testLocation(), "azure", "calico") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_plugin", "azure"), + resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_policy", "calico"), + ), + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_advancedNetworkingAzureNPMPolicy(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_advancedNetworkingWithPolicy(ri, clientId, clientSecret, testLocation(), "azure", "azure") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + 
Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_plugin", "azure"), + resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_policy", "azure"), + ), + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_advancedNetworkingAzureNPMPolicyComplete(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_advancedNetworkingWithPolicyComplete(ri, clientId, clientSecret, testLocation(), "azure", "azure") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_plugin", "azure"), + resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_policy", "azure"), + ), + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_enableNodePublicIP(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_enableNodePublicIP(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.enable_node_public_ip", "true"), + ), + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_internalNetwork(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_internalNetwork(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.max_pods", "60"), + ), + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_standardLoadBalancer(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_standardLoadBalancer(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + 
testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "network_profile.0.load_balancer_sku", "Standard"), + ), + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_standardLoadBalancerComplete(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_standardLoadBalancerComplete(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "network_profile.0.load_balancer_sku", "Standard"), + ), + }, + }, + }) +} + +func testAccAzureRMKubernetesCluster_advancedNetworking(rInt int, clientId string, clientSecret string, location string, networkPlugin string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_virtual_network" "test" { + name = "acctestvirtnet%d" + address_space = ["10.1.0.0/16"] + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + tags = { + environment = "Testing" + } +} + +resource "azurerm_subnet" "test" { + name = "acctestsubnet%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.1.0.0/24" + + # TODO: remove in 2.0 + lifecycle { + ignore_changes = ["route_table_id"] + } +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + agent_pool_profile { + name = "default" + count = "2" + vm_size = "Standard_DS2_v2" + vnet_subnet_id = "${azurerm_subnet.test.id}" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } + + network_profile { + network_plugin = "%s" + } +} +`, rInt, location, rInt, rInt, rInt, rInt, rInt, clientId, clientSecret, networkPlugin) +} + +func testAccAzureRMKubernetesCluster_advancedNetworkingComplete(rInt int, clientId string, clientSecret string, location string, networkPlugin string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_route_table" "test" { + name = "akc-routetable-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + route { + name = "akc-route-%d" + address_prefix = "10.100.0.0/14" + next_hop_type = "VirtualAppliance" + next_hop_in_ip_address = "10.10.1.1" + } +} + +resource 
"azurerm_virtual_network" "test" { + name = "acctestvirtnet%d" + address_space = ["10.1.0.0/16"] + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + tags = { + environment = "Testing" + } +} + +resource "azurerm_subnet" "test" { + name = "acctestsubnet%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.1.0.0/24" + route_table_id = "${azurerm_route_table.test.id}" +} + +resource "azurerm_subnet_route_table_association" "test" { + subnet_id = "${azurerm_subnet.test.id}" + route_table_id = "${azurerm_route_table.test.id}" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + agent_pool_profile { + name = "default" + count = "2" + vm_size = "Standard_DS2_v2" + vnet_subnet_id = "${azurerm_subnet.test.id}" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } + + network_profile { + network_plugin = "%s" + dns_service_ip = "10.10.0.10" + docker_bridge_cidr = "172.18.0.1/16" + service_cidr = "10.10.0.0/16" + } +} +`, rInt, location, rInt, rInt, rInt, rInt, rInt, rInt, rInt, clientId, clientSecret, networkPlugin) +} + +func testAccAzureRMKubernetesCluster_advancedNetworkingWithPolicy(rInt int, clientId string, clientSecret string, location string, networkPlugin string, networkPolicy string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_virtual_network" "test" { + name = "acctestvirtnet%d" + address_space = ["10.1.0.0/16"] + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + tags = { + environment = "Testing" + } +} + +resource "azurerm_subnet" "test" { + name = "acctestsubnet%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.1.0.0/24" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + agent_pool_profile { + name = "default" + count = "2" + vm_size = "Standard_DS2_v2" + vnet_subnet_id = "${azurerm_subnet.test.id}" + } + + service_principal { 
+ client_id = "%s" + client_secret = "%s" + } + + network_profile { + network_plugin = "%s" + network_policy = "%s" + } +} +`, rInt, location, rInt, rInt, rInt, rInt, rInt, clientId, clientSecret, networkPlugin, networkPolicy) +} + +func testAccAzureRMKubernetesCluster_advancedNetworkingWithPolicyComplete(rInt int, clientId string, clientSecret string, location string, networkPlugin string, networkPolicy string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_route_table" "test" { + name = "akc-routetable-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + route { + name = "akc-route-%d" + address_prefix = "10.100.0.0/14" + next_hop_type = "VirtualAppliance" + next_hop_in_ip_address = "10.10.1.1" + } +} + +resource "azurerm_virtual_network" "test" { + name = "acctestvirtnet%d" + address_space = ["10.1.0.0/16"] + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + tags = { + environment = "Testing" + } +} + +resource "azurerm_subnet" "test" { + name = "acctestsubnet%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.1.0.0/24" + route_table_id = "${azurerm_route_table.test.id}" +} + +resource "azurerm_subnet_route_table_association" "test" { + subnet_id = "${azurerm_subnet.test.id}" + route_table_id = "${azurerm_route_table.test.id}" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + agent_pool_profile { + name = "default" + count = "2" + vm_size = "Standard_DS2_v2" + vnet_subnet_id = "${azurerm_subnet.test.id}" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } + + network_profile { + network_plugin = "%s" + network_policy = "%s" + dns_service_ip = "10.10.0.10" + docker_bridge_cidr = "172.18.0.1/16" + service_cidr = "10.10.0.0/16" + } +} +`, rInt, location, rInt, rInt, rInt, rInt, rInt, rInt, rInt, clientId, clientSecret, networkPlugin, networkPolicy) +} + +func testAccAzureRMKubernetesCluster_enableNodePublicIP(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + agent_pool_profile { + name = "default" + count = "1" + type = "VirtualMachineScaleSets" + vm_size = "Standard_DS2_v2" + enable_node_public_ip = true + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } +} +`, rInt, location, rInt, rInt, clientId, 
clientSecret) +} + +func testAccAzureRMKubernetesCluster_internalNetwork(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_virtual_network" "test" { + name = "acctestvirtnet%d" + address_space = ["172.0.0.0/16"] + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + tags = { + environment = "Testing" + } +} + +resource "azurerm_subnet" "test" { + name = "acctestsubnet%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "172.0.2.0/24" + + # TODO: remove in 2.0 + lifecycle { + ignore_changes = ["route_table_id"] + } +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + agent_pool_profile { + name = "default" + count = "2" + vm_size = "Standard_DS2_v2" + vnet_subnet_id = "${azurerm_subnet.test.id}" + max_pods = 60 + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } +} +`, rInt, location, rInt, rInt, rInt, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_standardLoadBalancer(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_virtual_network" "test" { + name = "acctestvirtnet%d" + address_space = ["10.1.0.0/16"] + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + tags = { + environment = "Testing" + } +} + +resource "azurerm_subnet" "test" { + name = "acctestsubnet%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.1.0.0/24" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + kubernetes_version = "%s" + + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + agent_pool_profile { + name = "default" + count = "2" + vm_size = "Standard_DS2_v2" + vnet_subnet_id = "${azurerm_subnet.test.id}" + } + + service_principal { + client_id = "%s" + client_secret = "%s" 
+ } + + network_profile { + network_plugin = "azure" + load_balancer_sku = "Standard" + } +} +`, rInt, location, rInt, rInt, rInt, rInt, currentKubernetesVersion, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_standardLoadBalancerComplete(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_route_table" "test" { + name = "akc-routetable-%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + route { + name = "akc-route-%d" + address_prefix = "10.100.0.0/14" + next_hop_type = "VirtualAppliance" + next_hop_in_ip_address = "10.10.1.1" + } +} + +resource "azurerm_virtual_network" "test" { + name = "acctestvirtnet%d" + address_space = ["10.1.0.0/16"] + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + tags = { + environment = "Testing" + } +} + +resource "azurerm_subnet" "test" { + name = "acctestsubnet%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.1.0.0/24" + route_table_id = "${azurerm_route_table.test.id}" +} + +resource "azurerm_subnet_route_table_association" "test" { + subnet_id = "${azurerm_subnet.test.id}" + route_table_id = "${azurerm_route_table.test.id}" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + kubernetes_version = "%s" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + agent_pool_profile { + name = "default" + count = "2" + vm_size = "Standard_DS2_v2" + vnet_subnet_id = "${azurerm_subnet.test.id}" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } + + network_profile { + network_plugin = "azure" + dns_service_ip = "10.10.0.10" + docker_bridge_cidr = "172.18.0.1/16" + service_cidr = "10.10.0.0/16" + load_balancer_sku = "Standard" + } +} +`, rInt, location, rInt, rInt, rInt, rInt, rInt, rInt, currentKubernetesVersion, rInt, clientId, clientSecret) +} diff --git a/azurerm/resource_arm_kubernetes_cluster_other_test.go b/azurerm/resource_arm_kubernetes_cluster_other_test.go new file mode 100644 index 000000000000..15a663661f16 --- /dev/null +++ b/azurerm/resource_arm_kubernetes_cluster_other_test.go @@ -0,0 +1,562 @@ +package azurerm + +import ( + "fmt" + "os" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features" +) + +func TestAccAzureRMKubernetesCluster_basic(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") 
+ config := testAccAzureRMKubernetesCluster_basic(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.#", "1"), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.azure_active_directory.#", "0"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_key"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_certificate"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.cluster_ca_certificate"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.host"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.username"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.password"), + resource.TestCheckResourceAttr(resourceName, "kube_admin_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "kube_admin_config_raw", ""), + resource.TestCheckResourceAttrSet(resourceName, "agent_pool_profile.0.max_pods"), + resource.TestCheckResourceAttr(resourceName, "network_profile.0.load_balancer_sku", "Basic"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_requiresImport(t *testing.T) { + if !features.ShouldResourcesBeImported() { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesCluster_basic(ri, clientId, clientSecret, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + ), + }, + { + Config: testAccAzureRMKubernetesCluster_requiresImport(ri, clientId, clientSecret, location), + ExpectError: testRequiresImportError("azurerm_kubernetes_cluster"), + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_linuxProfile(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_linuxProfile(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_key"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_certificate"), + 
resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.cluster_ca_certificate"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.host"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.username"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.password"), + resource.TestCheckResourceAttrSet(resourceName, "agent_pool_profile.0.max_pods"), + resource.TestCheckResourceAttrSet(resourceName, "linux_profile.0.admin_username"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_nodeTaints(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_nodeTaints(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.1.node_taints.0", "key=value:NoSchedule"), + ), + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_nodeResourceGroup(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_nodeResourceGroup(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_upgradeConfig(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + location := testLocation() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesCluster_upgrade(ri, location, clientId, clientSecret, olderKubernetesVersion), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "kubernetes_version", olderKubernetesVersion), + ), + }, + { + Config: testAccAzureRMKubernetesCluster_upgrade(ri, location, clientId, clientSecret, currentKubernetesVersion), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "kubernetes_version", currentKubernetesVersion), + ), + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_virtualMachineScaleSets(t *testing.T) { + 
resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_virtualMachineScaleSets(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.#", "1"), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.azure_active_directory.#", "0"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_key"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_certificate"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.cluster_ca_certificate"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.host"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.username"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.password"), + resource.TestCheckResourceAttr(resourceName, "kube_admin_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "kube_admin_config_raw", ""), + resource.TestCheckResourceAttrSet(resourceName, "agent_pool_profile.0.max_pods"), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.type", "VirtualMachineScaleSets"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_windowsProfile(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_windowsProfile(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_key"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_certificate"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.cluster_ca_certificate"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.host"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.username"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.password"), + resource.TestCheckResourceAttrSet(resourceName, "agent_pool_profile.0.max_pods"), + resource.TestCheckResourceAttrSet(resourceName, "agent_pool_profile.1.max_pods"), + resource.TestCheckResourceAttrSet(resourceName, "linux_profile.0.admin_username"), + resource.TestCheckResourceAttrSet(resourceName, "windows_profile.0.admin_username"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + 
"windows_profile.0.admin_password", + "service_principal.0.client_secret", + }, + }, + }, + }) +} + +func testAccAzureRMKubernetesCluster_basic(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + agent_pool_profile { + name = "default" + count = "1" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } +} +`, rInt, location, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_requiresImport(rInt int, clientId, clientSecret, location string) string { + template := testAccAzureRMKubernetesCluster_basic(rInt, clientId, clientSecret, location) + return fmt.Sprintf(` +%s + +resource "azurerm_kubernetes_cluster" "import" { + name = "${azurerm_kubernetes_cluster.test.name}" + location = "${azurerm_kubernetes_cluster.test.location}" + resource_group_name = "${azurerm_kubernetes_cluster.test.resource_group_name}" + dns_prefix = "${azurerm_kubernetes_cluster.test.dns_prefix}" + + agent_pool_profile { + name = "default" + count = "1" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } +} +`, template, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_linuxProfile(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + agent_pool_profile { + name = "default" + count = "1" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } +} +`, rInt, location, rInt, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_nodeTaints(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + agent_pool_profile { + name = "default" + count = "1" + type = "VirtualMachineScaleSets" + vm_size = "Standard_DS2_v2" + } + + agent_pool_profile { + name = "pool1" + count = "1" + type = "VirtualMachineScaleSets" + vm_size = "Standard_DS2_v2" + node_taints = [ + "key=value:NoSchedule" + ] + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } +} +`, rInt, location, 
rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_nodeResourceGroup(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + node_resource_group = "acctestRGAKS-%d" + + agent_pool_profile { + name = "default" + count = "1" + type = "VirtualMachineScaleSets" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } +} +`, rInt, location, rInt, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_upgrade(rInt int, location, clientId, clientSecret, version string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + kubernetes_version = "%s" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + agent_pool_profile { + name = "default" + count = "1" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } +} +`, rInt, location, rInt, rInt, version, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_virtualMachineScaleSets(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + agent_pool_profile { + name = "default" + type = "VirtualMachineScaleSets" + count = "1" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } +} +`, rInt, location, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_windowsProfile(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + windows_profile { + admin_username = "azureuser" + admin_password = "pass_123-worD" + } + + agent_pool_profile { + name = "linux" + type = "VirtualMachineScaleSets" + count = "1" + vm_size = "Standard_DS2_v2" + max_pods = 30 + os_type = "Linux" + os_disk_size_gb = "30" + } + + agent_pool_profile { + name = "win" + type = "VirtualMachineScaleSets" + count = "1" + vm_size = "Standard_DS3_v2" + max_pods = 30 + os_type = "Windows" + os_disk_size_gb = "30" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } + + network_profile { + network_plugin = "azure" + network_policy = "azure" + dns_service_ip = "10.10.0.10" + docker_bridge_cidr = "172.18.0.1/16" + service_cidr = "10.10.0.0/16" + } +} +`, rInt, location, rInt, rInt, rInt, clientId, clientSecret) +} diff --git a/azurerm/resource_arm_kubernetes_cluster_scaling_test.go b/azurerm/resource_arm_kubernetes_cluster_scaling_test.go new file mode 100644 index 000000000000..794889a3b5f0 --- /dev/null +++ b/azurerm/resource_arm_kubernetes_cluster_scaling_test.go @@ -0,0 +1,257 @@ +package azurerm + +import ( + "fmt" + "os" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" +) + +func TestAccAzureRMKubernetesCluster_addAgent(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + initConfig := testAccAzureRMKubernetesCluster_basic(ri, clientId, clientSecret, testLocation()) + addAgentConfig := testAccAzureRMKubernetesCluster_addAgent(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: initConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + ), + }, + { + Config: addAgentConfig, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.count", "2"), + ), + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_autoScalingNoAvailabilityZones(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_autoscaleNoAvailabilityZones(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.type", "VirtualMachineScaleSets"), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.min_count", "1"), + 
resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.max_count", "2"), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.enable_auto_scaling", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_autoScalingWithAvailabilityZones(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_autoscaleWithAvailabilityZones(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.type", "VirtualMachineScaleSets"), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.min_count", "1"), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.max_count", "2"), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.enable_auto_scaling", "true"), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.availability_zones.#", "2"), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.availability_zones.0", "1"), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.availability_zones.1", "2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_multipleAgents(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_multipleAgents(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.name", "pool1"), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.1.name", "pool2"), + ), + }, + }, + }) +} + +func testAccAzureRMKubernetesCluster_addAgent(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + agent_pool_profile { + name = "default" + count = "2" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } +} +`, rInt, location, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_autoscaleNoAvailabilityZones(rInt 
int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + agent_pool_profile { + name = "pool1" + min_count = "1" + max_count = "2" + enable_auto_scaling = "true" + type = "VirtualMachineScaleSets" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } +} +`, rInt, location, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_autoscaleWithAvailabilityZones(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + kubernetes_version = "%s" + + agent_pool_profile { + name = "pool1" + min_count = "1" + max_count = "2" + enable_auto_scaling = "true" + type = "VirtualMachineScaleSets" + vm_size = "Standard_DS2_v2" + availability_zones = ["1", "2"] + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } + + network_profile { + network_plugin = "kubenet" + load_balancer_sku = "Standard" + } +} +`, rInt, location, rInt, rInt, olderKubernetesVersion, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_multipleAgents(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + agent_pool_profile { + name = "pool1" + count = "1" + vm_size = "Standard_DS2_v2" + } + + agent_pool_profile { + name = "pool2" + count = "1" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } +} +`, rInt, location, rInt, rInt, clientId, clientSecret) +} diff --git a/azurerm/resource_arm_kubernetes_cluster_test.go b/azurerm/resource_arm_kubernetes_cluster_test.go index 9187b7b4485d..9a5437595eb0 100644 --- a/azurerm/resource_arm_kubernetes_cluster_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_test.go @@ -3,1033 +3,14 @@ package azurerm import ( "fmt" "net/http" - "os" - "testing" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features" ) var olderKubernetesVersion = "1.14.8" var currentKubernetesVersion = "1.15.5" -func TestAccAzureRMKubernetesCluster_basic(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_basic(ri, clientId, clientSecret, testLocation()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { 
testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "role_based_access_control.#", "1"), - resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.enabled", "false"), - resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.azure_active_directory.#", "0"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_key"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_certificate"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.cluster_ca_certificate"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.host"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.username"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.password"), - resource.TestCheckResourceAttr(resourceName, "kube_admin_config.#", "0"), - resource.TestCheckResourceAttr(resourceName, "kube_admin_config_raw", ""), - resource.TestCheckResourceAttrSet(resourceName, "agent_pool_profile.0.max_pods"), - resource.TestCheckResourceAttr(resourceName, "network_profile.0.load_balancer_sku", "Basic"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_requiresImport(t *testing.T) { - if !features.ShouldResourcesBeImported() { - t.Skip("Skipping since resources aren't required to be imported") - return - } - - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - location := testLocation() - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMKubernetesCluster_basic(ri, clientId, clientSecret, location), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - ), - }, - { - Config: testAccAzureRMKubernetesCluster_requiresImport(ri, clientId, clientSecret, location), - ExpectError: testRequiresImportError("azurerm_kubernetes_cluster"), - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_roleBasedAccessControl(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - location := testLocation() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMKubernetesCluster_roleBasedAccessControl(ri, location, clientId, clientSecret), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "role_based_access_control.#", "1"), - resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.enabled", "true"), - resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.azure_active_directory.#", "0"), - 
resource.TestCheckResourceAttr(resourceName, "kube_admin_config.#", "0"), - resource.TestCheckResourceAttr(resourceName, "kube_admin_config_raw", ""), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_roleBasedAccessControlAAD(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - location := testLocation() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - tenantId := os.Getenv("ARM_TENANT_ID") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMKubernetesCluster_roleBasedAccessControlAAD(ri, location, clientId, clientSecret, ""), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "role_based_access_control.#", "1"), - resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.enabled", "true"), - resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.azure_active_directory.#", "1"), - resource.TestCheckResourceAttrSet(resourceName, "role_based_access_control.0.azure_active_directory.0.client_app_id"), - resource.TestCheckResourceAttrSet(resourceName, "role_based_access_control.0.azure_active_directory.0.server_app_id"), - resource.TestCheckResourceAttrSet(resourceName, "role_based_access_control.0.azure_active_directory.0.server_app_secret"), - resource.TestCheckResourceAttrSet(resourceName, "role_based_access_control.0.azure_active_directory.0.tenant_id"), - resource.TestCheckResourceAttr(resourceName, "kube_admin_config.#", "1"), - resource.TestCheckResourceAttrSet(resourceName, "kube_admin_config_raw"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "service_principal.0.client_secret", - "role_based_access_control.0.azure_active_directory.0.server_app_secret", - }, - }, - { - // should be no changes since the default for Tenant ID comes from the Provider block - Config: testAccAzureRMKubernetesCluster_roleBasedAccessControlAAD(ri, location, clientId, clientSecret, tenantId), - PlanOnly: true, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "service_principal.0.client_secret", - "role_based_access_control.0.azure_active_directory.0.server_app_secret", - }, - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_linuxProfile(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_linuxProfile(ri, clientId, clientSecret, testLocation()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - 
resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_key"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_certificate"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.cluster_ca_certificate"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.host"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.username"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.password"), - resource.TestCheckResourceAttrSet(resourceName, "agent_pool_profile.0.max_pods"), - resource.TestCheckResourceAttrSet(resourceName, "linux_profile.0.admin_username"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_windowsProfile(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_windowsProfile(ri, clientId, clientSecret, testLocation()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_key"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_certificate"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.cluster_ca_certificate"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.host"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.username"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.password"), - resource.TestCheckResourceAttrSet(resourceName, "agent_pool_profile.0.max_pods"), - resource.TestCheckResourceAttrSet(resourceName, "agent_pool_profile.1.max_pods"), - resource.TestCheckResourceAttrSet(resourceName, "linux_profile.0.admin_username"), - resource.TestCheckResourceAttrSet(resourceName, "windows_profile.0.admin_username"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "windows_profile.0.admin_password", - "service_principal.0.client_secret", - }, - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_addAgent(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - initConfig := testAccAzureRMKubernetesCluster_basic(ri, clientId, clientSecret, testLocation()) - addAgentConfig := testAccAzureRMKubernetesCluster_addAgent(ri, clientId, clientSecret, testLocation()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: initConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - ), - }, - { - Config: addAgentConfig, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.count", "2"), - ), - }, - }, - }) -} - -func 
TestAccAzureRMKubernetesCluster_upgradeConfig(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - location := testLocation() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMKubernetesCluster_upgrade(ri, location, clientId, clientSecret, olderKubernetesVersion), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "kubernetes_version", olderKubernetesVersion), - ), - }, - { - Config: testAccAzureRMKubernetesCluster_upgrade(ri, location, clientId, clientSecret, currentKubernetesVersion), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "kubernetes_version", currentKubernetesVersion), - ), - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_internalNetwork(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_internalNetwork(ri, clientId, clientSecret, testLocation()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.max_pods", "60"), - ), - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_addonProfileAciConnectorLinux(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_addonProfileAciConnectorLinux(ri, clientId, clientSecret, testLocation()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.http_application_routing.#", "0"), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.aci_connector_linux.#", "1"), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.aci_connector_linux.0.enabled", "true"), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.aci_connector_linux.0.subnet_name", fmt.Sprintf("acctestsubnet-aci%d", ri)), - ), - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_addonProfileAciConnectorLinuxDisabled(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - location := testLocation() - disablingConfig := testAccAzureRMKubernetesCluster_addonProfileAciConnectorLinuxDisabled(ri, clientId, clientSecret, location) - - resource.Test(t, resource.TestCase{ - PreCheck: func() 
{ testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: disablingConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.http_application_routing.#", "0"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.count", "1"), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.aci_connector_linux.#", "1"), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.aci_connector_linux.0.enabled", "false"), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.aci_connector_linux.0.subnet_name", ""), - ), - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_addonProfileOMS(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_addonProfileOMS(ri, clientId, clientSecret, testLocation()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.http_application_routing.#", "0"), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.#", "1"), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.0.enabled", "true"), - resource.TestCheckResourceAttrSet(resourceName, "addon_profile.0.oms_agent.0.log_analytics_workspace_id"), - ), - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_addonProfileOMSToggle(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - location := testLocation() - enablingConfig := testAccAzureRMKubernetesCluster_addonProfileOMS(ri, clientId, clientSecret, location) - disablingConfig := testAccAzureRMKubernetesCluster_addonProfileOMSDisabled(ri, clientId, clientSecret, location) - scaleDownConfig := testAccAzureRMKubernetesCluster_addonProfileOMSScaleWithoutBlock(ri, clientId, clientSecret, location) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: enablingConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.http_application_routing.#", "0"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.count", "1"), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.#", "1"), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.0.enabled", "true"), - resource.TestCheckResourceAttrSet(resourceName, "addon_profile.0.oms_agent.0.log_analytics_workspace_id"), - ), - }, - { - Config: disablingConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.http_application_routing.#", "0"), - 
resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.count", "1"), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.#", "1"), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.0.enabled", "false"), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.0.log_analytics_workspace_id", ""), - ), - }, - { - Config: scaleDownConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.http_application_routing.#", "0"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.count", "2"), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.#", "1"), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.0.enabled", "false"), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.0.log_analytics_workspace_id", ""), - ), - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_addonProfileRouting(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_addonProfileRouting(ri, clientId, clientSecret, testLocation()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.http_application_routing.#", "1"), - resource.TestCheckResourceAttrSet(resourceName, "addon_profile.0.http_application_routing.0.enabled"), - resource.TestCheckResourceAttrSet(resourceName, "addon_profile.0.http_application_routing.0.http_application_routing_zone_name"), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.#", "0"), - ), - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_addonProfileKubeDashboard(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_addonProfileKubeDashboard(ri, clientId, clientSecret, testLocation()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.kube_dashboard.#", "1"), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.kube_dashboard.0.enabled", "false"), - ), - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_addonProfileAzurePolicy(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_addonProfileAzurePolicy(ri, clientId, clientSecret, testLocation()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.azure_policy.#", "1"), - resource.TestCheckResourceAttr(resourceName, "addon_profile.0.azure_policy.0.enabled", "true"), - ), - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_advancedNetworkingKubenet(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_advancedNetworking(ri, clientId, clientSecret, testLocation(), "kubenet") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_plugin", "kubenet"), - ), - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_advancedNetworkingKubenetComplete(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_advancedNetworkingComplete(ri, clientId, clientSecret, testLocation(), "kubenet") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_plugin", "kubenet"), - ), - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_advancedNetworkingAzure(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_advancedNetworking(ri, clientId, clientSecret, testLocation(), "azure") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_plugin", "azure"), - ), - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_advancedNetworkingAzureComplete(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_advancedNetworkingComplete(ri, clientId, clientSecret, testLocation(), "azure") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - 
testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_plugin", "azure"), - ), - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_advancedNetworkingAzureCalicoPolicy(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_advancedNetworkingWithPolicy(ri, clientId, clientSecret, testLocation(), "azure", "calico") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_plugin", "azure"), - resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_policy", "calico"), - ), - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_advancedNetworkingAzureCalicoPolicyComplete(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_advancedNetworkingWithPolicyComplete(ri, clientId, clientSecret, testLocation(), "azure", "calico") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_plugin", "azure"), - resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_policy", "calico"), - ), - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_advancedNetworkingAzureNPMPolicy(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_advancedNetworkingWithPolicy(ri, clientId, clientSecret, testLocation(), "azure", "azure") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_plugin", "azure"), - resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_policy", "azure"), - ), - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_advancedNetworkingAzureNPMPolicyComplete(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_advancedNetworkingWithPolicyComplete(ri, clientId, clientSecret, testLocation(), "azure", "azure") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_plugin", "azure"), - resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_policy", "azure"), - ), - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_standardLoadBalancer(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_standardLoadBalancer(ri, clientId, clientSecret, testLocation()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "network_profile.0.load_balancer_sku", "Standard"), - ), - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_standardLoadBalancerComplete(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_standardLoadBalancerComplete(ri, clientId, clientSecret, testLocation()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "network_profile.0.load_balancer_sku", "Standard"), - ), - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_apiServerAuthorizedIPRanges(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_apiServerAuthorizedIPRanges(ri, clientId, clientSecret, testLocation()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "role_based_access_control.#", "1"), - resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.enabled", "false"), - resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.azure_active_directory.#", "0"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_key"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_certificate"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.cluster_ca_certificate"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.host"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.username"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.password"), - resource.TestCheckResourceAttr(resourceName, 
"kube_admin_config.#", "0"), - resource.TestCheckResourceAttr(resourceName, "kube_admin_config_raw", ""), - resource.TestCheckResourceAttrSet(resourceName, "agent_pool_profile.0.max_pods"), - resource.TestCheckResourceAttr(resourceName, "api_server_authorized_ip_ranges.#", "3"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_virtualMachineScaleSets(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_virtualMachineScaleSets(ri, clientId, clientSecret, testLocation()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "role_based_access_control.#", "1"), - resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.enabled", "false"), - resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.azure_active_directory.#", "0"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_key"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_certificate"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.cluster_ca_certificate"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.host"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.username"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.password"), - resource.TestCheckResourceAttr(resourceName, "kube_admin_config.#", "0"), - resource.TestCheckResourceAttr(resourceName, "kube_admin_config_raw", ""), - resource.TestCheckResourceAttrSet(resourceName, "agent_pool_profile.0.max_pods"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.type", "VirtualMachineScaleSets"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_autoScalingNoAvailabilityZones(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_autoscaleNoAvailabilityZones(ri, clientId, clientSecret, testLocation()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.type", "VirtualMachineScaleSets"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.min_count", "1"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.max_count", "2"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.enable_auto_scaling", "true"), - ), - }, - { - 
ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_autoScalingWithAvailabilityZones(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_autoscaleWithAvailabilityZones(ri, clientId, clientSecret, testLocation()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.type", "VirtualMachineScaleSets"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.min_count", "1"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.max_count", "2"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.enable_auto_scaling", "true"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.availability_zones.#", "2"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.availability_zones.0", "1"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.availability_zones.1", "2"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_multipleAgents(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_multipleAgents(ri, clientId, clientSecret, testLocation()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.name", "pool1"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.1.name", "pool2"), - ), - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_nodeTaints(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_nodeTaints(ri, clientId, clientSecret, testLocation()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.1.node_taints.0", "key=value:NoSchedule"), - ), - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_nodeResourceGroup(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - 
clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_nodeResourceGroup(ri, clientId, clientSecret, testLocation()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, - }, - }, - }) -} - -func TestAccAzureRMKubernetesCluster_enablePodSecurityPolicy(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_enablePodSecurityPolicy(ri, clientId, clientSecret, testLocation()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "enable_pod_security_policy", "true"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, - }, - }, - }) -} - func testCheckAzureRMKubernetesClusterExists(resourceName string) resource.TestCheckFunc { return func(s *terraform.State) error { // Ensure we have enough information in state to look up in API @@ -1085,1508 +66,3 @@ func testCheckAzureRMKubernetesClusterDestroy(s *terraform.State) error { return nil } - -func TestAccAzureRMKubernetesCluster_enableNodePublicIP(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_enableNodePublicIP(ri, clientId, clientSecret, testLocation()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.enable_node_public_ip", "true"), - ), - }, - }, - }) -} - -func testAccAzureRMKubernetesCluster_basic(rInt int, clientId string, clientSecret string, location string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - agent_pool_profile { - name = "default" - count = "1" - vm_size = "Standard_DS2_v2" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } -} -`, rInt, location, rInt, rInt, clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_requiresImport(rInt int, clientId, clientSecret, 
location string) string { - template := testAccAzureRMKubernetesCluster_basic(rInt, clientId, clientSecret, location) - return fmt.Sprintf(` -%s - -resource "azurerm_kubernetes_cluster" "import" { - name = "${azurerm_kubernetes_cluster.test.name}" - location = "${azurerm_kubernetes_cluster.test.location}" - resource_group_name = "${azurerm_kubernetes_cluster.test.resource_group_name}" - dns_prefix = "${azurerm_kubernetes_cluster.test.dns_prefix}" - - agent_pool_profile { - name = "default" - count = "1" - vm_size = "Standard_DS2_v2" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } -} -`, template, clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_linuxProfile(rInt int, clientId string, clientSecret string, location string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - linux_profile { - admin_username = "acctestuser%d" - - ssh_key { - key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" - } - } - - agent_pool_profile { - name = "default" - count = "1" - vm_size = "Standard_DS2_v2" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } -} -`, rInt, location, rInt, rInt, rInt, clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_windowsProfile(rInt int, clientId string, clientSecret string, location string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - linux_profile { - admin_username = "acctestuser%d" - - ssh_key { - key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" - } - } - - windows_profile { - admin_username = "azureuser" - admin_password = "pass_123-worD" - } - - agent_pool_profile { - name = "linux" - type = "VirtualMachineScaleSets" - count = "1" - vm_size = "Standard_DS2_v2" - max_pods = 30 - os_type = "Linux" - os_disk_size_gb = "30" - } - - agent_pool_profile { - name = "win" - type = "VirtualMachineScaleSets" - count = "1" - vm_size = "Standard_DS3_v2" - max_pods = 30 - os_type = "Windows" - os_disk_size_gb = "30" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } - - network_profile { - network_plugin = "azure" - network_policy = "azure" - dns_service_ip = "10.10.0.10" - docker_bridge_cidr = "172.18.0.1/16" - service_cidr = "10.10.0.0/16" - } -} -`, rInt, location, rInt, rInt, rInt, 
clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_addAgent(rInt int, clientId string, clientSecret string, location string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - agent_pool_profile { - name = "default" - count = "2" - vm_size = "Standard_DS2_v2" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } -} -`, rInt, location, rInt, rInt, clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_roleBasedAccessControl(rInt int, location, clientId, clientSecret string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - linux_profile { - admin_username = "acctestuser%d" - - ssh_key { - key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" - } - } - - agent_pool_profile { - name = "default" - count = "1" - vm_size = "Standard_DS2_v2" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } - - role_based_access_control { - enabled = true - } -} -`, rInt, location, rInt, rInt, rInt, clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_roleBasedAccessControlAAD(rInt int, location, clientId, clientSecret, tenantId string) string { - return fmt.Sprintf(` -variable "tenant_id" { - default = "%s" -} - -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - linux_profile { - admin_username = "acctestuser%d" - - ssh_key { - key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" - } - } - - agent_pool_profile { - name = "default" - count = "1" - vm_size = "Standard_DS2_v2" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } - - role_based_access_control { - enabled = true - - azure_active_directory { - server_app_id = "%s" - server_app_secret = "%s" - client_app_id = "%s" - tenant_id = "${var.tenant_id}" - } - } -} -`, tenantId, rInt, location, rInt, rInt, rInt, clientId, clientSecret, clientId, clientSecret, clientId) -} - -func testAccAzureRMKubernetesCluster_internalNetwork(rInt int, clientId string, clientSecret string, location string) string { - return 
fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_virtual_network" "test" { - name = "acctestvirtnet%d" - address_space = ["172.0.0.0/16"] - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - - tags = { - environment = "Testing" - } -} - -resource "azurerm_subnet" "test" { - name = "acctestsubnet%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "172.0.2.0/24" - - # TODO: remove in 2.0 - lifecycle { - ignore_changes = ["route_table_id"] - } -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - linux_profile { - admin_username = "acctestuser%d" - - ssh_key { - key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" - } - } - - agent_pool_profile { - name = "default" - count = "2" - vm_size = "Standard_DS2_v2" - vnet_subnet_id = "${azurerm_subnet.test.id}" - max_pods = 60 - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } -} -`, rInt, location, rInt, rInt, rInt, rInt, rInt, clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_addonProfileAciConnectorLinux(rInt int, clientId string, clientSecret string, location string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_virtual_network" "test" { - name = "acctestvirtnet%d" - address_space = ["172.0.0.0/16"] - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - - tags = { - environment = "Testing" - } -} - -resource "azurerm_subnet" "test" { - name = "acctestsubnet%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "172.0.2.0/24" -} - -resource "azurerm_subnet" "test-aci" { - name = "acctestsubnet-aci%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "172.0.3.0/24" - - delegation { - name = "aciDelegation" - - service_delegation { - name = "Microsoft.ContainerInstance/containerGroups" - actions = ["Microsoft.Network/virtualNetworks/subnets/action"] - } - } -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - linux_profile { - admin_username = "acctestuser%d" - - ssh_key { - key_data = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" - } - } - - agent_pool_profile { - name = "default" - count = "1" - vm_size = "Standard_DS2_v2" - vnet_subnet_id = "${azurerm_subnet.test.id}" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } - - addon_profile { - aci_connector_linux { - enabled = true - subnet_name = "${azurerm_subnet.test-aci.name}" - } - } - - network_profile { - network_plugin = "azure" - } -} -`, rInt, location, rInt, rInt, rInt, rInt, rInt, rInt, clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_addonProfileAciConnectorLinuxDisabled(rInt int, clientId string, clientSecret string, location string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_virtual_network" "test" { - name = "acctestvirtnet%d" - address_space = ["172.0.0.0/16"] - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - - tags = { - environment = "Testing" - } -} - -resource "azurerm_subnet" "test" { - name = "acctestsubnet%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "172.0.2.0/24" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - linux_profile { - admin_username = "acctestuser%d" - - ssh_key { - key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" - } - } - - agent_pool_profile { - name = "default" - count = "1" - vm_size = "Standard_DS2_v2" - vnet_subnet_id = "${azurerm_subnet.test.id}" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } - - addon_profile { - aci_connector_linux { - enabled = false - } - } - - network_profile { - network_plugin = "azure" - } -} -`, rInt, location, rInt, rInt, rInt, rInt, rInt, clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_addonProfileOMS(rInt int, clientId string, clientSecret string, location string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_log_analytics_workspace" "test" { - name = "acctest-%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "PerGB2018" -} - -resource "azurerm_log_analytics_solution" "test" { - solution_name = "ContainerInsights" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - workspace_resource_id = "${azurerm_log_analytics_workspace.test.id}" - workspace_name = 
"${azurerm_log_analytics_workspace.test.name}" - - plan { - publisher = "Microsoft" - product = "OMSGallery/ContainerInsights" - } -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - linux_profile { - admin_username = "acctestuser%d" - - ssh_key { - key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" - } - } - - agent_pool_profile { - name = "default" - count = "1" - vm_size = "Standard_DS2_v2" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } - - addon_profile { - oms_agent { - enabled = true - log_analytics_workspace_id = "${azurerm_log_analytics_workspace.test.id}" - } - } -} -`, rInt, location, rInt, rInt, rInt, rInt, clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_addonProfileOMSDisabled(rInt int, clientId string, clientSecret string, location string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - linux_profile { - admin_username = "acctestuser%d" - - ssh_key { - key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" - } - } - - agent_pool_profile { - name = "default" - count = "1" - vm_size = "Standard_DS2_v2" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } - - addon_profile { - oms_agent { - enabled = false - } - } -} -`, rInt, location, rInt, rInt, rInt, clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_addonProfileOMSScaleWithoutBlock(rInt int, clientId string, clientSecret string, location string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - linux_profile { - admin_username = "acctestuser%d" - - ssh_key { - key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" - } - } - - agent_pool_profile { - name = "default" - count = "2" - vm_size = "Standard_DS2_v2" - } - - 
service_principal { - client_id = "%s" - client_secret = "%s" - } -} -`, rInt, location, rInt, rInt, rInt, clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_addonProfileRouting(rInt int, clientId string, clientSecret string, location string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - linux_profile { - admin_username = "acctestuser%d" - - ssh_key { - key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" - } - } - - agent_pool_profile { - name = "default" - count = "1" - vm_size = "Standard_DS2_v2" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } - - addon_profile { - http_application_routing { - enabled = true - } - } -} -`, rInt, location, rInt, rInt, rInt, clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_addonProfileKubeDashboard(rInt int, clientId string, clientSecret string, location string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - linux_profile { - admin_username = "acctestuser%d" - - ssh_key { - key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" - } - } - - agent_pool_profile { - name = "default" - count = "1" - vm_size = "Standard_DS2_v2" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } - - addon_profile { - kube_dashboard { - enabled = false - } - } -} -`, rInt, location, rInt, rInt, rInt, clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_addonProfileAzurePolicy(rInt int, clientId string, clientSecret string, location string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - linux_profile { - admin_username = "acctestuser%d" - - ssh_key { - key_data = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" - } - } - - agent_pool_profile { - name = "default" - count = "1" - vm_size = "Standard_DS2_v2" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } - - addon_profile { - azure_policy { - enabled = true - } - } -} -`, rInt, location, rInt, rInt, rInt, clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_upgrade(rInt int, location, clientId, clientSecret, version string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - kubernetes_version = "%s" - - linux_profile { - admin_username = "acctestuser%d" - - ssh_key { - key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" - } - } - - agent_pool_profile { - name = "default" - count = "1" - vm_size = "Standard_DS2_v2" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } -} -`, rInt, location, rInt, rInt, version, rInt, clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_advancedNetworking(rInt int, clientId string, clientSecret string, location string, networkPlugin string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_virtual_network" "test" { - name = "acctestvirtnet%d" - address_space = ["10.1.0.0/16"] - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - - tags = { - environment = "Testing" - } -} - -resource "azurerm_subnet" "test" { - name = "acctestsubnet%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.1.0.0/24" - - # TODO: remove in 2.0 - lifecycle { - ignore_changes = ["route_table_id"] - } -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - linux_profile { - admin_username = "acctestuser%d" - - ssh_key { - key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" - } - } - - agent_pool_profile { - name = "default" - count = "2" - vm_size = "Standard_DS2_v2" - 
vnet_subnet_id = "${azurerm_subnet.test.id}" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } - - network_profile { - network_plugin = "%s" - } -} -`, rInt, location, rInt, rInt, rInt, rInt, rInt, clientId, clientSecret, networkPlugin) -} - -func testAccAzureRMKubernetesCluster_advancedNetworkingComplete(rInt int, clientId string, clientSecret string, location string, networkPlugin string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_route_table" "test" { - name = "akc-routetable-%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - - route { - name = "akc-route-%d" - address_prefix = "10.100.0.0/14" - next_hop_type = "VirtualAppliance" - next_hop_in_ip_address = "10.10.1.1" - } -} - -resource "azurerm_virtual_network" "test" { - name = "acctestvirtnet%d" - address_space = ["10.1.0.0/16"] - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - - tags = { - environment = "Testing" - } -} - -resource "azurerm_subnet" "test" { - name = "acctestsubnet%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.1.0.0/24" - route_table_id = "${azurerm_route_table.test.id}" -} - -resource "azurerm_subnet_route_table_association" "test" { - subnet_id = "${azurerm_subnet.test.id}" - route_table_id = "${azurerm_route_table.test.id}" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - linux_profile { - admin_username = "acctestuser%d" - - ssh_key { - key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" - } - } - - agent_pool_profile { - name = "default" - count = "2" - vm_size = "Standard_DS2_v2" - vnet_subnet_id = "${azurerm_subnet.test.id}" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } - - network_profile { - network_plugin = "%s" - dns_service_ip = "10.10.0.10" - docker_bridge_cidr = "172.18.0.1/16" - service_cidr = "10.10.0.0/16" - } -} -`, rInt, location, rInt, rInt, rInt, rInt, rInt, rInt, rInt, clientId, clientSecret, networkPlugin) -} - -func testAccAzureRMKubernetesCluster_advancedNetworkingWithPolicy(rInt int, clientId string, clientSecret string, location string, networkPlugin string, networkPolicy string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_virtual_network" "test" { - name = "acctestvirtnet%d" - address_space = ["10.1.0.0/16"] - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - - tags = { - environment = "Testing" - } -} - -resource "azurerm_subnet" "test" { - name = "acctestsubnet%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = 
"10.1.0.0/24" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - linux_profile { - admin_username = "acctestuser%d" - - ssh_key { - key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" - } - } - - agent_pool_profile { - name = "default" - count = "2" - vm_size = "Standard_DS2_v2" - vnet_subnet_id = "${azurerm_subnet.test.id}" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } - - network_profile { - network_plugin = "%s" - network_policy = "%s" - } -} -`, rInt, location, rInt, rInt, rInt, rInt, rInt, clientId, clientSecret, networkPlugin, networkPolicy) -} - -func testAccAzureRMKubernetesCluster_advancedNetworkingWithPolicyComplete(rInt int, clientId string, clientSecret string, location string, networkPlugin string, networkPolicy string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_route_table" "test" { - name = "akc-routetable-%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - - route { - name = "akc-route-%d" - address_prefix = "10.100.0.0/14" - next_hop_type = "VirtualAppliance" - next_hop_in_ip_address = "10.10.1.1" - } -} - -resource "azurerm_virtual_network" "test" { - name = "acctestvirtnet%d" - address_space = ["10.1.0.0/16"] - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - - tags = { - environment = "Testing" - } -} - -resource "azurerm_subnet" "test" { - name = "acctestsubnet%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.1.0.0/24" - route_table_id = "${azurerm_route_table.test.id}" -} - -resource "azurerm_subnet_route_table_association" "test" { - subnet_id = "${azurerm_subnet.test.id}" - route_table_id = "${azurerm_route_table.test.id}" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - linux_profile { - admin_username = "acctestuser%d" - - ssh_key { - key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" - } - } - - agent_pool_profile { - name = "default" - count = "2" - vm_size = "Standard_DS2_v2" - vnet_subnet_id = "${azurerm_subnet.test.id}" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } - - network_profile { - network_plugin = "%s" - network_policy = "%s" - dns_service_ip = "10.10.0.10" - docker_bridge_cidr = "172.18.0.1/16" - 
service_cidr = "10.10.0.0/16" - } -} -`, rInt, location, rInt, rInt, rInt, rInt, rInt, rInt, rInt, clientId, clientSecret, networkPlugin, networkPolicy) -} - -func testAccAzureRMKubernetesCluster_standardLoadBalancer(rInt int, clientId string, clientSecret string, location string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_virtual_network" "test" { - name = "acctestvirtnet%d" - address_space = ["10.1.0.0/16"] - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - - tags = { - environment = "Testing" - } -} - -resource "azurerm_subnet" "test" { - name = "acctestsubnet%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.1.0.0/24" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - kubernetes_version = "%s" - - - linux_profile { - admin_username = "acctestuser%d" - - ssh_key { - key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" - } - } - - agent_pool_profile { - name = "default" - count = "2" - vm_size = "Standard_DS2_v2" - vnet_subnet_id = "${azurerm_subnet.test.id}" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } - - network_profile { - network_plugin = "azure" - load_balancer_sku = "Standard" - } -} -`, rInt, location, rInt, rInt, rInt, rInt, currentKubernetesVersion, rInt, clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_standardLoadBalancerComplete(rInt int, clientId string, clientSecret string, location string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_route_table" "test" { - name = "akc-routetable-%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - - route { - name = "akc-route-%d" - address_prefix = "10.100.0.0/14" - next_hop_type = "VirtualAppliance" - next_hop_in_ip_address = "10.10.1.1" - } -} - -resource "azurerm_virtual_network" "test" { - name = "acctestvirtnet%d" - address_space = ["10.1.0.0/16"] - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - - tags = { - environment = "Testing" - } -} - -resource "azurerm_subnet" "test" { - name = "acctestsubnet%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.1.0.0/24" - route_table_id = "${azurerm_route_table.test.id}" -} - -resource "azurerm_subnet_route_table_association" "test" { - subnet_id = "${azurerm_subnet.test.id}" - route_table_id = "${azurerm_route_table.test.id}" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - 
dns_prefix = "acctestaks%d" - kubernetes_version = "%s" - - linux_profile { - admin_username = "acctestuser%d" - - ssh_key { - key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" - } - } - - agent_pool_profile { - name = "default" - count = "2" - vm_size = "Standard_DS2_v2" - vnet_subnet_id = "${azurerm_subnet.test.id}" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } - - network_profile { - network_plugin = "azure" - dns_service_ip = "10.10.0.10" - docker_bridge_cidr = "172.18.0.1/16" - service_cidr = "10.10.0.0/16" - load_balancer_sku = "Standard" - } -} -`, rInt, location, rInt, rInt, rInt, rInt, rInt, rInt, currentKubernetesVersion, rInt, clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_apiServerAuthorizedIPRanges(rInt int, clientId string, clientSecret string, location string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - agent_pool_profile { - name = "default" - count = "1" - vm_size = "Standard_DS2_v2" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } - - api_server_authorized_ip_ranges = [ - "8.8.8.8/32", - "8.8.4.4/32", - "8.8.2.0/24", - ] -} -`, rInt, location, rInt, rInt, clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_virtualMachineScaleSets(rInt int, clientId string, clientSecret string, location string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - agent_pool_profile { - name = "default" - type = "VirtualMachineScaleSets" - count = "1" - vm_size = "Standard_DS2_v2" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } -} -`, rInt, location, rInt, rInt, clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_multipleAgents(rInt int, clientId string, clientSecret string, location string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - agent_pool_profile { - name = "pool1" - count = "1" - vm_size = "Standard_DS2_v2" - } - - agent_pool_profile { - name = "pool2" - count = "1" - vm_size = "Standard_DS2_v2" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } -} -`, rInt, location, rInt, rInt, clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_autoscaleNoAvailabilityZones(rInt int, clientId string, clientSecret string, location string) string { - return fmt.Sprintf(` -resource 
"azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - agent_pool_profile { - name = "pool1" - min_count = "1" - max_count = "2" - enable_auto_scaling = "true" - type = "VirtualMachineScaleSets" - vm_size = "Standard_DS2_v2" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } -} -`, rInt, location, rInt, rInt, clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_autoscaleWithAvailabilityZones(rInt int, clientId string, clientSecret string, location string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - kubernetes_version = "%s" - - agent_pool_profile { - name = "pool1" - min_count = "1" - max_count = "2" - enable_auto_scaling = "true" - type = "VirtualMachineScaleSets" - vm_size = "Standard_DS2_v2" - availability_zones = ["1", "2"] - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } - - network_profile { - network_plugin = "kubenet" - load_balancer_sku = "Standard" - } -} -`, rInt, location, rInt, rInt, olderKubernetesVersion, clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_nodeTaints(rInt int, clientId string, clientSecret string, location string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - agent_pool_profile { - name = "default" - count = "1" - type = "VirtualMachineScaleSets" - vm_size = "Standard_DS2_v2" - } - - agent_pool_profile { - name = "pool1" - count = "1" - type = "VirtualMachineScaleSets" - vm_size = "Standard_DS2_v2" - node_taints = [ - "key=value:NoSchedule" - ] - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } -} -`, rInt, location, rInt, rInt, clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_nodeResourceGroup(rInt int, clientId string, clientSecret string, location string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - node_resource_group = "acctestRGAKS-%d" - - agent_pool_profile { - name = "default" - count = "1" - type = "VirtualMachineScaleSets" - vm_size = "Standard_DS2_v2" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } -} -`, rInt, location, rInt, rInt, rInt, clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_enablePodSecurityPolicy(rInt int, clientId string, clientSecret string, location string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = 
"acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - enable_pod_security_policy = true - - role_based_access_control { - enabled = true - } - - agent_pool_profile { - name = "default" - count = "1" - vm_size = "Standard_DS2_v2" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } -} -`, rInt, location, rInt, rInt, clientId, clientSecret) -} - -func testAccAzureRMKubernetesCluster_enableNodePublicIP(rInt int, clientId string, clientSecret string, location string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - agent_pool_profile { - name = "default" - count = "1" - type = "VirtualMachineScaleSets" - vm_size = "Standard_DS2_v2" - enable_node_public_ip = true - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } -} -`, rInt, location, rInt, rInt, clientId, clientSecret) -} From 7ca2fed43707d8411bc0456be7a1037eb5065d80 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 6 Nov 2019 16:14:16 +0100 Subject: [PATCH 14/45] r/kubernetes_cluster: adding tests for the existing setup --- ...urce_arm_kubernetes_cluster_legacy_test.go | 116 ++++++++++++++++++ 1 file changed, 116 insertions(+) create mode 100644 azurerm/resource_arm_kubernetes_cluster_legacy_test.go diff --git a/azurerm/resource_arm_kubernetes_cluster_legacy_test.go b/azurerm/resource_arm_kubernetes_cluster_legacy_test.go new file mode 100644 index 000000000000..eeadb44ad282 --- /dev/null +++ b/azurerm/resource_arm_kubernetes_cluster_legacy_test.go @@ -0,0 +1,116 @@ +package azurerm + +import ( + "fmt" + "os" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" +) + +// NOTE: all of the tests in this file are for functionality which will be removed in 2.0 + +func TestAccAzureRMKubernetesCluster_legacyAgentPoolProfileAvailabilitySet(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesCluster_legacyAgentPoolProfileAvailabilitySet(ri, clientId, clientSecret, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.type", "AvailabilitySet"), + ), + // since users are prompted to move to `default_node_pool` + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_legacyAgentPoolProfileVMSS(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: 
testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesCluster_legacyAgentPoolProfileVMSS(ri, clientId, clientSecret, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.type", "VirtualMachineScaleSet"), + ), + // since users are prompted to move to `default_node_pool` + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccAzureRMKubernetesCluster_legacyAgentPoolProfileAvailabilitySet(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + + agent_pool_profile { + name = "default" + count = "1" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } +} +`, rInt, location, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_legacyAgentPoolProfileVMSS(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + + agent_pool_profile { + name = "default" + count = "1" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } +} +`, rInt, location, rInt, rInt, clientId, clientSecret) +} From 437c90a15d1b9b220a7d9e684fbf532508a3ad31 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 7 Nov 2019 16:34:50 +0100 Subject: [PATCH 15/45] r/kubernetes_cluster: requiring that the default node pool is a linux pool --- .../services/containers/kubernetes_nodepool.go | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_nodepool.go b/azurerm/internal/services/containers/kubernetes_nodepool.go index 8aec153602ad..76d06cd4c42d 100644 --- a/azurerm/internal/services/containers/kubernetes_nodepool.go +++ b/azurerm/internal/services/containers/kubernetes_nodepool.go @@ -106,17 +106,6 @@ func SchemaDefaultNodePool() *schema.Schema { ValidateFunc: validation.IntAtLeast(1), }, - "os_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: string(containerservice.Linux), - ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.Linux), - string(containerservice.Windows), - }, false), - }, - "vnet_subnet_id": { Type: schema.TypeString, Optional: true, @@ -143,10 +132,14 @@ func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedC EnableAutoScaling: utils.Bool(enableAutoScaling), EnableNodePublicIP: utils.Bool(raw["enable_node_public_ip"].(bool)), Name: utils.String(raw["name"].(string)), - OsType: containerservice.OSType(raw["os_type"].(string)), Type: containerservice.AgentPoolType(raw["type"].(string)), VMSize: containerservice.VMSizeTypes(raw["vm_size"].(string)), + // at this time the default node pool has to be Linux or the AKS cluster fails to provision with: + // Pods not in Running status: 
coredns-7fc597cc45-v5z7x,coredns-autoscaler-7ccc76bfbd-djl7j,metrics-server-cbd95f966-5rl97,tunnelfront-7d9884977b-wpbvn + // Windows agents can be configured via the separate node pool resource + OsType: containerservice.Linux, + //// TODO: support these in time // OrchestratorVersion: nil, // ScaleSetEvictionPolicy: "", @@ -292,7 +285,6 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro "name": name, "node_taints": nodeTaints, "os_disk_size_gb": osDiskSizeGB, - "os_type": string(agentPool.OsType), "type": string(agentPool.Type), "vm_size": string(agentPool.VMSize), "vnet_subnet_id": vnetSubnetId, From 1c4b01145ed692ac642f7ea303e513d4b3396b54 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 7 Nov 2019 17:23:16 +0100 Subject: [PATCH 16/45] r/kubernetes_cluster: updating the tests --- azurerm/resource_arm_kubernetes_cluster.go | 1 + ...ource_arm_kubernetes_cluster_other_test.go | 262 ++++++++---------- ...rce_arm_kubernetes_cluster_scaling_test.go | 131 +++------ 3 files changed, 158 insertions(+), 236 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index 79ef21d9804a..0809b3dbf970 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -764,6 +764,7 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} agentProfiles = &agentProfilesLegacy } + // TODO: switch to updating via the AgentPools client existing.ManagedClusterProperties.AgentPoolProfiles = agentProfiles } diff --git a/azurerm/resource_arm_kubernetes_cluster_other_test.go b/azurerm/resource_arm_kubernetes_cluster_other_test.go index 15a663661f16..dd4452c4357d 100644 --- a/azurerm/resource_arm_kubernetes_cluster_other_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_other_test.go @@ -10,12 +10,52 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features" ) -func TestAccAzureRMKubernetesCluster_basic(t *testing.T) { +func TestAccAzureRMKubernetesCluster_basicAvailabilitySet(t *testing.T) { resourceName := "azurerm_kubernetes_cluster.test" ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_basic(ri, clientId, clientSecret, testLocation()) + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesCluster_basicAvailabilitySet(ri, clientId, clientSecret, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.#", "1"), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.azure_active_directory.#", "0"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_key"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_certificate"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.cluster_ca_certificate"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.host"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.username"), + resource.TestCheckResourceAttrSet(resourceName, 
"kube_config.0.password"), + resource.TestCheckResourceAttr(resourceName, "kube_admin_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "kube_admin_config_raw", ""), + resource.TestCheckResourceAttr(resourceName, "network_profile.0.load_balancer_sku", "Basic"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_basicVMSS(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -23,7 +63,7 @@ func TestAccAzureRMKubernetesCluster_basic(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_basicVMSS(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "role_based_access_control.#", "1"), @@ -37,7 +77,6 @@ func TestAccAzureRMKubernetesCluster_basic(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.password"), resource.TestCheckResourceAttr(resourceName, "kube_admin_config.#", "0"), resource.TestCheckResourceAttr(resourceName, "kube_admin_config_raw", ""), - resource.TestCheckResourceAttrSet(resourceName, "agent_pool_profile.0.max_pods"), resource.TestCheckResourceAttr(resourceName, "network_profile.0.load_balancer_sku", "Basic"), ), }, @@ -69,7 +108,7 @@ func TestAccAzureRMKubernetesCluster_requiresImport(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMKubernetesCluster_basic(ri, clientId, clientSecret, location), + Config: testAccAzureRMKubernetesCluster_basicVMSS(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), ), @@ -87,7 +126,7 @@ func TestAccAzureRMKubernetesCluster_linuxProfile(t *testing.T) { ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_linuxProfile(ri, clientId, clientSecret, testLocation()) + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -95,7 +134,7 @@ func TestAccAzureRMKubernetesCluster_linuxProfile(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_linuxProfile(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_key"), @@ -104,7 +143,6 @@ func TestAccAzureRMKubernetesCluster_linuxProfile(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.host"), resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.username"), resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.password"), - resource.TestCheckResourceAttrSet(resourceName, "agent_pool_profile.0.max_pods"), resource.TestCheckResourceAttrSet(resourceName, "linux_profile.0.admin_username"), ), }, @@ -123,7 +161,7 @@ 
func TestAccAzureRMKubernetesCluster_nodeTaints(t *testing.T) { ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_nodeTaints(ri, clientId, clientSecret, testLocation()) + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -131,12 +169,18 @@ func TestAccAzureRMKubernetesCluster_nodeTaints(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_nodeTaints(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.1.node_taints.0", "key=value:NoSchedule"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.node_taints.0", "key=value:NoSchedule"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, + }, }, }) } @@ -146,7 +190,7 @@ func TestAccAzureRMKubernetesCluster_nodeResourceGroup(t *testing.T) { ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_nodeResourceGroup(ri, clientId, clientSecret, testLocation()) + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -154,7 +198,7 @@ func TestAccAzureRMKubernetesCluster_nodeResourceGroup(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_nodeResourceGroup(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), ), @@ -199,53 +243,12 @@ func TestAccAzureRMKubernetesCluster_upgradeConfig(t *testing.T) { }) } -func TestAccAzureRMKubernetesCluster_virtualMachineScaleSets(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_virtualMachineScaleSets(ri, clientId, clientSecret, testLocation()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "role_based_access_control.#", "1"), - resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.enabled", "false"), - resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.azure_active_directory.#", "0"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_key"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_certificate"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.cluster_ca_certificate"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.host"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.username"), - resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.password"), - 
resource.TestCheckResourceAttr(resourceName, "kube_admin_config.#", "0"), - resource.TestCheckResourceAttr(resourceName, "kube_admin_config_raw", ""), - resource.TestCheckResourceAttrSet(resourceName, "agent_pool_profile.0.max_pods"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.type", "VirtualMachineScaleSets"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, - }, - }, - }) -} - func TestAccAzureRMKubernetesCluster_windowsProfile(t *testing.T) { resourceName := "azurerm_kubernetes_cluster.test" ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_windowsProfile(ri, clientId, clientSecret, testLocation()) + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -253,7 +256,7 @@ func TestAccAzureRMKubernetesCluster_windowsProfile(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_windowsProfile(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_key"), @@ -262,8 +265,7 @@ func TestAccAzureRMKubernetesCluster_windowsProfile(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.host"), resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.username"), resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.password"), - resource.TestCheckResourceAttrSet(resourceName, "agent_pool_profile.0.max_pods"), - resource.TestCheckResourceAttrSet(resourceName, "agent_pool_profile.1.max_pods"), + resource.TestCheckResourceAttrSet(resourceName, "default_node_pool.0.max_pods"), resource.TestCheckResourceAttrSet(resourceName, "linux_profile.0.admin_username"), resource.TestCheckResourceAttrSet(resourceName, "windows_profile.0.admin_username"), ), @@ -281,7 +283,7 @@ func TestAccAzureRMKubernetesCluster_windowsProfile(t *testing.T) { }) } -func testAccAzureRMKubernetesCluster_basic(rInt int, clientId string, clientSecret string, location string) string { +func testAccAzureRMKubernetesCluster_basicAvailabilitySet(rInt int, clientId string, clientSecret string, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { name = "acctestRG-%d" @@ -290,13 +292,41 @@ resource "azurerm_resource_group" "test" { resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" - agent_pool_profile { + default_node_pool { name = "default" count = "1" + type = "AvailabilitySet" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } +} +`, rInt, location, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_basicVMSS(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" 
+ location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + + default_node_pool { + name = "default" + count = 1 vm_size = "Standard_DS2_v2" } @@ -309,19 +339,19 @@ resource "azurerm_kubernetes_cluster" "test" { } func testAccAzureRMKubernetesCluster_requiresImport(rInt int, clientId, clientSecret, location string) string { - template := testAccAzureRMKubernetesCluster_basic(rInt, clientId, clientSecret, location) + template := testAccAzureRMKubernetesCluster_basicVMSS(rInt, clientId, clientSecret, location) return fmt.Sprintf(` %s resource "azurerm_kubernetes_cluster" "import" { - name = "${azurerm_kubernetes_cluster.test.name}" - location = "${azurerm_kubernetes_cluster.test.location}" - resource_group_name = "${azurerm_kubernetes_cluster.test.resource_group_name}" - dns_prefix = "${azurerm_kubernetes_cluster.test.dns_prefix}" + name = azurerm_kubernetes_cluster.test.name + location = azurerm_kubernetes_cluster.test.location + resource_group_name = azurerm_kubernetes_cluster.test.resource_group_name + dns_prefix = azurerm_kubernetes_cluster.test.dns_prefix - agent_pool_profile { + default_node_pool { name = "default" - count = "1" + count = 1 vm_size = "Standard_DS2_v2" } @@ -342,8 +372,8 @@ resource "azurerm_resource_group" "test" { resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" linux_profile { @@ -354,9 +384,9 @@ resource "azurerm_kubernetes_cluster" "test" { } } - agent_pool_profile { + default_node_pool { name = "default" - count = "1" + count = 1 vm_size = "Standard_DS2_v2" } @@ -377,21 +407,13 @@ resource "azurerm_resource_group" "test" { resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" - agent_pool_profile { + default_node_pool { name = "default" - count = "1" - type = "VirtualMachineScaleSets" - vm_size = "Standard_DS2_v2" - } - - agent_pool_profile { - name = "pool1" - count = "1" - type = "VirtualMachineScaleSets" + count = 1 vm_size = "Standard_DS2_v2" node_taints = [ "key=value:NoSchedule" @@ -415,15 +437,14 @@ resource "azurerm_resource_group" "test" { resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" node_resource_group = "acctestRGAKS-%d" - agent_pool_profile { + default_node_pool { name = "default" - count = "1" - type = "VirtualMachineScaleSets" + count = 1 vm_size = "Standard_DS2_v2" } @@ -444,8 +465,8 @@ resource "azurerm_resource_group" "test" { resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" kubernetes_version = "%s" @@ 
-457,9 +478,9 @@ resource "azurerm_kubernetes_cluster" "test" { } } - agent_pool_profile { + node_pool_profile { name = "default" - count = "1" + count = 1 vm_size = "Standard_DS2_v2" } @@ -471,34 +492,6 @@ resource "azurerm_kubernetes_cluster" "test" { `, rInt, location, rInt, rInt, version, rInt, clientId, clientSecret) } -func testAccAzureRMKubernetesCluster_virtualMachineScaleSets(rInt int, clientId string, clientSecret string, location string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - agent_pool_profile { - name = "default" - type = "VirtualMachineScaleSets" - count = "1" - vm_size = "Standard_DS2_v2" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } -} -`, rInt, location, rInt, rInt, clientId, clientSecret) -} - func testAccAzureRMKubernetesCluster_windowsProfile(rInt int, clientId string, clientSecret string, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { @@ -522,27 +515,14 @@ resource "azurerm_kubernetes_cluster" "test" { windows_profile { admin_username = "azureuser" - admin_password = "pass_123-worD" - } - - agent_pool_profile { - name = "linux" - type = "VirtualMachineScaleSets" - count = "1" - vm_size = "Standard_DS2_v2" - max_pods = 30 - os_type = "Linux" - os_disk_size_gb = "30" + admin_password = "P@55W0rd1234!" } - agent_pool_profile { - name = "win" - type = "VirtualMachineScaleSets" - count = "1" - vm_size = "Standard_DS3_v2" - max_pods = 30 - os_type = "Windows" - os_disk_size_gb = "30" + # the default node pool /has/ to be Linux agents - Windows agents can be added via the node pools resource + default_node_pool { + name = "np" + count = 3 + vm_size = "Standard_DS2_v2" } service_principal { diff --git a/azurerm/resource_arm_kubernetes_cluster_scaling_test.go b/azurerm/resource_arm_kubernetes_cluster_scaling_test.go index 794889a3b5f0..97ae4b0f5453 100644 --- a/azurerm/resource_arm_kubernetes_cluster_scaling_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_scaling_test.go @@ -14,8 +14,7 @@ func TestAccAzureRMKubernetesCluster_addAgent(t *testing.T) { ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - initConfig := testAccAzureRMKubernetesCluster_basic(ri, clientId, clientSecret, testLocation()) - addAgentConfig := testAccAzureRMKubernetesCluster_addAgent(ri, clientId, clientSecret, testLocation()) + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -23,15 +22,16 @@ func TestAccAzureRMKubernetesCluster_addAgent(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: initConfig, + Config: testAccAzureRMKubernetesCluster_addAgent(ri, clientId, clientSecret, location, 1), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.count", "2"), ), }, { - Config: addAgentConfig, + Config: testAccAzureRMKubernetesCluster_addAgent(ri, clientId, clientSecret, location, 2), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.count", "2"), + 
resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.count", "2"), ), }, }, @@ -43,7 +43,7 @@ func TestAccAzureRMKubernetesCluster_autoScalingNoAvailabilityZones(t *testing.T ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_autoscaleNoAvailabilityZones(ri, clientId, clientSecret, testLocation()) + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -51,13 +51,13 @@ func TestAccAzureRMKubernetesCluster_autoScalingNoAvailabilityZones(t *testing.T CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_autoscaleNoAvailabilityZones(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.type", "VirtualMachineScaleSets"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.min_count", "1"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.max_count", "2"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.enable_auto_scaling", "true"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.type", "VirtualMachineScaleSets"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.min_count", "1"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.max_count", "2"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.enable_auto_scaling", "true"), ), }, { @@ -75,7 +75,7 @@ func TestAccAzureRMKubernetesCluster_autoScalingWithAvailabilityZones(t *testing ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_autoscaleWithAvailabilityZones(ri, clientId, clientSecret, testLocation()) + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -83,16 +83,16 @@ func TestAccAzureRMKubernetesCluster_autoScalingWithAvailabilityZones(t *testing CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_autoscaleWithAvailabilityZones(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.type", "VirtualMachineScaleSets"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.min_count", "1"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.max_count", "2"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.enable_auto_scaling", "true"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.availability_zones.#", "2"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.availability_zones.0", "1"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.availability_zones.1", "2"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.type", "VirtualMachineScaleSets"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.min_count", "1"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.max_count", "2"), + resource.TestCheckResourceAttr(resourceName, 
"default_node_pool.0.enable_auto_scaling", "true"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.availability_zones.#", "2"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.availability_zones.0", "1"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.availability_zones.1", "2"), ), }, { @@ -105,31 +105,7 @@ func TestAccAzureRMKubernetesCluster_autoScalingWithAvailabilityZones(t *testing }) } -func TestAccAzureRMKubernetesCluster_multipleAgents(t *testing.T) { - resourceName := "azurerm_kubernetes_cluster.test" - ri := tf.AccRandTimeInt() - clientId := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_multipleAgents(ri, clientId, clientSecret, testLocation()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.name", "pool1"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.1.name", "pool2"), - ), - }, - }, - }) -} - -func testAccAzureRMKubernetesCluster_addAgent(rInt int, clientId string, clientSecret string, location string) string { +func testAccAzureRMKubernetesCluster_addAgent(rInt int, clientId, clientSecret, location string, numberOfAgents int) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { name = "acctestRG-%d" @@ -138,13 +114,13 @@ resource "azurerm_resource_group" "test" { resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" - agent_pool_profile { + default_node_pool { name = "default" - count = "2" + count = %d vm_size = "Standard_DS2_v2" } @@ -153,7 +129,7 @@ resource "azurerm_kubernetes_cluster" "test" { client_secret = "%s" } } -`, rInt, location, rInt, rInt, clientId, clientSecret) +`, rInt, location, rInt, rInt, numberOfAgents, clientId, clientSecret) } func testAccAzureRMKubernetesCluster_autoscaleNoAvailabilityZones(rInt int, clientId string, clientSecret string, location string) string { @@ -165,16 +141,15 @@ resource "azurerm_resource_group" "test" { resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" agent_pool_profile { name = "pool1" - min_count = "1" - max_count = "2" - enable_auto_scaling = "true" - type = "VirtualMachineScaleSets" + min_count = 1 + max_count = 2 + enable_auto_scaling = true vm_size = "Standard_DS2_v2" } @@ -195,17 +170,16 @@ resource "azurerm_resource_group" "test" { resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" kubernetes_version = "%s" 
agent_pool_profile { name = "pool1" - min_count = "1" - max_count = "2" - enable_auto_scaling = "true" - type = "VirtualMachineScaleSets" + min_count = 1 + max_count = 2 + enable_auto_scaling = true vm_size = "Standard_DS2_v2" availability_zones = ["1", "2"] } @@ -222,36 +196,3 @@ resource "azurerm_kubernetes_cluster" "test" { } `, rInt, location, rInt, rInt, olderKubernetesVersion, clientId, clientSecret) } - -func testAccAzureRMKubernetesCluster_multipleAgents(rInt int, clientId string, clientSecret string, location string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" -} - -resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - dns_prefix = "acctestaks%d" - - agent_pool_profile { - name = "pool1" - count = "1" - vm_size = "Standard_DS2_v2" - } - - agent_pool_profile { - name = "pool2" - count = "1" - vm_size = "Standard_DS2_v2" - } - - service_principal { - client_id = "%s" - client_secret = "%s" - } -} -`, rInt, location, rInt, rInt, clientId, clientSecret) -} From 398fe83339c0893894c4af18f48bd25df6683caf Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 7 Nov 2019 17:47:08 +0100 Subject: [PATCH 17/45] r/kubernetes_cluster: fixing up the networking related tests --- ...rce_arm_kubernetes_cluster_network_test.go | 346 +++++++++++------- 1 file changed, 208 insertions(+), 138 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster_network_test.go b/azurerm/resource_arm_kubernetes_cluster_network_test.go index 7ac14c624eaa..190a3cca83f1 100644 --- a/azurerm/resource_arm_kubernetes_cluster_network_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_network_test.go @@ -14,7 +14,7 @@ func TestAccAzureRMKubernetesCluster_advancedNetworkingKubenet(t *testing.T) { ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_advancedNetworking(ri, clientId, clientSecret, testLocation(), "kubenet") + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -22,12 +22,20 @@ func TestAccAzureRMKubernetesCluster_advancedNetworkingKubenet(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_advancedNetworking(ri, clientId, clientSecret, location, "kubenet"), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_plugin", "kubenet"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "service_principal.0.client_secret", + }, + }, }, }) } @@ -37,7 +45,7 @@ func TestAccAzureRMKubernetesCluster_advancedNetworkingKubenetComplete(t *testin ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_advancedNetworkingComplete(ri, clientId, clientSecret, testLocation(), "kubenet") + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -45,12 +53,20 @@ func TestAccAzureRMKubernetesCluster_advancedNetworkingKubenetComplete(t *testin CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: 
[]resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_advancedNetworkingComplete(ri, clientId, clientSecret, location, "kubenet"), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_plugin", "kubenet"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "service_principal.0.client_secret", + }, + }, }, }) } @@ -60,7 +76,7 @@ func TestAccAzureRMKubernetesCluster_advancedNetworkingAzure(t *testing.T) { ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_advancedNetworking(ri, clientId, clientSecret, testLocation(), "azure") + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -68,12 +84,20 @@ func TestAccAzureRMKubernetesCluster_advancedNetworkingAzure(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_advancedNetworking(ri, clientId, clientSecret, location, "azure"), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_plugin", "azure"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "service_principal.0.client_secret", + }, + }, }, }) } @@ -83,7 +107,7 @@ func TestAccAzureRMKubernetesCluster_advancedNetworkingAzureComplete(t *testing. ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_advancedNetworkingComplete(ri, clientId, clientSecret, testLocation(), "azure") + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -91,12 +115,20 @@ func TestAccAzureRMKubernetesCluster_advancedNetworkingAzureComplete(t *testing. 
CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_advancedNetworkingComplete(ri, clientId, clientSecret, location, "azure"), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_plugin", "azure"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "service_principal.0.client_secret", + }, + }, }, }) } @@ -106,7 +138,7 @@ func TestAccAzureRMKubernetesCluster_advancedNetworkingAzureCalicoPolicy(t *test ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_advancedNetworkingWithPolicy(ri, clientId, clientSecret, testLocation(), "azure", "calico") + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -114,13 +146,21 @@ func TestAccAzureRMKubernetesCluster_advancedNetworkingAzureCalicoPolicy(t *test CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_advancedNetworkingWithPolicy(ri, clientId, clientSecret, location, "azure", "calico"), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_plugin", "azure"), resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_policy", "calico"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "service_principal.0.client_secret", + }, + }, }, }) } @@ -130,7 +170,7 @@ func TestAccAzureRMKubernetesCluster_advancedNetworkingAzureCalicoPolicyComplete ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_advancedNetworkingWithPolicyComplete(ri, clientId, clientSecret, testLocation(), "azure", "calico") + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -138,13 +178,21 @@ func TestAccAzureRMKubernetesCluster_advancedNetworkingAzureCalicoPolicyComplete CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_advancedNetworkingWithPolicyComplete(ri, clientId, clientSecret, location, "azure", "calico"), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_plugin", "azure"), resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_policy", "calico"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "service_principal.0.client_secret", + }, + }, }, }) } @@ -154,7 +202,7 @@ func TestAccAzureRMKubernetesCluster_advancedNetworkingAzureNPMPolicy(t *testing ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_advancedNetworkingWithPolicy(ri, clientId, clientSecret, testLocation(), "azure", "azure") + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() 
{ testAccPreCheck(t) }, @@ -162,13 +210,21 @@ func TestAccAzureRMKubernetesCluster_advancedNetworkingAzureNPMPolicy(t *testing CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_advancedNetworkingWithPolicy(ri, clientId, clientSecret, location, "azure", "azure"), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_plugin", "azure"), resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_policy", "azure"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "service_principal.0.client_secret", + }, + }, }, }) } @@ -178,7 +234,7 @@ func TestAccAzureRMKubernetesCluster_advancedNetworkingAzureNPMPolicyComplete(t ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_advancedNetworkingWithPolicyComplete(ri, clientId, clientSecret, testLocation(), "azure", "azure") + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -186,13 +242,21 @@ func TestAccAzureRMKubernetesCluster_advancedNetworkingAzureNPMPolicyComplete(t CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_advancedNetworkingWithPolicyComplete(ri, clientId, clientSecret, location, "azure", "azure"), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_plugin", "azure"), resource.TestCheckResourceAttr(resourceName, "network_profile.0.network_policy", "azure"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "service_principal.0.client_secret", + }, + }, }, }) } @@ -202,7 +266,7 @@ func TestAccAzureRMKubernetesCluster_enableNodePublicIP(t *testing.T) { ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_enableNodePublicIP(ri, clientId, clientSecret, testLocation()) + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -210,12 +274,20 @@ func TestAccAzureRMKubernetesCluster_enableNodePublicIP(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_enableNodePublicIP(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.enable_node_public_ip", "true"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.enable_node_public_ip", "true"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "service_principal.0.client_secret", + }, + }, }, }) } @@ -225,7 +297,7 @@ func TestAccAzureRMKubernetesCluster_internalNetwork(t *testing.T) { ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_internalNetwork(ri, 
clientId, clientSecret, testLocation()) + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -233,12 +305,20 @@ func TestAccAzureRMKubernetesCluster_internalNetwork(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_internalNetwork(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.max_pods", "60"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.max_pods", "60"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "service_principal.0.client_secret", + }, + }, }, }) } @@ -248,7 +328,7 @@ func TestAccAzureRMKubernetesCluster_standardLoadBalancer(t *testing.T) { ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_standardLoadBalancer(ri, clientId, clientSecret, testLocation()) + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -256,12 +336,20 @@ func TestAccAzureRMKubernetesCluster_standardLoadBalancer(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_standardLoadBalancer(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "network_profile.0.load_balancer_sku", "Standard"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "service_principal.0.client_secret", + }, + }, }, }) } @@ -271,7 +359,7 @@ func TestAccAzureRMKubernetesCluster_standardLoadBalancerComplete(t *testing.T) ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_standardLoadBalancerComplete(ri, clientId, clientSecret, testLocation()) + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -279,12 +367,20 @@ func TestAccAzureRMKubernetesCluster_standardLoadBalancerComplete(t *testing.T) CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_standardLoadBalancerComplete(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "network_profile.0.load_balancer_sku", "Standard"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "service_principal.0.client_secret", + }, + }, }, }) } @@ -299,18 +395,14 @@ resource "azurerm_resource_group" "test" { resource "azurerm_virtual_network" "test" { name = "acctestvirtnet%d" address_space = ["10.1.0.0/16"] - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - - tags = { - environment = "Testing" - } + location = azurerm_resource_group.test.location + resource_group_name = 
azurerm_resource_group.test.name } resource "azurerm_subnet" "test" { name = "acctestsubnet%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name address_prefix = "10.1.0.0/24" # TODO: remove in 2.0 @@ -321,8 +413,8 @@ resource "azurerm_subnet" "test" { resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" linux_profile { @@ -333,11 +425,11 @@ resource "azurerm_kubernetes_cluster" "test" { } } - agent_pool_profile { + default_node_pool { name = "default" - count = "2" + count = 2 vm_size = "Standard_DS2_v2" - vnet_subnet_id = "${azurerm_subnet.test.id}" + vnet_subnet_id = azurerm_subnet.test.id } service_principal { @@ -361,8 +453,8 @@ resource "azurerm_resource_group" "test" { resource "azurerm_route_table" "test" { name = "akc-routetable-%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name route { name = "akc-route-%d" @@ -375,31 +467,31 @@ resource "azurerm_route_table" "test" { resource "azurerm_virtual_network" "test" { name = "acctestvirtnet%d" address_space = ["10.1.0.0/16"] - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - - tags = { - environment = "Testing" - } + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name } resource "azurerm_subnet" "test" { name = "acctestsubnet%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name address_prefix = "10.1.0.0/24" - route_table_id = "${azurerm_route_table.test.id}" + + # TODO: remove in 2.0 + lifecycle { + ignore_changes = ["route_table_id"] + } } resource "azurerm_subnet_route_table_association" "test" { - subnet_id = "${azurerm_subnet.test.id}" - route_table_id = "${azurerm_route_table.test.id}" + subnet_id = azurerm_subnet.test.id + route_table_id = azurerm_route_table.test.id } resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" linux_profile { @@ -410,11 +502,11 @@ resource "azurerm_kubernetes_cluster" "test" { } } - agent_pool_profile { + default_node_pool { name = "default" - count = "2" + count = 2 vm_size = "Standard_DS2_v2" - vnet_subnet_id = "${azurerm_subnet.test.id}" + vnet_subnet_id = azurerm_subnet.test.id } service_principal { @@ -442,25 +534,21 @@ resource "azurerm_resource_group" "test" { resource "azurerm_virtual_network" "test" { name = "acctestvirtnet%d" address_space = ["10.1.0.0/16"] - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - - tags = { - environment 
= "Testing" - } + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name } resource "azurerm_subnet" "test" { name = "acctestsubnet%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name address_prefix = "10.1.0.0/24" } resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" linux_profile { @@ -471,11 +559,11 @@ resource "azurerm_kubernetes_cluster" "test" { } } - agent_pool_profile { + default_node_pool { name = "default" - count = "2" + count = 2 vm_size = "Standard_DS2_v2" - vnet_subnet_id = "${azurerm_subnet.test.id}" + vnet_subnet_id = azurerm_subnet.test.id } service_principal { @@ -500,8 +588,8 @@ resource "azurerm_resource_group" "test" { resource "azurerm_route_table" "test" { name = "akc-routetable-%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name route { name = "akc-route-%d" @@ -514,31 +602,27 @@ resource "azurerm_route_table" "test" { resource "azurerm_virtual_network" "test" { name = "acctestvirtnet%d" address_space = ["10.1.0.0/16"] - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - - tags = { - environment = "Testing" - } + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name } resource "azurerm_subnet" "test" { name = "acctestsubnet%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name address_prefix = "10.1.0.0/24" - route_table_id = "${azurerm_route_table.test.id}" + route_table_id = azurerm_route_table.test.id } resource "azurerm_subnet_route_table_association" "test" { - subnet_id = "${azurerm_subnet.test.id}" - route_table_id = "${azurerm_route_table.test.id}" + subnet_id = azurerm_subnet.test.id + route_table_id = azurerm_route_table.test.id } resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" linux_profile { @@ -549,11 +633,11 @@ resource "azurerm_kubernetes_cluster" "test" { } } - agent_pool_profile { + default_node_pool { name = "default" - count = "2" + count = 2 vm_size = "Standard_DS2_v2" - vnet_subnet_id = "${azurerm_subnet.test.id}" + vnet_subnet_id = azurerm_subnet.test.id } service_principal { @@ -581,14 +665,13 @@ resource "azurerm_resource_group" "test" { resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = 
azurerm_resource_group.test.name dns_prefix = "acctestaks%d" - agent_pool_profile { + default_node_pool { name = "default" - count = "1" - type = "VirtualMachineScaleSets" + count = 1 vm_size = "Standard_DS2_v2" enable_node_public_ip = true } @@ -611,18 +694,14 @@ resource "azurerm_resource_group" "test" { resource "azurerm_virtual_network" "test" { name = "acctestvirtnet%d" address_space = ["172.0.0.0/16"] - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - - tags = { - environment = "Testing" - } + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name } resource "azurerm_subnet" "test" { name = "acctestsubnet%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name address_prefix = "172.0.2.0/24" # TODO: remove in 2.0 @@ -633,8 +712,8 @@ resource "azurerm_subnet" "test" { resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" linux_profile { @@ -645,11 +724,11 @@ resource "azurerm_kubernetes_cluster" "test" { } } - agent_pool_profile { + default_node_pool { name = "default" - count = "2" + count = 2 vm_size = "Standard_DS2_v2" - vnet_subnet_id = "${azurerm_subnet.test.id}" + vnet_subnet_id = azurerm_subnet.test.id max_pods = 60 } @@ -671,29 +750,24 @@ resource "azurerm_resource_group" "test" { resource "azurerm_virtual_network" "test" { name = "acctestvirtnet%d" address_space = ["10.1.0.0/16"] - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - - tags = { - environment = "Testing" - } + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name } resource "azurerm_subnet" "test" { name = "acctestsubnet%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name address_prefix = "10.1.0.0/24" } resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" kubernetes_version = "%s" - linux_profile { admin_username = "acctestuser%d" @@ -702,11 +776,11 @@ resource "azurerm_kubernetes_cluster" "test" { } } - agent_pool_profile { + default_node_pool { name = "default" - count = "2" + count = 2 vm_size = "Standard_DS2_v2" - vnet_subnet_id = "${azurerm_subnet.test.id}" + vnet_subnet_id = azurerm_subnet.test.id } service_principal { @@ -731,8 +805,8 @@ resource "azurerm_resource_group" "test" { resource "azurerm_route_table" "test" { name = "akc-routetable-%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name route { 
name = "akc-route-%d" @@ -745,31 +819,27 @@ resource "azurerm_route_table" "test" { resource "azurerm_virtual_network" "test" { name = "acctestvirtnet%d" address_space = ["10.1.0.0/16"] - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - - tags = { - environment = "Testing" - } + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name } resource "azurerm_subnet" "test" { name = "acctestsubnet%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name address_prefix = "10.1.0.0/24" - route_table_id = "${azurerm_route_table.test.id}" + route_table_id = azurerm_route_table.test.id } resource "azurerm_subnet_route_table_association" "test" { - subnet_id = "${azurerm_subnet.test.id}" - route_table_id = "${azurerm_route_table.test.id}" + subnet_id = azurerm_subnet.test.id + route_table_id = azurerm_route_table.test.id } resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" kubernetes_version = "%s" @@ -781,11 +851,11 @@ resource "azurerm_kubernetes_cluster" "test" { } } - agent_pool_profile { + default_node_pool { name = "default" - count = "2" + count = 2 vm_size = "Standard_DS2_v2" - vnet_subnet_id = "${azurerm_subnet.test.id}" + vnet_subnet_id = azurerm_subnet.test.id } service_principal { From 1f55a450bc790e4904da156c2c4821f963cfa706 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 7 Nov 2019 18:05:07 +0100 Subject: [PATCH 18/45] r/kubernetes_cluster: updating the auth tests --- ...source_arm_kubernetes_cluster_auth_test.go | 36 +++++++++---------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster_auth_test.go b/azurerm/resource_arm_kubernetes_cluster_auth_test.go index c851e3528396..e7cb42c0fb6b 100644 --- a/azurerm/resource_arm_kubernetes_cluster_auth_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_auth_test.go @@ -14,7 +14,7 @@ func TestAccAzureRMKubernetesCluster_apiServerAuthorizedIPRanges(t *testing.T) { ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_apiServerAuthorizedIPRanges(ri, clientId, clientSecret, testLocation()) + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -22,7 +22,7 @@ func TestAccAzureRMKubernetesCluster_apiServerAuthorizedIPRanges(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_apiServerAuthorizedIPRanges(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "role_based_access_control.#", "1"), @@ -36,7 +36,7 @@ func TestAccAzureRMKubernetesCluster_apiServerAuthorizedIPRanges(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.password"), resource.TestCheckResourceAttr(resourceName, "kube_admin_config.#", "0"), 
resource.TestCheckResourceAttr(resourceName, "kube_admin_config_raw", ""), - resource.TestCheckResourceAttrSet(resourceName, "agent_pool_profile.0.max_pods"), + resource.TestCheckResourceAttrSet(resourceName, "default_node_pool.0.max_pods"), resource.TestCheckResourceAttr(resourceName, "api_server_authorized_ip_ranges.#", "3"), ), }, @@ -55,7 +55,7 @@ func TestAccAzureRMKubernetesCluster_enablePodSecurityPolicy(t *testing.T) { ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_enablePodSecurityPolicy(ri, clientId, clientSecret, testLocation()) + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -63,7 +63,7 @@ func TestAccAzureRMKubernetesCluster_enablePodSecurityPolicy(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_enablePodSecurityPolicy(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "enable_pod_security_policy", "true"), @@ -179,13 +179,13 @@ resource "azurerm_resource_group" "test" { resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" - agent_pool_profile { + default_node_pool { name = "default" - count = "1" + count = 1 vm_size = "Standard_DS2_v2" } @@ -212,8 +212,8 @@ resource "azurerm_resource_group" "test" { resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" enable_pod_security_policy = true @@ -221,9 +221,9 @@ resource "azurerm_kubernetes_cluster" "test" { enabled = true } - agent_pool_profile { + default_node_pool { name = "default" - count = "1" + count = 1 vm_size = "Standard_DS2_v2" } @@ -256,9 +256,9 @@ resource "azurerm_kubernetes_cluster" "test" { } } - agent_pool_profile { + default_node_pool { name = "default" - count = "1" + count = 1 vm_size = "Standard_DS2_v2" } @@ -299,9 +299,9 @@ resource "azurerm_kubernetes_cluster" "test" { } } - agent_pool_profile { + default_node_pool { name = "default" - count = "1" + count = 1 vm_size = "Standard_DS2_v2" } @@ -317,7 +317,7 @@ resource "azurerm_kubernetes_cluster" "test" { server_app_id = "%s" server_app_secret = "%s" client_app_id = "%s" - tenant_id = "${var.tenant_id}" + tenant_id = var.tenant_id } } } From b6e523b62dc250e5f0303f99c1b8d4a36ae24e87 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 7 Nov 2019 18:21:51 +0100 Subject: [PATCH 19/45] r/kubernetes_cluster: updating the remaining tests --- ...urce_arm_kubernetes_cluster_addons_test.go | 206 +++++++++++------- ...rce_arm_kubernetes_cluster_scaling_test.go | 4 +- 2 files changed, 126 insertions(+), 84 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster_addons_test.go b/azurerm/resource_arm_kubernetes_cluster_addons_test.go index ea483eecf557..1d900c44fa74 100644 --- 
a/azurerm/resource_arm_kubernetes_cluster_addons_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_addons_test.go @@ -14,7 +14,7 @@ func TestAccAzureRMKubernetesCluster_addonProfileAciConnectorLinux(t *testing.T) ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_addonProfileAciConnectorLinux(ri, clientId, clientSecret, testLocation()) + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -22,7 +22,7 @@ func TestAccAzureRMKubernetesCluster_addonProfileAciConnectorLinux(t *testing.T) CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_addonProfileAciConnectorLinux(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.http_application_routing.#", "0"), @@ -31,6 +31,12 @@ func TestAccAzureRMKubernetesCluster_addonProfileAciConnectorLinux(t *testing.T) resource.TestCheckResourceAttr(resourceName, "addon_profile.0.aci_connector_linux.0.subnet_name", fmt.Sprintf("acctestsubnet-aci%d", ri)), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, + }, }, }) } @@ -41,7 +47,6 @@ func TestAccAzureRMKubernetesCluster_addonProfileAciConnectorLinuxDisabled(t *te clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") location := testLocation() - disablingConfig := testAccAzureRMKubernetesCluster_addonProfileAciConnectorLinuxDisabled(ri, clientId, clientSecret, location) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -49,16 +54,22 @@ func TestAccAzureRMKubernetesCluster_addonProfileAciConnectorLinuxDisabled(t *te CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: disablingConfig, + Config: testAccAzureRMKubernetesCluster_addonProfileAciConnectorLinuxDisabled(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.http_application_routing.#", "0"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.count", "1"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.count", "1"), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.aci_connector_linux.#", "1"), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.aci_connector_linux.0.enabled", "false"), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.aci_connector_linux.0.subnet_name", ""), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, + }, }, }) } @@ -68,7 +79,7 @@ func TestAccAzureRMKubernetesCluster_addonProfileAzurePolicy(t *testing.T) { ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_addonProfileAzurePolicy(ri, clientId, clientSecret, testLocation()) + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -76,13 +87,19 @@ func 
TestAccAzureRMKubernetesCluster_addonProfileAzurePolicy(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_addonProfileAzurePolicy(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.azure_policy.#", "1"), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.azure_policy.0.enabled", "true"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, + }, }, }) } @@ -92,7 +109,7 @@ func TestAccAzureRMKubernetesCluster_addonProfileKubeDashboard(t *testing.T) { ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_addonProfileKubeDashboard(ri, clientId, clientSecret, testLocation()) + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -100,13 +117,19 @@ func TestAccAzureRMKubernetesCluster_addonProfileKubeDashboard(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_addonProfileKubeDashboard(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.kube_dashboard.#", "1"), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.kube_dashboard.0.enabled", "false"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, + }, }, }) } @@ -116,7 +139,7 @@ func TestAccAzureRMKubernetesCluster_addonProfileOMS(t *testing.T) { ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := testAccAzureRMKubernetesCluster_addonProfileOMS(ri, clientId, clientSecret, testLocation()) + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -124,7 +147,7 @@ func TestAccAzureRMKubernetesCluster_addonProfileOMS(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_addonProfileOMS(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.http_application_routing.#", "0"), @@ -133,6 +156,12 @@ func TestAccAzureRMKubernetesCluster_addonProfileOMS(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "addon_profile.0.oms_agent.0.log_analytics_workspace_id"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, + }, }, }) } @@ -143,9 +172,6 @@ func TestAccAzureRMKubernetesCluster_addonProfileOMSToggle(t *testing.T) { clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") location := testLocation() - enablingConfig := testAccAzureRMKubernetesCluster_addonProfileOMS(ri, clientId, clientSecret, location) - disablingConfig := 
testAccAzureRMKubernetesCluster_addonProfileOMSDisabled(ri, clientId, clientSecret, location) - scaleDownConfig := testAccAzureRMKubernetesCluster_addonProfileOMSScaleWithoutBlock(ri, clientId, clientSecret, location) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -153,38 +179,56 @@ func TestAccAzureRMKubernetesCluster_addonProfileOMSToggle(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: enablingConfig, + Config: testAccAzureRMKubernetesCluster_addonProfileOMS(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.http_application_routing.#", "0"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.count", "1"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.count", "1"), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.#", "1"), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.0.enabled", "true"), resource.TestCheckResourceAttrSet(resourceName, "addon_profile.0.oms_agent.0.log_analytics_workspace_id"), ), }, { - Config: disablingConfig, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, + }, + { + Config: testAccAzureRMKubernetesCluster_addonProfileOMSDisabled(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.http_application_routing.#", "0"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.count", "1"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.count", "1"), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.#", "1"), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.0.enabled", "false"), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.0.log_analytics_workspace_id", ""), ), }, { - Config: scaleDownConfig, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, + }, + { + Config: testAccAzureRMKubernetesCluster_addonProfileOMSScaleWithoutBlock(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.http_application_routing.#", "0"), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.count", "2"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.count", "2"), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.#", "1"), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.0.enabled", "false"), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.0.log_analytics_workspace_id", ""), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, + }, }, }) } @@ -194,7 +238,7 @@ func TestAccAzureRMKubernetesCluster_addonProfileRouting(t *testing.T) { ri := tf.AccRandTimeInt() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - config := 
testAccAzureRMKubernetesCluster_addonProfileRouting(ri, clientId, clientSecret, testLocation()) + location := testLocation() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -202,7 +246,7 @@ func TestAccAzureRMKubernetesCluster_addonProfileRouting(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: config, + Config: testAccAzureRMKubernetesCluster_addonProfileRouting(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.http_application_routing.#", "1"), @@ -211,6 +255,12 @@ func TestAccAzureRMKubernetesCluster_addonProfileRouting(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.#", "0"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, + }, }, }) } @@ -225,25 +275,21 @@ resource "azurerm_resource_group" "test" { resource "azurerm_virtual_network" "test" { name = "acctestvirtnet%d" address_space = ["172.0.0.0/16"] - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - - tags = { - environment = "Testing" - } + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name } resource "azurerm_subnet" "test" { name = "acctestsubnet%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name address_prefix = "172.0.2.0/24" } resource "azurerm_subnet" "test-aci" { name = "acctestsubnet-aci%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name address_prefix = "172.0.3.0/24" delegation { @@ -258,8 +304,8 @@ resource "azurerm_subnet" "test-aci" { resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" linux_profile { @@ -270,11 +316,11 @@ resource "azurerm_kubernetes_cluster" "test" { } } - agent_pool_profile { + default_node_pool { name = "default" - count = "1" + count = 1 vm_size = "Standard_DS2_v2" - vnet_subnet_id = "${azurerm_subnet.test.id}" + vnet_subnet_id = azurerm_subnet.test.id } service_principal { @@ -285,7 +331,7 @@ resource "azurerm_kubernetes_cluster" "test" { addon_profile { aci_connector_linux { enabled = true - subnet_name = "${azurerm_subnet.test-aci.name}" + subnet_name = azurerm_subnet.test-aci.name } } @@ -306,25 +352,21 @@ resource "azurerm_resource_group" "test" { resource "azurerm_virtual_network" "test" { name = "acctestvirtnet%d" address_space = ["172.0.0.0/16"] - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - - tags = { - environment = "Testing" - } + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name } resource "azurerm_subnet" 
"test" { name = "acctestsubnet%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name address_prefix = "172.0.2.0/24" } resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" linux_profile { @@ -335,11 +377,11 @@ resource "azurerm_kubernetes_cluster" "test" { } } - agent_pool_profile { + default_node_pool { name = "default" - count = "1" + count = 1 vm_size = "Standard_DS2_v2" - vnet_subnet_id = "${azurerm_subnet.test.id}" + vnet_subnet_id = azurerm_subnet.test.id } service_principal { @@ -369,8 +411,8 @@ resource "azurerm_resource_group" "test" { resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" linux_profile { @@ -381,9 +423,9 @@ resource "azurerm_kubernetes_cluster" "test" { } } - agent_pool_profile { + default_node_pool { name = "default" - count = "1" + count = 1 vm_size = "Standard_DS2_v2" } @@ -410,8 +452,8 @@ resource "azurerm_resource_group" "test" { resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" linux_profile { @@ -422,9 +464,9 @@ resource "azurerm_kubernetes_cluster" "test" { } } - agent_pool_profile { + default_node_pool { name = "default" - count = "1" + count = 1 vm_size = "Standard_DS2_v2" } @@ -451,17 +493,17 @@ resource "azurerm_resource_group" "test" { resource "azurerm_log_analytics_workspace" "test" { name = "acctest-%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name sku = "PerGB2018" } resource "azurerm_log_analytics_solution" "test" { solution_name = "ContainerInsights" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - workspace_resource_id = "${azurerm_log_analytics_workspace.test.id}" - workspace_name = "${azurerm_log_analytics_workspace.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + workspace_resource_id = azurerm_log_analytics_workspace.test.id + workspace_name = azurerm_log_analytics_workspace.test.name plan { publisher = "Microsoft" @@ -471,8 +513,8 @@ resource "azurerm_log_analytics_solution" "test" { resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" linux_profile { @@ -483,9 +525,9 @@ resource 
"azurerm_kubernetes_cluster" "test" { } } - agent_pool_profile { + default_node_pool { name = "default" - count = "1" + count = 1 vm_size = "Standard_DS2_v2" } @@ -497,7 +539,7 @@ resource "azurerm_kubernetes_cluster" "test" { addon_profile { oms_agent { enabled = true - log_analytics_workspace_id = "${azurerm_log_analytics_workspace.test.id}" + log_analytics_workspace_id = azurerm_log_analytics_workspace.test.id } } } @@ -513,8 +555,8 @@ resource "azurerm_resource_group" "test" { resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" linux_profile { @@ -525,9 +567,9 @@ resource "azurerm_kubernetes_cluster" "test" { } } - agent_pool_profile { + default_node_pool { name = "default" - count = "1" + count = 1 vm_size = "Standard_DS2_v2" } @@ -554,8 +596,8 @@ resource "azurerm_resource_group" "test" { resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" linux_profile { @@ -566,9 +608,9 @@ resource "azurerm_kubernetes_cluster" "test" { } } - agent_pool_profile { + default_node_pool { name = "default" - count = "2" + count = 2 vm_size = "Standard_DS2_v2" } @@ -589,8 +631,8 @@ resource "azurerm_resource_group" "test" { resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" linux_profile { @@ -601,9 +643,9 @@ resource "azurerm_kubernetes_cluster" "test" { } } - agent_pool_profile { + default_node_pool { name = "default" - count = "1" + count = 1 vm_size = "Standard_DS2_v2" } diff --git a/azurerm/resource_arm_kubernetes_cluster_scaling_test.go b/azurerm/resource_arm_kubernetes_cluster_scaling_test.go index 97ae4b0f5453..121e05c7d483 100644 --- a/azurerm/resource_arm_kubernetes_cluster_scaling_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_scaling_test.go @@ -145,7 +145,7 @@ resource "azurerm_kubernetes_cluster" "test" { resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" - agent_pool_profile { + default_node_pool { name = "pool1" min_count = 1 max_count = 2 @@ -175,7 +175,7 @@ resource "azurerm_kubernetes_cluster" "test" { dns_prefix = "acctestaks%d" kubernetes_version = "%s" - agent_pool_profile { + default_node_pool { name = "pool1" min_count = 1 max_count = 2 From 692f74034f8590dfad35303a75fd9a7bb2e543dd Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 8 Nov 2019 12:05:49 +0100 Subject: [PATCH 20/45] r/kubernetes_cluster: moving `windows_profile` further down --- azurerm/resource_arm_kubernetes_cluster.go | 40 +++++++++++----------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index 0809b3dbf970..c357ac65a7c2 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -311,26 +311,6 @@ func 
resourceArmKubernetesCluster() *schema.Resource { }, }, - "windows_profile": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "admin_username": { - Type: schema.TypeString, - Required: true, - }, - "admin_password": { - Type: schema.TypeString, - Optional: true, - Sensitive: true, - ValidateFunc: validate.NoEmptyStrings, - }, - }, - }, - }, - "network_profile": { Type: schema.TypeList, Optional: true, @@ -473,6 +453,26 @@ func resourceArmKubernetesCluster() *schema.Resource { "tags": tags.Schema(), + "windows_profile": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "admin_username": { + Type: schema.TypeString, + Required: true, + }, + "admin_password": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + ValidateFunc: validate.NoEmptyStrings, + }, + }, + }, + }, + // Computed "fqdn": { Type: schema.TypeString, From 2ba3f1addcfa4fab0c5781a36725b99b53400b44 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 8 Nov 2019 14:00:30 +0100 Subject: [PATCH 21/45] r/kubernetes_cluster: switching to use a common ID parsing method --- .../services/containers/kubernetes_id.go | 69 +++++++++++++++++ .../services/containers/kubernetes_id_test.go | 74 +++++++++++++++++++ .../containers/kubernetes_nodepool.go | 1 + azurerm/resource_arm_kubernetes_cluster.go | 35 ++++----- 4 files changed, 159 insertions(+), 20 deletions(-) create mode 100644 azurerm/internal/services/containers/kubernetes_id.go create mode 100644 azurerm/internal/services/containers/kubernetes_id_test.go diff --git a/azurerm/internal/services/containers/kubernetes_id.go b/azurerm/internal/services/containers/kubernetes_id.go new file mode 100644 index 000000000000..1355559df86a --- /dev/null +++ b/azurerm/internal/services/containers/kubernetes_id.go @@ -0,0 +1,69 @@ +package containers + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +) + +type KubernetesClusterID struct { + Name string + ResourceGroup string + + ID azure.ResourceID +} + +func KubernetesClusterIDSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: ValidateKubernetesClusterID, + } +} + +func ParseKubernetesClusterID(id string) (*KubernetesClusterID, error) { + clusterId, err := azure.ParseAzureResourceID(id) + if err != nil { + return nil, err + } + + resourceGroup := clusterId.ResourceGroup + if resourceGroup == "" { + return nil, fmt.Errorf("%q is missing a Resource Group", id) + } + + clusterName := clusterId.Path["managedClusters"] + if clusterName == "" { + return nil, fmt.Errorf("%q is missing the `managedClusters` segment", id) + } + + output := KubernetesClusterID{ + Name: clusterName, + ResourceGroup: resourceGroup, + ID: *clusterId, + } + return &output, nil +} + +func ValidateKubernetesClusterID(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %q to be string", k)) + return + } + + id, err := azure.ParseAzureResourceID(v) + if err != nil { + errors = append(errors, fmt.Errorf("Can not parse %q as a Resource Id: %v", v, err)) + } + + if id != nil { + if id.Path["managedClusters"] == "" { + errors = append(errors, fmt.Errorf("The 'managedClusters' segment is missing from Resource ID %q", v)) + } + } + 
+ return warnings, errors +} diff --git a/azurerm/internal/services/containers/kubernetes_id_test.go b/azurerm/internal/services/containers/kubernetes_id_test.go new file mode 100644 index 000000000000..22a032328a9c --- /dev/null +++ b/azurerm/internal/services/containers/kubernetes_id_test.go @@ -0,0 +1,74 @@ +package containers + +import ( + "testing" +) + +func TestKubernetesClusterID(t *testing.T) { + testData := []struct { + input string + expected *KubernetesClusterID + }{ + { + input: "", + expected: nil, + }, + { + input: "/subscriptions/00000000-0000-0000-0000-000000000000", + expected: nil, + }, + { + input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups", + expected: nil, + }, + { + input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/hello", + expected: nil, + }, + { + // wrong case + input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/hello/managedclusters/cluster1", + expected: nil, + }, + { + input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/hello/managedClusters/cluster1", + expected: &KubernetesClusterID{ + Name: "cluster1", + ResourceGroup: "hello", + }, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q..", v.input) + actual, err := ParseKubernetesClusterID(v.input) + + // if we get something there shouldn't be an error + if v.expected != nil && err == nil { + continue + } + + // if nothing's expected we should get an error + if v.expected == nil && err != nil { + continue + } + + if v.expected == nil && actual == nil { + continue + } + + if v.expected == nil && actual != nil { + t.Fatalf("Expected nothing but got %+v", actual) + } + if v.expected != nil && actual == nil { + t.Fatalf("Expected %+v but got nil", actual) + } + + if v.expected.ResourceGroup != actual.ResourceGroup { + t.Fatalf("Expected ResourceGroup to be %q but got %q", v.expected.ResourceGroup, actual.ResourceGroup) + } + if v.expected.Name != actual.Name { + t.Fatalf("Expected Name to be %q but got %q", v.expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/containers/kubernetes_nodepool.go b/azurerm/internal/services/containers/kubernetes_nodepool.go index 76d06cd4c42d..4adb6cbfba74 100644 --- a/azurerm/internal/services/containers/kubernetes_nodepool.go +++ b/azurerm/internal/services/containers/kubernetes_nodepool.go @@ -56,6 +56,7 @@ func SchemaDefaultNodePool() *schema.Schema { }, }, + // TODO: make this node_count "count": { Type: schema.TypeInt, Optional: true, diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index c357ac65a7c2..a4f3eadf8c1a 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -694,13 +694,13 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} log.Printf("[INFO] preparing arguments for Managed Kubernetes Cluster update.") - id, err := azure.ParseAzureResourceID(d.Id()) + id, err := containers.ParseKubernetesClusterID(d.Id()) if err != nil { return err } resourceGroup := id.ResourceGroup - name := id.Path["managedClusters"] + name := id.Name d.Partial(true) @@ -787,7 +787,6 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} existing.ManagedClusterProperties.LinuxProfile = linuxProfile } - // TODO: does this want to be split out if d.HasChange("network_profile") { updateCluster = true networkProfileRaw := d.Get("network_profile").([]interface{}) @@ -865,31 +864,29 @@ func 
resourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{}) ctx, cancel := timeouts.ForRead(meta.(*ArmClient).StopContext, d) defer cancel() - id, err := azure.ParseAzureResourceID(d.Id()) + id, err := containers.ParseKubernetesClusterID(d.Id()) if err != nil { return err } - resGroup := id.ResourceGroup - name := id.Path["managedClusters"] - resp, err := client.Get(ctx, resGroup, name) + resp, err := client.Get(ctx, id.ResourceGroup, id.Name) if err != nil { if utils.ResponseWasNotFound(resp.Response) { - log.Printf("[DEBUG] Managed Kubernetes Cluster %q was not found in Resource Group %q - removing from state!", name, resGroup) + log.Printf("[DEBUG] Managed Kubernetes Cluster %q was not found in Resource Group %q - removing from state!", id.Name, id.ResourceGroup) d.SetId("") return nil } - return fmt.Errorf("Error retrieving Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resGroup, err) + return fmt.Errorf("Error retrieving Managed Kubernetes Cluster %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) } - profile, err := client.GetAccessProfile(ctx, resGroup, name, "clusterUser") + profile, err := client.GetAccessProfile(ctx, id.ResourceGroup, id.Name, "clusterUser") if err != nil { - return fmt.Errorf("Error retrieving Access Profile for Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resGroup, err) + return fmt.Errorf("Error retrieving Access Profile for Managed Kubernetes Cluster %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) } d.Set("name", resp.Name) - d.Set("resource_group_name", resGroup) + d.Set("resource_group_name", id.ResourceGroup) if location := resp.Location; location != nil { d.Set("location", azure.NormalizeLocation(*location)) } @@ -952,9 +949,9 @@ func resourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{}) // adminProfile is only available for RBAC enabled clusters with AAD if props.AadProfile != nil { - adminProfile, err := client.GetAccessProfile(ctx, resGroup, name, "clusterAdmin") + adminProfile, err := client.GetAccessProfile(ctx, id.ResourceGroup, id.Name, "clusterAdmin") if err != nil { - return fmt.Errorf("Error retrieving Admin Access Profile for Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resGroup, err) + return fmt.Errorf("Error retrieving Admin Access Profile for Managed Kubernetes Cluster %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) } adminKubeConfigRaw, adminKubeConfig := flattenKubernetesClusterAccessProfile(adminProfile) @@ -982,20 +979,18 @@ func resourceArmKubernetesClusterDelete(d *schema.ResourceData, meta interface{} ctx, cancel := timeouts.ForDelete(meta.(*ArmClient).StopContext, d) defer cancel() - id, err := azure.ParseAzureResourceID(d.Id()) + id, err := containers.ParseKubernetesClusterID(d.Id()) if err != nil { return err } - resGroup := id.ResourceGroup - name := id.Path["managedClusters"] - future, err := client.Delete(ctx, resGroup, name) + future, err := client.Delete(ctx, id.ResourceGroup, id.Name) if err != nil { - return fmt.Errorf("Error deleting Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resGroup, err) + return fmt.Errorf("Error deleting Managed Kubernetes Cluster %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) } if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for the deletion of Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resGroup, err) + return fmt.Errorf("Error waiting for the deletion of Managed 
Kubernetes Cluster %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) } return nil From 7f398bfd581562fc9baeaf8786c29f5804562bbd Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 8 Nov 2019 14:05:14 +0100 Subject: [PATCH 22/45] d/kubernetes_cluster: fixing the tests --- azurerm/data_source_kubernetes_cluster_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azurerm/data_source_kubernetes_cluster_test.go b/azurerm/data_source_kubernetes_cluster_test.go index 93ef6d357f8b..8468cccc6790 100644 --- a/azurerm/data_source_kubernetes_cluster_test.go +++ b/azurerm/data_source_kubernetes_cluster_test.go @@ -574,7 +574,7 @@ func TestAccDataSourceAzureRMKubernetesCluster_enableNodePublicIP(t *testing.T) } func testAccDataSourceAzureRMKubernetesCluster_basic(rInt int, clientId string, clientSecret string, location string) string { - r := testAccAzureRMKubernetesCluster_basic(rInt, clientId, clientSecret, location) + r := testAccAzureRMKubernetesCluster_basicVMSS(rInt, clientId, clientSecret, location) return fmt.Sprintf(` %s From 90c09fa0c63f2ec8d2d000a278f7f7eba66fd2a4 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 8 Nov 2019 16:06:13 +0100 Subject: [PATCH 23/45] r/kubernetes_cluster: renaming `count` to `node_count` --- .../containers/kubernetes_nodepool.go | 18 +++---- azurerm/resource_arm_kubernetes_cluster.go | 1 - ...urce_arm_kubernetes_cluster_addons_test.go | 48 +++++++++--------- ...source_arm_kubernetes_cluster_auth_test.go | 24 ++++----- ...urce_arm_kubernetes_cluster_legacy_test.go | 4 +- ...rce_arm_kubernetes_cluster_network_test.go | 16 +++--- ...ource_arm_kubernetes_cluster_other_test.go | 50 +++++++++---------- ...rce_arm_kubernetes_cluster_scaling_test.go | 10 ++-- 8 files changed, 84 insertions(+), 87 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_nodepool.go b/azurerm/internal/services/containers/kubernetes_nodepool.go index 4adb6cbfba74..3ec0df701c4e 100644 --- a/azurerm/internal/services/containers/kubernetes_nodepool.go +++ b/azurerm/internal/services/containers/kubernetes_nodepool.go @@ -56,14 +56,6 @@ func SchemaDefaultNodePool() *schema.Schema { }, }, - // TODO: make this node_count - "count": { - Type: schema.TypeInt, - Optional: true, - Default: 1, - ValidateFunc: validation.IntBetween(1, 100), - }, - "enable_auto_scaling": { Type: schema.TypeBool, Optional: true, @@ -93,6 +85,12 @@ func SchemaDefaultNodePool() *schema.Schema { ValidateFunc: validation.IntBetween(1, 100), }, + "node_count": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 100), + }, + "node_taints": { Type: schema.TypeList, Optional: true, @@ -170,7 +168,7 @@ func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedC profile.VnetSubnetID = utils.String(vnetSubnetID) } - count := raw["count"].(int) + count := raw["node_count"].(int) maxCount := raw["max_count"].(int) minCount := raw["min_count"].(int) @@ -277,13 +275,13 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro return &[]interface{}{ map[string]interface{}{ "availability_zones": availabilityZones, - "count": count, "enable_auto_scaling": enableAutoScaling, "enable_node_public_ip": enableNodePublicIP, "max_count": maxCount, "max_pods": maxPods, "min_count": minCount, "name": name, + "node_count": count, "node_taints": nodeTaints, "os_disk_size_gb": osDiskSizeGB, "type": string(agentPool.Type), diff --git a/azurerm/resource_arm_kubernetes_cluster.go 
b/azurerm/resource_arm_kubernetes_cluster.go index a4f3eadf8c1a..cd9e5e33c4f0 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -22,7 +22,6 @@ import ( ) // TODO: more granular update tests -// TODO: 4046 - splitting agent_pool_profile out into it's own resource // TODO: document default_node_pool func resourceArmKubernetesCluster() *schema.Resource { diff --git a/azurerm/resource_arm_kubernetes_cluster_addons_test.go b/azurerm/resource_arm_kubernetes_cluster_addons_test.go index 1d900c44fa74..84565eb5d979 100644 --- a/azurerm/resource_arm_kubernetes_cluster_addons_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_addons_test.go @@ -58,7 +58,7 @@ func TestAccAzureRMKubernetesCluster_addonProfileAciConnectorLinuxDisabled(t *te Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.http_application_routing.#", "0"), - resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.count", "1"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.node_count", "1"), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.aci_connector_linux.#", "1"), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.aci_connector_linux.0.enabled", "false"), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.aci_connector_linux.0.subnet_name", ""), @@ -183,7 +183,7 @@ func TestAccAzureRMKubernetesCluster_addonProfileOMSToggle(t *testing.T) { Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.http_application_routing.#", "0"), - resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.count", "1"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.node_count", "1"), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.#", "1"), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.0.enabled", "true"), resource.TestCheckResourceAttrSet(resourceName, "addon_profile.0.oms_agent.0.log_analytics_workspace_id"), @@ -200,7 +200,7 @@ func TestAccAzureRMKubernetesCluster_addonProfileOMSToggle(t *testing.T) { Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.http_application_routing.#", "0"), - resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.count", "1"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.node_count", "1"), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.#", "1"), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.0.enabled", "false"), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.0.log_analytics_workspace_id", ""), @@ -217,7 +217,7 @@ func TestAccAzureRMKubernetesCluster_addonProfileOMSToggle(t *testing.T) { Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.http_application_routing.#", "0"), - resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.count", "2"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.node_count", "2"), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.#", "1"), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.0.enabled", 
"false"), resource.TestCheckResourceAttr(resourceName, "addon_profile.0.oms_agent.0.log_analytics_workspace_id", ""), @@ -318,7 +318,7 @@ resource "azurerm_kubernetes_cluster" "test" { default_node_pool { name = "default" - count = 1 + node_count = 1 vm_size = "Standard_DS2_v2" vnet_subnet_id = azurerm_subnet.test.id } @@ -379,7 +379,7 @@ resource "azurerm_kubernetes_cluster" "test" { default_node_pool { name = "default" - count = 1 + node_count = 1 vm_size = "Standard_DS2_v2" vnet_subnet_id = azurerm_subnet.test.id } @@ -424,9 +424,9 @@ resource "azurerm_kubernetes_cluster" "test" { } default_node_pool { - name = "default" - count = 1 - vm_size = "Standard_DS2_v2" + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" } service_principal { @@ -465,9 +465,9 @@ resource "azurerm_kubernetes_cluster" "test" { } default_node_pool { - name = "default" - count = 1 - vm_size = "Standard_DS2_v2" + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" } service_principal { @@ -526,9 +526,9 @@ resource "azurerm_kubernetes_cluster" "test" { } default_node_pool { - name = "default" - count = 1 - vm_size = "Standard_DS2_v2" + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" } service_principal { @@ -568,9 +568,9 @@ resource "azurerm_kubernetes_cluster" "test" { } default_node_pool { - name = "default" - count = 1 - vm_size = "Standard_DS2_v2" + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" } service_principal { @@ -609,9 +609,9 @@ resource "azurerm_kubernetes_cluster" "test" { } default_node_pool { - name = "default" - count = 2 - vm_size = "Standard_DS2_v2" + name = "default" + node_count = 2 + vm_size = "Standard_DS2_v2" } service_principal { @@ -644,9 +644,9 @@ resource "azurerm_kubernetes_cluster" "test" { } default_node_pool { - name = "default" - count = 1 - vm_size = "Standard_DS2_v2" + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" } service_principal { diff --git a/azurerm/resource_arm_kubernetes_cluster_auth_test.go b/azurerm/resource_arm_kubernetes_cluster_auth_test.go index e7cb42c0fb6b..e374db3c62d0 100644 --- a/azurerm/resource_arm_kubernetes_cluster_auth_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_auth_test.go @@ -184,9 +184,9 @@ resource "azurerm_kubernetes_cluster" "test" { dns_prefix = "acctestaks%d" default_node_pool { - name = "default" - count = 1 - vm_size = "Standard_DS2_v2" + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" } service_principal { @@ -222,9 +222,9 @@ resource "azurerm_kubernetes_cluster" "test" { } default_node_pool { - name = "default" - count = 1 - vm_size = "Standard_DS2_v2" + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" } service_principal { @@ -257,9 +257,9 @@ resource "azurerm_kubernetes_cluster" "test" { } default_node_pool { - name = "default" - count = 1 - vm_size = "Standard_DS2_v2" + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" } service_principal { @@ -300,9 +300,9 @@ resource "azurerm_kubernetes_cluster" "test" { } default_node_pool { - name = "default" - count = 1 - vm_size = "Standard_DS2_v2" + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" } service_principal { diff --git a/azurerm/resource_arm_kubernetes_cluster_legacy_test.go b/azurerm/resource_arm_kubernetes_cluster_legacy_test.go index eeadb44ad282..ee6f3c9e76c0 100644 --- a/azurerm/resource_arm_kubernetes_cluster_legacy_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_legacy_test.go @@ -76,7 +76,7 @@ resource "azurerm_kubernetes_cluster" 
"test" { agent_pool_profile { name = "default" - count = "1" + count = 1 vm_size = "Standard_DS2_v2" } @@ -103,7 +103,7 @@ resource "azurerm_kubernetes_cluster" "test" { agent_pool_profile { name = "default" - count = "1" + count = 1 vm_size = "Standard_DS2_v2" } diff --git a/azurerm/resource_arm_kubernetes_cluster_network_test.go b/azurerm/resource_arm_kubernetes_cluster_network_test.go index 190a3cca83f1..da27d1d3dc69 100644 --- a/azurerm/resource_arm_kubernetes_cluster_network_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_network_test.go @@ -427,7 +427,7 @@ resource "azurerm_kubernetes_cluster" "test" { default_node_pool { name = "default" - count = 2 + node_count = 2 vm_size = "Standard_DS2_v2" vnet_subnet_id = azurerm_subnet.test.id } @@ -504,7 +504,7 @@ resource "azurerm_kubernetes_cluster" "test" { default_node_pool { name = "default" - count = 2 + node_count = 2 vm_size = "Standard_DS2_v2" vnet_subnet_id = azurerm_subnet.test.id } @@ -561,7 +561,7 @@ resource "azurerm_kubernetes_cluster" "test" { default_node_pool { name = "default" - count = 2 + node_count = 2 vm_size = "Standard_DS2_v2" vnet_subnet_id = azurerm_subnet.test.id } @@ -635,7 +635,7 @@ resource "azurerm_kubernetes_cluster" "test" { default_node_pool { name = "default" - count = 2 + node_count = 2 vm_size = "Standard_DS2_v2" vnet_subnet_id = azurerm_subnet.test.id } @@ -671,7 +671,7 @@ resource "azurerm_kubernetes_cluster" "test" { default_node_pool { name = "default" - count = 1 + node_count = 1 vm_size = "Standard_DS2_v2" enable_node_public_ip = true } @@ -726,7 +726,7 @@ resource "azurerm_kubernetes_cluster" "test" { default_node_pool { name = "default" - count = 2 + node_count = 2 vm_size = "Standard_DS2_v2" vnet_subnet_id = azurerm_subnet.test.id max_pods = 60 @@ -778,7 +778,7 @@ resource "azurerm_kubernetes_cluster" "test" { default_node_pool { name = "default" - count = 2 + node_count = 2 vm_size = "Standard_DS2_v2" vnet_subnet_id = azurerm_subnet.test.id } @@ -853,7 +853,7 @@ resource "azurerm_kubernetes_cluster" "test" { default_node_pool { name = "default" - count = 2 + node_count = 2 vm_size = "Standard_DS2_v2" vnet_subnet_id = azurerm_subnet.test.id } diff --git a/azurerm/resource_arm_kubernetes_cluster_other_test.go b/azurerm/resource_arm_kubernetes_cluster_other_test.go index dd4452c4357d..62a7307987b0 100644 --- a/azurerm/resource_arm_kubernetes_cluster_other_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_other_test.go @@ -297,10 +297,10 @@ resource "azurerm_kubernetes_cluster" "test" { dns_prefix = "acctestaks%d" default_node_pool { - name = "default" - count = "1" - type = "AvailabilitySet" - vm_size = "Standard_DS2_v2" + name = "default" + node_count = 1 + type = "AvailabilitySet" + vm_size = "Standard_DS2_v2" } service_principal { @@ -325,9 +325,9 @@ resource "azurerm_kubernetes_cluster" "test" { dns_prefix = "acctestaks%d" default_node_pool { - name = "default" - count = 1 - vm_size = "Standard_DS2_v2" + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" } service_principal { @@ -350,9 +350,9 @@ resource "azurerm_kubernetes_cluster" "import" { dns_prefix = azurerm_kubernetes_cluster.test.dns_prefix default_node_pool { - name = "default" - count = 1 - vm_size = "Standard_DS2_v2" + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" } service_principal { @@ -385,9 +385,9 @@ resource "azurerm_kubernetes_cluster" "test" { } default_node_pool { - name = "default" - count = 1 - vm_size = "Standard_DS2_v2" + name = "default" + node_count = 1 + vm_size = 
"Standard_DS2_v2" } service_principal { @@ -412,9 +412,9 @@ resource "azurerm_kubernetes_cluster" "test" { dns_prefix = "acctestaks%d" default_node_pool { - name = "default" - count = 1 - vm_size = "Standard_DS2_v2" + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" node_taints = [ "key=value:NoSchedule" ] @@ -443,9 +443,9 @@ resource "azurerm_kubernetes_cluster" "test" { node_resource_group = "acctestRGAKS-%d" default_node_pool { - name = "default" - count = 1 - vm_size = "Standard_DS2_v2" + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" } service_principal { @@ -479,9 +479,9 @@ resource "azurerm_kubernetes_cluster" "test" { } node_pool_profile { - name = "default" - count = 1 - vm_size = "Standard_DS2_v2" + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" } service_principal { @@ -520,9 +520,9 @@ resource "azurerm_kubernetes_cluster" "test" { # the default node pool /has/ to be Linux agents - Windows agents can be added via the node pools resource default_node_pool { - name = "np" - count = 3 - vm_size = "Standard_DS2_v2" + name = "np" + node_count = 3 + vm_size = "Standard_DS2_v2" } service_principal { diff --git a/azurerm/resource_arm_kubernetes_cluster_scaling_test.go b/azurerm/resource_arm_kubernetes_cluster_scaling_test.go index 121e05c7d483..3a3fa17f9415 100644 --- a/azurerm/resource_arm_kubernetes_cluster_scaling_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_scaling_test.go @@ -25,13 +25,13 @@ func TestAccAzureRMKubernetesCluster_addAgent(t *testing.T) { Config: testAccAzureRMKubernetesCluster_addAgent(ri, clientId, clientSecret, location, 1), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.count", "2"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.node_count", "2"), ), }, { Config: testAccAzureRMKubernetesCluster_addAgent(ri, clientId, clientSecret, location, 2), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.count", "2"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.node_count", "2"), ), }, }, @@ -119,9 +119,9 @@ resource "azurerm_kubernetes_cluster" "test" { dns_prefix = "acctestaks%d" default_node_pool { - name = "default" - count = %d - vm_size = "Standard_DS2_v2" + name = "default" + node_count = %d + vm_size = "Standard_DS2_v2" } service_principal { From d70b769ceabafb5061c9f5e39a18bc8855c92c9b Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 8 Nov 2019 19:39:57 +0100 Subject: [PATCH 24/45] r/kubernetes_cluster: fixing the broken tests --- azurerm/resource_arm_kubernetes_cluster.go | 1 + ...source_arm_kubernetes_cluster_auth_test.go | 28 ++++++++++++++++--- ...urce_arm_kubernetes_cluster_legacy_test.go | 1 + ...ource_arm_kubernetes_cluster_other_test.go | 2 +- ...rce_arm_kubernetes_cluster_scaling_test.go | 2 +- 5 files changed, 28 insertions(+), 6 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index cd9e5e33c4f0..d35465f97081 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -271,6 +271,7 @@ func resourceArmKubernetesCluster() *schema.Resource { }, }, + // TODO: remove Computed in 2.0 "enable_pod_security_policy": { Type: schema.TypeBool, Optional: true, diff --git a/azurerm/resource_arm_kubernetes_cluster_auth_test.go b/azurerm/resource_arm_kubernetes_cluster_auth_test.go 
index e374db3c62d0..74c46071791a 100644 --- a/azurerm/resource_arm_kubernetes_cluster_auth_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_auth_test.go @@ -177,6 +177,20 @@ resource "azurerm_resource_group" "test" { location = "%s" } +resource "azurerm_virtual_network" "test" { + name = "acctestvirtnet%d" + address_space = ["10.1.0.0/16"] + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_subnet" "test" { + name = "acctestsubnet%d" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name + address_prefix = "10.1.0.0/24" +} + resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks%d" location = azurerm_resource_group.test.location @@ -184,9 +198,10 @@ resource "azurerm_kubernetes_cluster" "test" { dns_prefix = "acctestaks%d" default_node_pool { - name = "default" - node_count = 1 - vm_size = "Standard_DS2_v2" + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + vnet_subnet_id = azurerm_subnet.test.id } service_principal { @@ -194,13 +209,18 @@ resource "azurerm_kubernetes_cluster" "test" { client_secret = "%s" } + network_profile { + network_plugin = "azure" + load_balancer_sku = "Standard" + } + api_server_authorized_ip_ranges = [ "8.8.8.8/32", "8.8.4.4/32", "8.8.2.0/24", ] } -`, rInt, location, rInt, rInt, clientId, clientSecret) +`, rInt, location, rInt, rInt, rInt, rInt, clientId, clientSecret) } func testAccAzureRMKubernetesCluster_enablePodSecurityPolicy(rInt int, clientId string, clientSecret string, location string) string { diff --git a/azurerm/resource_arm_kubernetes_cluster_legacy_test.go b/azurerm/resource_arm_kubernetes_cluster_legacy_test.go index ee6f3c9e76c0..e6eebd480ba6 100644 --- a/azurerm/resource_arm_kubernetes_cluster_legacy_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_legacy_test.go @@ -104,6 +104,7 @@ resource "azurerm_kubernetes_cluster" "test" { agent_pool_profile { name = "default" count = 1 + type = "VirtualMachineScaleSet" vm_size = "Standard_DS2_v2" } diff --git a/azurerm/resource_arm_kubernetes_cluster_other_test.go b/azurerm/resource_arm_kubernetes_cluster_other_test.go index 62a7307987b0..a0d87fce0494 100644 --- a/azurerm/resource_arm_kubernetes_cluster_other_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_other_test.go @@ -478,7 +478,7 @@ resource "azurerm_kubernetes_cluster" "test" { } } - node_pool_profile { + default_node_pool { name = "default" node_count = 1 vm_size = "Standard_DS2_v2" diff --git a/azurerm/resource_arm_kubernetes_cluster_scaling_test.go b/azurerm/resource_arm_kubernetes_cluster_scaling_test.go index 3a3fa17f9415..60dee63cf0ea 100644 --- a/azurerm/resource_arm_kubernetes_cluster_scaling_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_scaling_test.go @@ -25,7 +25,7 @@ func TestAccAzureRMKubernetesCluster_addAgent(t *testing.T) { Config: testAccAzureRMKubernetesCluster_addAgent(ri, clientId, clientSecret, location, 1), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.node_count", "2"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.node_count", "1"), ), }, { From 1662813fda70e6a3cf8af04fd25e1517d87d0898 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 8 Nov 2019 20:07:05 +0100 Subject: [PATCH 25/45] r/kubernetes_cluster: testing updates for the `windows_profile` block --- 
azurerm/resource_arm_kubernetes_cluster.go | 10 ++++---- ...ource_arm_kubernetes_cluster_other_test.go | 23 +++++++++++++++---- 2 files changed, 24 insertions(+), 9 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index d35465f97081..c106a29b9138 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -927,11 +927,6 @@ func resourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error setting `linux_profile`: %+v", err) } - windowsProfile := flattenKubernetesClusterWindowsProfile(props.WindowsProfile, d) - if err := d.Set("windows_profile", windowsProfile); err != nil { - return fmt.Errorf("Error setting `windows_profile`: %+v", err) - } - networkProfile := flattenKubernetesClusterNetworkProfile(props.NetworkProfile) if err := d.Set("network_profile", networkProfile); err != nil { return fmt.Errorf("Error setting `network_profile`: %+v", err) @@ -947,6 +942,11 @@ func resourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error setting `service_principal`: %+v", err) } + windowsProfile := flattenKubernetesClusterWindowsProfile(props.WindowsProfile, d) + if err := d.Set("windows_profile", windowsProfile); err != nil { + return fmt.Errorf("Error setting `windows_profile`: %+v", err) + } + // adminProfile is only available for RBAC enabled clusters with AAD if props.AadProfile != nil { adminProfile, err := client.GetAccessProfile(ctx, id.ResourceGroup, id.Name, "clusterAdmin") diff --git a/azurerm/resource_arm_kubernetes_cluster_other_test.go b/azurerm/resource_arm_kubernetes_cluster_other_test.go index a0d87fce0494..344cc2287cb2 100644 --- a/azurerm/resource_arm_kubernetes_cluster_other_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_other_test.go @@ -256,7 +256,7 @@ func TestAccAzureRMKubernetesCluster_windowsProfile(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMKubernetesCluster_windowsProfile(ri, clientId, clientSecret, location), + Config: testAccAzureRMKubernetesCluster_windowsProfile(ri, clientId, clientSecret, location, "azureuser"), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_key"), @@ -279,6 +279,21 @@ func TestAccAzureRMKubernetesCluster_windowsProfile(t *testing.T) { "service_principal.0.client_secret", }, }, + { + Config: testAccAzureRMKubernetesCluster_windowsProfile(ri, clientId, clientSecret, location, "ricksanchez"), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "windows_profile.0.admin_password", + "service_principal.0.client_secret", + }, + }, }, }) } @@ -492,7 +507,7 @@ resource "azurerm_kubernetes_cluster" "test" { `, rInt, location, rInt, rInt, version, rInt, clientId, clientSecret) } -func testAccAzureRMKubernetesCluster_windowsProfile(rInt int, clientId string, clientSecret string, location string) string { +func testAccAzureRMKubernetesCluster_windowsProfile(rInt int, clientId, clientSecret, location, username string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { name = "acctestRG-%d" @@ -514,7 +529,7 @@ resource "azurerm_kubernetes_cluster" "test" { } windows_profile { - 
admin_username = "azureuser" + admin_username = "%s" admin_password = "P@55W0rd1234!" } @@ -538,5 +553,5 @@ resource "azurerm_kubernetes_cluster" "test" { service_cidr = "10.10.0.0/16" } } -`, rInt, location, rInt, rInt, rInt, clientId, clientSecret) +`, rInt, location, rInt, rInt, rInt, username, clientId, clientSecret) } From 6037d55849f2a4da83076d3ce69359a1075441ef Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Mon, 11 Nov 2019 09:50:22 +0100 Subject: [PATCH 26/45] r/kubernetes_cluster: adding a specific test for tags --- ...ource_arm_kubernetes_cluster_other_test.go | 102 ++++++++++++++++++ 1 file changed, 102 insertions(+) diff --git a/azurerm/resource_arm_kubernetes_cluster_other_test.go b/azurerm/resource_arm_kubernetes_cluster_other_test.go index 344cc2287cb2..12757b8055c4 100644 --- a/azurerm/resource_arm_kubernetes_cluster_other_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_other_test.go @@ -243,6 +243,46 @@ func TestAccAzureRMKubernetesCluster_upgradeConfig(t *testing.T) { }) } +func TestAccAzureRMKubernetesCluster_tags(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesCluster_tags(ri, clientId, clientSecret, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, + }, + { + Config: testAccAzureRMKubernetesCluster_tagsUpdated(ri, clientId, clientSecret, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, + }, + }, + }) +} + func TestAccAzureRMKubernetesCluster_windowsProfile(t *testing.T) { resourceName := "azurerm_kubernetes_cluster.test" ri := tf.AccRandTimeInt() @@ -471,6 +511,68 @@ resource "azurerm_kubernetes_cluster" "test" { `, rInt, location, rInt, rInt, rInt, clientId, clientSecret) } +func testAccAzureRMKubernetesCluster_tags(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } + + tags { + dimension = "C-137" + } +} +`, rInt, location, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_tagsUpdated(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = 
azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } + + tags { + dimension = "D-99" + } +} +`, rInt, location, rInt, rInt, clientId, clientSecret) +} + func testAccAzureRMKubernetesCluster_upgrade(rInt int, location, clientId, clientSecret, version string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { From 412189cc97ac3c08d13afff91f1c79cf68c90aee Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Mon, 11 Nov 2019 12:33:57 +0100 Subject: [PATCH 27/45] r/kubernetes_cluster: fixing the agent type --- azurerm/resource_arm_kubernetes_cluster_legacy_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azurerm/resource_arm_kubernetes_cluster_legacy_test.go b/azurerm/resource_arm_kubernetes_cluster_legacy_test.go index e6eebd480ba6..a95469c639d7 100644 --- a/azurerm/resource_arm_kubernetes_cluster_legacy_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_legacy_test.go @@ -104,7 +104,7 @@ resource "azurerm_kubernetes_cluster" "test" { agent_pool_profile { name = "default" count = 1 - type = "VirtualMachineScaleSet" + type = "VirtualMachineScaleSets" vm_size = "Standard_DS2_v2" } From 22474d3e69a4ad7177db7f5f176dd15b6bb22177 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Mon, 11 Nov 2019 13:15:41 +0100 Subject: [PATCH 28/45] r/kubernetes_cluster: pulling the value for `node_count` from `min_count` if unset for auto-scaled --- .../containers/kubernetes_nodepool.go | 5 ++ azurerm/resource_arm_kubernetes_cluster.go | 1 - ...rce_arm_kubernetes_cluster_scaling_test.go | 59 +++++++++++++++++++ 3 files changed, 64 insertions(+), 1 deletion(-) diff --git a/azurerm/internal/services/containers/kubernetes_nodepool.go b/azurerm/internal/services/containers/kubernetes_nodepool.go index 3ec0df701c4e..17f53982a72e 100644 --- a/azurerm/internal/services/containers/kubernetes_nodepool.go +++ b/azurerm/internal/services/containers/kubernetes_nodepool.go @@ -179,6 +179,11 @@ func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedC manuallyScaledCluster := !enableAutoScaling && (d.IsNewResource() || d.HasChange("default_node_pool.0.count")) if autoScaledCluster || manuallyScaledCluster { + // users creating an auto-scaled cluster may not set the `node_count` field - if so use `min_count` + if count == 0 && autoScaledCluster { + count = minCount + } + profile.Count = utils.Int32(int32(count)) } diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index c106a29b9138..2c0f7c765f54 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -21,7 +21,6 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -// TODO: more granular update tests // TODO: document default_node_pool func resourceArmKubernetesCluster() *schema.Resource { diff --git a/azurerm/resource_arm_kubernetes_cluster_scaling_test.go b/azurerm/resource_arm_kubernetes_cluster_scaling_test.go index 60dee63cf0ea..f85a449d2c19 100644 --- a/azurerm/resource_arm_kubernetes_cluster_scaling_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_scaling_test.go @@ -38,6 +38,37 @@ func TestAccAzureRMKubernetesCluster_addAgent(t *testing.T) { }) } +func TestAccAzureRMKubernetesCluster_autoScalingNodeCountUnset(t *testing.T) { + 
resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesCluster_autoscaleNodeCountUnset(ri, clientId, clientSecret, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.min_count", "2"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.max_count", "4"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.enable_auto_scaling", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_principal.0.client_secret"}, + }, + }, + }) +} + func TestAccAzureRMKubernetesCluster_autoScalingNoAvailabilityZones(t *testing.T) { resourceName := "azurerm_kubernetes_cluster.test" ri := tf.AccRandTimeInt() @@ -132,6 +163,34 @@ resource "azurerm_kubernetes_cluster" "test" { `, rInt, location, rInt, rInt, numberOfAgents, clientId, clientSecret) } +func testAccAzureRMKubernetesCluster_autoscaleNodeCountUnset(rInt int, clientId, clientSecret, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + + default_node_pool { + name = "default" + min_count = 2 + max_count = 4 + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } +} +`, rInt, location, rInt, rInt, clientId, clientSecret) +} + func testAccAzureRMKubernetesCluster_autoscaleNoAvailabilityZones(rInt int, clientId string, clientSecret string, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { From 95e75e00306ea4f58bf9268f89201c8c61418527 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Mon, 11 Nov 2019 13:28:08 +0100 Subject: [PATCH 29/45] r/kubernetes_cluster: documenting the `default_node_pool` block --- .../docs/r/kubernetes_cluster.html.markdown | 57 ++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-) diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index d99bce267efa..0bff08ca27ec 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -74,7 +74,10 @@ The following arguments are supported: * `resource_group_name` - (Required) Specifies the Resource Group where the Managed Kubernetes Cluster should exist. Changing this forces a new resource to be created. -* `agent_pool_profile` - (Required) One or more `agent_pool_profile` blocks as defined below. +* `default_node_pool` - (Optional) A `default_node_pool` block as defined below. + +-> **NOTE:** The `default_node_pool` block will become required in 2.0 + * `dns_prefix` - (Required) DNS prefix specified when creating the managed cluster. Changing this forces a new resource to be created. 
@@ -82,6 +85,10 @@ The following arguments are supported: * `service_principal` - (Required) A `service_principal` block as documented below. +* `agent_pool_profile` - (Optional) One or more `agent_pool_profile` blocks as defined below. + +~> **NOTE:** The `agent_pool_profile` block has been superseded by the `default_node_pool` block and will be removed in 2.0 + --- A `aci_connector_linux` block supports the following: @@ -159,6 +166,8 @@ A `addon_profile` block supports the following: A `agent_pool_profile` block supports the following: +~> **NOTE:** The `agent_pool_profile` block has been superseded by the `default_node_pool` block and will be removed in 2.0 + * `name` - (Required) Unique name of the Agent Pool Profile in the context of the Subscription and Resource Group. Changing this forces a new resource to be created. * `count` - (Optional) Number of Agents (VMs) in the Pool. Possible values must be in the range of 1 to 100 (inclusive). Defaults to `1`. @@ -212,6 +221,52 @@ A `azure_policy` block supports the following: --- +A `default_node_pool` block supports the following: + +* `name` - (Required) The name which should be used for the default Kubernetes Node Pool. Changing this forces a new resource to be created. + +* `vm_size` - (Required) The size of the Virtual Machine, such as `Standard_DS2_v2`. + +* `availability_zones` - (Optional) A list of Availability Zones across which the Node Pool should be spread. + +-> **NOTE:** This requires that the `type` is set to `VirtualMachineScaleSets`. + +* `enable_auto_scaling` - (Optional) Should [the Kubernetes Auto Scaler](https://docs.microsoft.com/en-us/azure/aks/cluster-autoscaler) be enabled for this Node Pool? Defaults to `false`. + +-> **NOTE:** If you're using AutoScaling, you may wish to use [Terraform's `ignore_changes` functionality](https://www.terraform.io/docs/configuration/resources.html#ignore_changes) to ignore changes to the `node_count` field. + +-> **NOTE:** This requires that the `type` is set to `VirtualMachineScaleSets`. + +* `enable_node_public_ip` - (Optional) Should nodes in this Node Pool have a Public IP Address? Defaults to `false`. + +* `max_pods` - (Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created. + +* `node_count` - (Optional) The number of nodes which should exist in this Node Pool. If specified this must be between `1` and `100`. + +-> **NOTE:** If `enable_auto_scaling` is set to `true`, you may wish to use [Terraform's `ignore_changes` functionality](https://www.terraform.io/docs/configuration/resources.html#ignore_changes) to ignore changes to this field. + +-> **NOTE:** This is Required when `enable_auto_scaling` is set to `false`. + +* `node_taints` - (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g `key=value:NoSchedule`). + +* `os_disk_size_gb` - (Optional) The size of the OS Disk which should be used for each agent in the Node Pool. Changing this forces a new resource to be created. + +* `type` - (Optional) The type of Node Pool which should be created. Possible values are `AvailabilitySet` and `VirtualMachineScaleSets`. Defaults to `VirtualMachineScaleSets`. + +-> **NOTE:** This default value differs from the default value for the `agent_pool_profile` block and matches a change to the default within AKS. + +* `vnet_subnet_id` - (Required) The ID of a Subnet where the Kubernetes Node Pool should exist. Changing this forces a new resource to be created. 
+ +~> **NOTE:** A Route Table must be configured on this Subnet. + +If `enable_auto_scaling` is enabled, then the following fields can also be configured: + +* `max_count` - (Optional) The maximum number of nodes which should exist in this Node Pool. If specified this must be between `1` and `100`. + +* `min_count` - (Optional) The minimum number of nodes which should exist in this Node Pool. If specified this must be between `1` and `100`. + +--- + A `http_application_routing` block supports the following: * `enabled` (Required) Is HTTP Application Routing Enabled? Changing this forces a new resource to be created. From eb39af53175cb8a44b2c093d5f11d8b64c951186 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Mon, 11 Nov 2019 17:44:16 +0100 Subject: [PATCH 30/45] r/kubernetes_cluster: making `node_count` computed for autoscaled nodes --- azurerm/internal/services/containers/kubernetes_nodepool.go | 1 + 1 file changed, 1 insertion(+) diff --git a/azurerm/internal/services/containers/kubernetes_nodepool.go b/azurerm/internal/services/containers/kubernetes_nodepool.go index 17f53982a72e..3c1f4cc66a0f 100644 --- a/azurerm/internal/services/containers/kubernetes_nodepool.go +++ b/azurerm/internal/services/containers/kubernetes_nodepool.go @@ -88,6 +88,7 @@ func SchemaDefaultNodePool() *schema.Schema { "node_count": { Type: schema.TypeInt, Optional: true, + Computed: true, ValidateFunc: validation.IntBetween(1, 100), }, From 93cfd6144e8fce77f18c5f3c24685b7bb966266e Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Mon, 11 Nov 2019 17:46:10 +0100 Subject: [PATCH 31/45] r/kubernetes_cluster: updating the default example --- .../docs/r/kubernetes_cluster.html.markdown | 31 +++++++------------ 1 file changed, 11 insertions(+), 20 deletions(-) diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 0bff08ca27ec..373de00c8fd4 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -18,31 +18,22 @@ Manages a Managed Kubernetes Cluster (also known as AKS / Azure Kubernetes Servi This example provisions a basic Managed Kubernetes Cluster. 
Other examples of the `azurerm_kubernetes_cluster` resource can be found in [the `./examples/kubernetes` directory within the Github Repository](https://github.com/terraform-providers/terraform-provider-azurerm/tree/master/examples/kubernetes) ```hcl resource "azurerm_resource_group" "example" { - name = "acctestRG1" - location = "East US" + name = "example-resources" + location = "West Europe" } resource "azurerm_kubernetes_cluster" "example" { - name = "acctestaks1" + name = "example-aks1" location = azurerm_resource_group.example.location resource_group_name = azurerm_resource_group.example.name - dns_prefix = "acctestagent1" - - agent_pool_profile { - name = "default" - count = 1 - vm_size = "Standard_D1_v2" - os_type = "Linux" - os_disk_size_gb = 30 - } + dns_prefix = "exampleaks1" - agent_pool_profile { - name = "pool2" - count = 1 - vm_size = "Standard_D2_v2" - os_type = "Linux" - os_disk_size_gb = 30 + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_D2_v2" } service_principal { @@ -56,11 +47,11 @@ resource "azurerm_kubernetes_cluster" "example" { } output "client_certificate" { - value = "${azurerm_kubernetes_cluster.example.kube_config.0.client_certificate}" + value = azurerm_kubernetes_cluster.example.kube_config.0.client_certificate } output "kube_config" { - value = "${azurerm_kubernetes_cluster.example.kube_config_raw}" + value = azurerm_kubernetes_cluster.example.kube_config_raw } ``` From 9fd7884c2b7f96b7e84936d53e3cd6a4c1898151 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Tue, 12 Nov 2019 21:06:51 +0200 Subject: [PATCH 32/45] r/kubernetes_cluster: fixing the docs/updating the publicip test --- .../data_source_kubernetes_cluster_test.go | 2 +- azurerm/resource_arm_kubernetes_cluster.go | 2 - ...rce_arm_kubernetes_cluster_network_test.go | 41 +++++++++++++++++-- .../docs/r/kubernetes_cluster.html.markdown | 23 ++++++----- 4 files changed, 50 insertions(+), 18 deletions(-) diff --git a/azurerm/data_source_kubernetes_cluster_test.go b/azurerm/data_source_kubernetes_cluster_test.go index 8468cccc6790..9ad76abfdba9 100644 --- a/azurerm/data_source_kubernetes_cluster_test.go +++ b/azurerm/data_source_kubernetes_cluster_test.go @@ -802,7 +802,7 @@ data "azurerm_kubernetes_cluster" "test" { } func testAccDataSourceAzureRMKubernetesCluster_enableNodePublicIP(rInt int, clientId string, clientSecret string, location string) string { - r := testAccAzureRMKubernetesCluster_enableNodePublicIP(rInt, clientId, clientSecret, location) + r := testAccAzureRMKubernetesCluster_enableNodePublicIP(rInt, clientId, clientSecret, location, true) return fmt.Sprintf(` %s diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index 2c0f7c765f54..de42284b4ebe 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -21,8 +21,6 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -// TODO: document default_node_pool - func resourceArmKubernetesCluster() *schema.Resource { return &schema.Resource{ Create: resourceArmKubernetesClusterCreate, diff --git a/azurerm/resource_arm_kubernetes_cluster_network_test.go b/azurerm/resource_arm_kubernetes_cluster_network_test.go index da27d1d3dc69..437f37a4f3d8 100644 --- a/azurerm/resource_arm_kubernetes_cluster_network_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_network_test.go @@ -274,7 +274,40 @@ func TestAccAzureRMKubernetesCluster_enableNodePublicIP(t *testing.T) { 
CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMKubernetesCluster_enableNodePublicIP(ri, clientId, clientSecret, location), + // Enabled + Config: testAccAzureRMKubernetesCluster_enableNodePublicIP(ri, clientId, clientSecret, location, true), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.enable_node_public_ip", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "service_principal.0.client_secret", + }, + }, + { + // Disabled + Config: testAccAzureRMKubernetesCluster_enableNodePublicIP(ri, clientId, clientSecret, location, false), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.enable_node_public_ip", "false"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "service_principal.0.client_secret", + }, + }, + { + // Enabled + Config: testAccAzureRMKubernetesCluster_enableNodePublicIP(ri, clientId, clientSecret, location, true), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.enable_node_public_ip", "true"), @@ -656,7 +689,7 @@ resource "azurerm_kubernetes_cluster" "test" { `, rInt, location, rInt, rInt, rInt, rInt, rInt, rInt, rInt, clientId, clientSecret, networkPlugin, networkPolicy) } -func testAccAzureRMKubernetesCluster_enableNodePublicIP(rInt int, clientId string, clientSecret string, location string) string { +func testAccAzureRMKubernetesCluster_enableNodePublicIP(rInt int, clientId, clientSecret, location string, enabled bool) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { name = "acctestRG-%d" @@ -673,7 +706,7 @@ resource "azurerm_kubernetes_cluster" "test" { name = "default" node_count = 1 vm_size = "Standard_DS2_v2" - enable_node_public_ip = true + enable_node_public_ip = %t } service_principal { @@ -681,7 +714,7 @@ resource "azurerm_kubernetes_cluster" "test" { client_secret = "%s" } } -`, rInt, location, rInt, rInt, clientId, clientSecret) +`, rInt, location, rInt, rInt, enabled, clientId, clientSecret) } func testAccAzureRMKubernetesCluster_internalNetwork(rInt int, clientId string, clientSecret string, location string) string { diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 373de00c8fd4..1868c5c1036c 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -224,19 +224,14 @@ A `default_node_pool` block supports the following: * `enable_auto_scaling` - (Optional) Should [the Kubernetes Auto Scaler](https://docs.microsoft.com/en-us/azure/aks/cluster-autoscaler) be enabled for this Node Pool? Defaults to `false`. --> **NOTE:** If you're using AutoScaling, you may wish to use [Terraform's `ignore_changes` functionality](https://www.terraform.io/docs/configuration/resources.html#ignore_changes) to ignore changes to the `node_count` field. - -> **NOTE:** This requires that the `type` is set to `VirtualMachineScaleSets`. * `enable_node_public_ip` - (Optional) Should nodes in this Node Pool have a Public IP Address? Defaults to `false`. 
* `max_pods` - (Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created. -* `node_count` - (Optional) The number of nodes which should exist in this Node Pool. If specified this must be between `1` and `100`. - --> **NOTE:** If `enable_auto_scaling` is set to `true`, you may wish to use [Terraform's `ignore_changes` functionality](https://www.terraform.io/docs/configuration/resources.html#ignore_changes) to ignore changes to this field. +-> **NOTE:** If you're using AutoScaling, you may wish to use [Terraform's `ignore_changes` functionality](https://www.terraform.io/docs/configuration/resources.html#ignore_changes) to ignore changes to the `node_count` field. --> **NOTE:** This is Required when `enable_auto_scaling` is set to `false`. * `node_taints` - (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g `key=value:NoSchedule`). @@ -250,11 +245,19 @@ A `default_node_pool` block supports the following: ~> **NOTE:** A Route Table must be configured on this Subnet. -If `enable_auto_scaling` is enabled, then the following fields can also be configured: +If `enable_auto_scaling` is set to `true`, then the following fields can also be configured: + +* `max_count` - (Required) The maximum number of nodes which should exist in this Node Pool. If specified this must be between `1` and `100`. -* `max_count` - (Optional) The maximum number of nodes which should exist in this Node Pool. If specified this must be between `1` and `100`. +* `min_count` - (Required) The minimum number of nodes which should exist in this Node Pool. If specified this must be between `1` and `100`. -* `min_count` - (Optional) The minimum number of nodes which should exist in this Node Pool. If specified this must be between `1` and `100`. +* `node_count` - (Optional) The initial number of nodes which should exist in this Node Pool. If specified this must be between `1` and `100` and between `min_count` and `max_count`. + +-> **NOTE:** If specified you may wish to use [Terraform's `ignore_changes` functionality](https://www.terraform.io/docs/configuration/resources.html#ignore_changes) to ignore changes to this field. + +If `enable_auto_scaling` is set to `false`, then the following fields can also be configured: + +* `node_count` - (Required) The number of nodes which should exist in this Node Pool. If specified this must be between `1` and `100`. --- @@ -394,8 +397,6 @@ provider "kubernetes" { } ``` ---- - ## Import Managed Kubernetes Clusters can be imported using the `resource id`, e.g. 
From 89f418a4e84165e37f579263d77d7dfe99c1b1f3 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 13 Nov 2019 08:50:37 +0200 Subject: [PATCH 33/45] r/kubernetes_cluster: enabling AutoScaling for TestAccAzureRMKubernetesCluster_autoScalingNodeCountUnset --- azurerm/resource_arm_kubernetes_cluster_scaling_test.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster_scaling_test.go b/azurerm/resource_arm_kubernetes_cluster_scaling_test.go index f85a449d2c19..a148bc150b46 100644 --- a/azurerm/resource_arm_kubernetes_cluster_scaling_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_scaling_test.go @@ -177,10 +177,11 @@ resource "azurerm_kubernetes_cluster" "test" { dns_prefix = "acctestaks%d" default_node_pool { - name = "default" - min_count = 2 - max_count = 4 - vm_size = "Standard_DS2_v2" + name = "default" + enable_auto_scaling = true + min_count = 2 + max_count = 4 + vm_size = "Standard_DS2_v2" } service_principal { From d553166e9daeffd484ea585d65869c491dd01452 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 13 Nov 2019 08:51:21 +0200 Subject: [PATCH 34/45] r/kubernetes_cluster: correct syntax for TestAccAzureRMKubernetesCluster_tags --- azurerm/resource_arm_kubernetes_cluster_other_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster_other_test.go b/azurerm/resource_arm_kubernetes_cluster_other_test.go index 12757b8055c4..8dc44248542d 100644 --- a/azurerm/resource_arm_kubernetes_cluster_other_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_other_test.go @@ -535,7 +535,7 @@ resource "azurerm_kubernetes_cluster" "test" { client_secret = "%s" } - tags { + tags = { dimension = "C-137" } } @@ -566,7 +566,7 @@ resource "azurerm_kubernetes_cluster" "test" { client_secret = "%s" } - tags { + tags = { dimension = "D-99" } } From f8b66e63247dce4510e6d6fc54943e3c6fbb9965 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 13 Nov 2019 08:53:20 +0200 Subject: [PATCH 35/45] r/kubernetes_cluster: fixing the assertion for TestAccAzureRMKubernetesCluster_legacyAgentPoolProfileVMSS --- azurerm/resource_arm_kubernetes_cluster_legacy_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azurerm/resource_arm_kubernetes_cluster_legacy_test.go b/azurerm/resource_arm_kubernetes_cluster_legacy_test.go index a95469c639d7..1847356a0a58 100644 --- a/azurerm/resource_arm_kubernetes_cluster_legacy_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_legacy_test.go @@ -52,7 +52,7 @@ func TestAccAzureRMKubernetesCluster_legacyAgentPoolProfileVMSS(t *testing.T) { Config: testAccAzureRMKubernetesCluster_legacyAgentPoolProfileVMSS(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.type", "VirtualMachineScaleSet"), + resource.TestCheckResourceAttr(resourceName, "agent_pool_profile.0.type", "VirtualMachineScaleSets"), ), // since users are prompted to move to `default_node_pool` ExpectNonEmptyPlan: true, From 3ede61022930892e851ac868a5859ef0cc6cb1ad Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 13 Nov 2019 14:18:58 +0200 Subject: [PATCH 36/45] r/kubernetes_cluster: fixing tests TestAccAzureRMKubernetesCluster_enableNodePublicIP - ForceNew for a PublicIP TestAccAzureRMKubernetesCluster_windowsProfile - username is forcenew --- .../containers/kubernetes_nodepool.go | 1 + 
azurerm/resource_arm_kubernetes_cluster.go | 1 + ...ource_arm_kubernetes_cluster_other_test.go | 23 ++++--------------- .../docs/r/kubernetes_cluster.html.markdown | 2 +- 4 files changed, 7 insertions(+), 20 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_nodepool.go b/azurerm/internal/services/containers/kubernetes_nodepool.go index 3c1f4cc66a0f..05b8853eb2ef 100644 --- a/azurerm/internal/services/containers/kubernetes_nodepool.go +++ b/azurerm/internal/services/containers/kubernetes_nodepool.go @@ -64,6 +64,7 @@ func SchemaDefaultNodePool() *schema.Schema { "enable_node_public_ip": { Type: schema.TypeBool, Optional: true, + ForceNew: true, }, "max_count": { diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index de42284b4ebe..e34b11d03f51 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -459,6 +459,7 @@ func resourceArmKubernetesCluster() *schema.Resource { "admin_username": { Type: schema.TypeString, Required: true, + ForceNew: true, }, "admin_password": { Type: schema.TypeString, diff --git a/azurerm/resource_arm_kubernetes_cluster_other_test.go b/azurerm/resource_arm_kubernetes_cluster_other_test.go index 8dc44248542d..881d23ed81eb 100644 --- a/azurerm/resource_arm_kubernetes_cluster_other_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_other_test.go @@ -296,7 +296,7 @@ func TestAccAzureRMKubernetesCluster_windowsProfile(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMKubernetesCluster_windowsProfile(ri, clientId, clientSecret, location, "azureuser"), + Config: testAccAzureRMKubernetesCluster_windowsProfile(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_key"), @@ -319,21 +319,6 @@ func TestAccAzureRMKubernetesCluster_windowsProfile(t *testing.T) { "service_principal.0.client_secret", }, }, - { - Config: testAccAzureRMKubernetesCluster_windowsProfile(ri, clientId, clientSecret, location, "ricksanchez"), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(resourceName), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "windows_profile.0.admin_password", - "service_principal.0.client_secret", - }, - }, }, }) } @@ -609,7 +594,7 @@ resource "azurerm_kubernetes_cluster" "test" { `, rInt, location, rInt, rInt, version, rInt, clientId, clientSecret) } -func testAccAzureRMKubernetesCluster_windowsProfile(rInt int, clientId, clientSecret, location, username string) string { +func testAccAzureRMKubernetesCluster_windowsProfile(rInt int, clientId, clientSecret, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { name = "acctestRG-%d" @@ -631,7 +616,7 @@ resource "azurerm_kubernetes_cluster" "test" { } windows_profile { - admin_username = "%s" + admin_username = "azureuser" admin_password = "P@55W0rd1234!" 
} @@ -655,5 +640,5 @@ resource "azurerm_kubernetes_cluster" "test" { service_cidr = "10.10.0.0/16" } } -`, rInt, location, rInt, rInt, rInt, username, clientId, clientSecret) +`, rInt, location, rInt, rInt, rInt, clientId, clientSecret) } diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 1868c5c1036c..44d15afafefa 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -171,7 +171,7 @@ A `agent_pool_profile` block supports the following: * `enable_auto_scaling` - (Optional) Whether to enable [auto-scaler](https://docs.microsoft.com/en-us/azure/aks/cluster-autoscaler). Note that auto scaling feature requires the that the `type` is set to `VirtualMachineScaleSets` -* `enable_node_public_ip` - (Optional) Should each node have a Public IP Address? +* `enable_node_public_ip` - (Optional) Should each node have a Public IP Address? Changing this forces a new resource to be created. * `min_count` - (Optional) Minimum number of nodes for auto-scaling. From 4c28b2e278e3d9daf11e78381ae40187c74719fd Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 13 Nov 2019 14:32:12 +0200 Subject: [PATCH 37/45] r/kubernetes_cluster: updating the default node pool using the separate api --- .../internal/services/containers/client.go | 5 ++ .../containers/kubernetes_nodepool.go | 25 +++++++ azurerm/resource_arm_kubernetes_cluster.go | 73 +++++++++++-------- 3 files changed, 72 insertions(+), 31 deletions(-) diff --git a/azurerm/internal/services/containers/client.go b/azurerm/internal/services/containers/client.go index 8a5057e762a4..2f949fb14358 100644 --- a/azurerm/internal/services/containers/client.go +++ b/azurerm/internal/services/containers/client.go @@ -8,6 +8,7 @@ import ( ) type Client struct { + AgentPoolsClient *containerservice.AgentPoolsClient KubernetesClustersClient *containerservice.ManagedClustersClient GroupsClient *containerinstance.ContainerGroupsClient RegistriesClient *containerregistry.RegistriesClient @@ -37,7 +38,11 @@ func BuildClient(o *common.ClientOptions) *Client { KubernetesClustersClient := containerservice.NewManagedClustersClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&KubernetesClustersClient.Client, o.ResourceManagerAuthorizer) + agentPoolsClient := containerservice.NewAgentPoolsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&agentPoolsClient.Client, o.ResourceManagerAuthorizer) + return &Client{ + AgentPoolsClient: &agentPoolsClient, KubernetesClustersClient: &KubernetesClustersClient, GroupsClient: &GroupsClient, RegistriesClient: &RegistriesClient, diff --git a/azurerm/internal/services/containers/kubernetes_nodepool.go b/azurerm/internal/services/containers/kubernetes_nodepool.go index 05b8853eb2ef..df574850383c 100644 --- a/azurerm/internal/services/containers/kubernetes_nodepool.go +++ b/azurerm/internal/services/containers/kubernetes_nodepool.go @@ -118,6 +118,31 @@ func SchemaDefaultNodePool() *schema.Schema { } } +func ConvertDefaultNodePoolToAgentPool(input *[]containerservice.ManagedClusterAgentPoolProfile) containerservice.AgentPool { + defaultCluster := (*input)[0] + return containerservice.AgentPool{ + Name: defaultCluster.Name, + ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ + Count: defaultCluster.Count, + VMSize: defaultCluster.VMSize, + OsDiskSizeGB: defaultCluster.OsDiskSizeGB, + VnetSubnetID: 
defaultCluster.VnetSubnetID, + MaxPods: defaultCluster.MaxPods, + OsType: defaultCluster.OsType, + MaxCount: defaultCluster.MaxCount, + MinCount: defaultCluster.MinCount, + EnableAutoScaling: defaultCluster.EnableAutoScaling, + Type: defaultCluster.Type, + OrchestratorVersion: defaultCluster.OrchestratorVersion, + AvailabilityZones: defaultCluster.AvailabilityZones, + EnableNodePublicIP: defaultCluster.EnableNodePublicIP, + ScaleSetPriority: defaultCluster.ScaleSetPriority, + ScaleSetEvictionPolicy: defaultCluster.ScaleSetEvictionPolicy, + NodeTaints: defaultCluster.NodeTaints, + }, + } +} + func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedClusterAgentPoolProfile, error) { input := d.Get("default_node_pool").([]interface{}) // TODO: in 2.0 make this Required diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index e34b11d03f51..430d8768cda0 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -685,7 +685,8 @@ func resourceArmKubernetesClusterCreate(d *schema.ResourceData, meta interface{} } func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).Containers.KubernetesClustersClient + nodePoolsClient := meta.(*ArmClient).Containers.AgentPoolsClient + clusterClient := meta.(*ArmClient).Containers.KubernetesClustersClient ctx, cancel := timeouts.ForUpdate(meta.(*ArmClient).StopContext, d) defer cancel() tenantId := meta.(*ArmClient).tenantId @@ -714,19 +715,19 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} ClientID: utils.String(clientId), Secret: utils.String(clientSecret), } - future, err := client.ResetServicePrincipalProfile(ctx, resourceGroup, name, params) + future, err := clusterClient.ResetServicePrincipalProfile(ctx, resourceGroup, name, params) if err != nil { return fmt.Errorf("Error updating Service Principal for Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } - if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + if err = future.WaitForCompletionRef(ctx, clusterClient.Client); err != nil { return fmt.Errorf("Error waiting for update of Service Principal for Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } log.Printf("[DEBUG] Updated the Service Principal for Kubernetes Cluster %q (Resource Group %q).", name, resourceGroup) } // we need to conditionally update the cluster - existing, err := client.Get(ctx, resourceGroup, name) + existing, err := clusterClient.Get(ctx, resourceGroup, name) if err != nil { return fmt.Errorf("Error retrieving existing Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } @@ -744,28 +745,6 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} existing.ManagedClusterProperties.AddonProfiles = addonProfiles } - if d.HasChange("default_node_pool") || d.HasChange("agent_pool_profile") { - updateCluster = true - agentProfiles, err := containers.ExpandDefaultNodePool(d) - if err != nil { - return fmt.Errorf("Error expanding `default_node_pool`: %+v", err) - } - - // TODO: remove me in 2.0 - if agentProfiles == nil { - agentProfilesRaw := d.Get("agent_pool_profile").([]interface{}) - agentProfilesLegacy, err := expandKubernetesClusterAgentPoolProfiles(agentProfilesRaw, false) - if err != nil { - return err - } - - agentProfiles = &agentProfilesLegacy - } - - // TODO: switch to updating via the 
AgentPools client - existing.ManagedClusterProperties.AgentPoolProfiles = agentProfiles - } - if d.HasChange("api_server_authorized_ip_ranges") { updateCluster = true apiServerAuthorizedIPRangesRaw := d.Get("api_server_authorized_ip_ranges").(*schema.Set).List() @@ -815,20 +794,52 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} if updateCluster { log.Printf("[DEBUG] Updating the Kubernetes Cluster %q (Resource Group %q)..", name, resourceGroup) - future, err := client.CreateOrUpdate(ctx, resourceGroup, name, existing) + future, err := clusterClient.CreateOrUpdate(ctx, resourceGroup, name, existing) if err != nil { return fmt.Errorf("Error updating Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } - if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + if err = future.WaitForCompletionRef(ctx, clusterClient.Client); err != nil { return fmt.Errorf("Error waiting for update of Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } log.Printf("[DEBUG] Updated the Kubernetes Cluster %q (Resource Group %q)..", name, resourceGroup) } + // update the node pool using the separate API + if d.HasChange("default_node_pool") || d.HasChange("agent_pool_profile") { + log.Printf("[DEBUG] Updating of Default Node Pool..") + + agentProfiles, err := containers.ExpandDefaultNodePool(d) + if err != nil { + return fmt.Errorf("Error expanding `default_node_pool`: %+v", err) + } + + // TODO: remove me in 2.0 + if agentProfiles == nil { + agentProfilesRaw := d.Get("agent_pool_profile").([]interface{}) + agentProfilesLegacy, err := expandKubernetesClusterAgentPoolProfiles(agentProfilesRaw, false) + if err != nil { + return err + } + + agentProfiles = &agentProfilesLegacy + } + + agentProfile := containers.ConvertDefaultNodePoolToAgentPool(agentProfiles) + agentPool, err := nodePoolsClient.CreateOrUpdate(ctx, resourceGroup, name, *agentProfile.Name, agentProfile) + if err != nil { + return fmt.Errorf("Error updating Default Node Pool %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + if err := agentPool.WaitForCompletionRef(ctx, nodePoolsClient.Client); err != nil { + return fmt.Errorf("Error waiting for update of Default Node Pool %q (Resource Group %q): %+v", name, resourceGroup, err) + } + log.Printf("[DEBUG] Updated Default Node Pool.") + } + // then roll the version of Kubernetes if necessary if d.HasChange("kubernetes_version") { - existing, err = client.Get(ctx, resourceGroup, name) + existing, err = clusterClient.Get(ctx, resourceGroup, name) if err != nil { return fmt.Errorf("Error retrieving existing Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } @@ -840,12 +851,12 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} log.Printf("[DEBUG] Upgrading the version of Kubernetes to %q..", kubernetesVersion) existing.ManagedClusterProperties.KubernetesVersion = utils.String(kubernetesVersion) - future, err := client.CreateOrUpdate(ctx, resourceGroup, name, existing) + future, err := clusterClient.CreateOrUpdate(ctx, resourceGroup, name, existing) if err != nil { return fmt.Errorf("Error updating Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } - if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + if err = future.WaitForCompletionRef(ctx, clusterClient.Client); err != nil { return fmt.Errorf("Error waiting for update of Managed Kubernetes Cluster %q (Resource Group 
%q): %+v", name, resourceGroup, err) } From ae21a2b64d325cd5dae7f141c1b8b6c32c332e7a Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Tue, 19 Nov 2019 22:24:44 +0200 Subject: [PATCH 38/45] r/kubernetes_cluster: fixing a bug where count changes weren't detected Also adding a new test to confirm removing a node pool --- .../containers/kubernetes_nodepool.go | 2 +- ...rce_arm_kubernetes_cluster_scaling_test.go | 29 +++++++++++++++++++ 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/azurerm/internal/services/containers/kubernetes_nodepool.go b/azurerm/internal/services/containers/kubernetes_nodepool.go index df574850383c..3983dc4093b6 100644 --- a/azurerm/internal/services/containers/kubernetes_nodepool.go +++ b/azurerm/internal/services/containers/kubernetes_nodepool.go @@ -203,7 +203,7 @@ func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedC autoScaledCluster := enableAutoScaling && d.IsNewResource() // however it must always be sent for manually scaled clusters - manuallyScaledCluster := !enableAutoScaling && (d.IsNewResource() || d.HasChange("default_node_pool.0.count")) + manuallyScaledCluster := !enableAutoScaling && (d.IsNewResource() || d.HasChange("default_node_pool.0.node_count")) if autoScaledCluster || manuallyScaledCluster { // users creating an auto-scaled cluster may not set the `node_count` field - if so use `min_count` diff --git a/azurerm/resource_arm_kubernetes_cluster_scaling_test.go b/azurerm/resource_arm_kubernetes_cluster_scaling_test.go index a148bc150b46..e7842e01acdc 100644 --- a/azurerm/resource_arm_kubernetes_cluster_scaling_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_scaling_test.go @@ -38,6 +38,35 @@ func TestAccAzureRMKubernetesCluster_addAgent(t *testing.T) { }) } +func TestAccAzureRMKubernetesCluster_removeAgent(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesCluster_addAgent(ri, clientId, clientSecret, location, 2), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.node_count", "2"), + ), + }, + { + Config: testAccAzureRMKubernetesCluster_addAgent(ri, clientId, clientSecret, location, 1), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.node_count", "1"), + ), + }, + }, + }) +} + func TestAccAzureRMKubernetesCluster_autoScalingNodeCountUnset(t *testing.T) { resourceName := "azurerm_kubernetes_cluster.test" ri := tf.AccRandTimeInt() From 9ef91d739b46620105140d59de2f55302230feac Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 20 Nov 2019 09:11:44 +0200 Subject: [PATCH 39/45] r/kubernetes_cluster: switching to use the value rather than keying in --- azurerm/resource_arm_kubernetes_cluster.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index 430d8768cda0..86e59d60cdee 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -1036,9 +1036,8 @@ func 
flattenKubernetesClusterAccessProfile(profile containerservice.ManagedClust func expandKubernetesClusterAgentPoolProfiles(input []interface{}, isNewResource bool) ([]containerservice.ManagedClusterAgentPoolProfile, error) { profiles := make([]containerservice.ManagedClusterAgentPoolProfile, 0) - // TODO: fix this - for config_id := range input { - config := input[config_id].(map[string]interface{}) + for _, v := range input { + config := v.(map[string]interface{}) name := config["name"].(string) poolType := config["type"].(string) From b55a402df8419d881091102ac1785120eb8f5557 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 20 Nov 2019 09:40:44 +0200 Subject: [PATCH 40/45] r/kubernetes_cluster: always submitting the value for `nodeTaints` and `availabilityZones` --- .../services/containers/kubernetes_nodepool.go | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_nodepool.go b/azurerm/internal/services/containers/kubernetes_nodepool.go index 3983dc4093b6..dfc7ba736262 100644 --- a/azurerm/internal/services/containers/kubernetes_nodepool.go +++ b/azurerm/internal/services/containers/kubernetes_nodepool.go @@ -153,11 +153,18 @@ func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedC raw := input[0].(map[string]interface{}) + availabilityZonesRaw := raw["availability_zones"].([]interface{}) + availabilityZones := utils.ExpandStringSlice(availabilityZonesRaw) enableAutoScaling := raw["enable_auto_scaling"].(bool) + nodeTaintsRaw := raw["node_taints"].([]interface{}) + nodeTaints := utils.ExpandStringSlice(nodeTaintsRaw) + profile := containerservice.ManagedClusterAgentPoolProfile{ + AvailabilityZones: availabilityZones, EnableAutoScaling: utils.Bool(enableAutoScaling), EnableNodePublicIP: utils.Bool(raw["enable_node_public_ip"].(bool)), Name: utils.String(raw["name"].(string)), + NodeTaints: nodeTaints, Type: containerservice.AgentPoolType(raw["type"].(string)), VMSize: containerservice.VMSizeTypes(raw["vm_size"].(string)), @@ -172,21 +179,10 @@ func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedC // ScaleSetPriority: "", } - availabilityZonesRaw := raw["availability_zones"].([]interface{}) - // TODO: can we remove the `if > 0` here? - if availabilityZones := utils.ExpandStringSlice(availabilityZonesRaw); len(*availabilityZones) > 0 { - profile.AvailabilityZones = availabilityZones - } if maxPods := int32(raw["max_pods"].(int)); maxPods > 0 { profile.MaxPods = utils.Int32(maxPods) } - nodeTaintsRaw := raw["node_taints"].([]interface{}) - // TODO: can we remove the `if > 0` here? 
- if nodeTaints := utils.ExpandStringSlice(nodeTaintsRaw); len(*nodeTaints) > 0 { - profile.NodeTaints = nodeTaints - } - if osDiskSizeGB := int32(raw["os_disk_size_gb"].(int)); osDiskSizeGB > 0 { profile.OsDiskSizeGB = utils.Int32(osDiskSizeGB) } From 9d6e5e327100a779adbedbdc248ae3212a09ef50 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 20 Nov 2019 09:49:19 +0200 Subject: [PATCH 41/45] r/kubernetes_cluster: making the VM SKU case sensitive --- .../services/containers/kubernetes_nodepool.go | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_nodepool.go b/azurerm/internal/services/containers/kubernetes_nodepool.go index dfc7ba736262..943b8da4a85f 100644 --- a/azurerm/internal/services/containers/kubernetes_nodepool.go +++ b/azurerm/internal/services/containers/kubernetes_nodepool.go @@ -7,7 +7,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/suppress" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -39,12 +38,10 @@ func SchemaDefaultNodePool() *schema.Schema { }, "vm_size": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - // TODO: can we remove this? - DiffSuppressFunc: suppress.CaseDifference, - ValidateFunc: validate.NoEmptyStrings, + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.NoEmptyStrings, }, // Optional From 3b9810760044eadbd3526cf122aa6a374e2d3afb Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 20 Nov 2019 09:55:19 +0200 Subject: [PATCH 42/45] r/kubernetes_cluster: adding nil-check to sub-properties --- azurerm/internal/services/containers/kubernetes_addons.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_addons.go b/azurerm/internal/services/containers/kubernetes_addons.go index 2db94d74f042..90603312d8a1 100644 --- a/azurerm/internal/services/containers/kubernetes_addons.go +++ b/azurerm/internal/services/containers/kubernetes_addons.go @@ -136,7 +136,7 @@ func ExpandKubernetesAddOnProfiles(input []interface{}) map[string]*containerser addonProfiles := map[string]*containerservice.ManagedClusterAddonProfile{} httpApplicationRouting := profile["http_application_routing"].([]interface{}) - if len(httpApplicationRouting) > 0 { + if len(httpApplicationRouting) > 0 && httpApplicationRouting[0] != nil { value := httpApplicationRouting[0].(map[string]interface{}) enabled := value["enabled"].(bool) addonProfiles["httpApplicationRouting"] = &containerservice.ManagedClusterAddonProfile{ @@ -145,7 +145,7 @@ func ExpandKubernetesAddOnProfiles(input []interface{}) map[string]*containerser } omsAgent := profile["oms_agent"].([]interface{}) - if len(omsAgent) > 0 { + if len(omsAgent) > 0 && omsAgent[0] != nil { value := omsAgent[0].(map[string]interface{}) config := make(map[string]*string) enabled := value["enabled"].(bool) @@ -161,7 +161,7 @@ func ExpandKubernetesAddOnProfiles(input []interface{}) map[string]*containerser } aciConnector := profile["aci_connector_linux"].([]interface{}) - if len(aciConnector) > 0 { + if len(aciConnector) > 0 && aciConnector[0] != nil { value := 
aciConnector[0].(map[string]interface{}) config := make(map[string]*string) enabled := value["enabled"].(bool) @@ -177,7 +177,7 @@ func ExpandKubernetesAddOnProfiles(input []interface{}) map[string]*containerser } kubeDashboard := profile["kube_dashboard"].([]interface{}) - if len(kubeDashboard) > 0 { + if len(kubeDashboard) > 0 && kubeDashboard[0] != nil { value := kubeDashboard[0].(map[string]interface{}) enabled := value["enabled"].(bool) From 82e6437860bb25de1c842740308ac8df761656cc Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 20 Nov 2019 10:01:21 +0200 Subject: [PATCH 43/45] r/kubernetes_cluster: updating the tag to allow scheduling pods --- azurerm/resource_arm_kubernetes_cluster_other_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster_other_test.go b/azurerm/resource_arm_kubernetes_cluster_other_test.go index 881d23ed81eb..ae37bebea9b9 100644 --- a/azurerm/resource_arm_kubernetes_cluster_other_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_other_test.go @@ -172,7 +172,7 @@ func TestAccAzureRMKubernetesCluster_nodeTaints(t *testing.T) { Config: testAccAzureRMKubernetesCluster_nodeTaints(ri, clientId, clientSecret, location), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.node_taints.0", "key=value:NoSchedule"), + resource.TestCheckResourceAttr(resourceName, "default_node_pool.0.node_taints.0", "key=value:PreferNoSchedule"), ), }, { @@ -456,7 +456,7 @@ resource "azurerm_kubernetes_cluster" "test" { node_count = 1 vm_size = "Standard_DS2_v2" node_taints = [ - "key=value:NoSchedule" + "key=value:PreferNoSchedule" ] } From bcefb738565feb6dfc4a2d940f454bceada817c6 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 20 Nov 2019 10:03:32 +0200 Subject: [PATCH 44/45] r/kubernetes_cluster: conditionally setting `availability_zones` --- .../services/containers/kubernetes_nodepool.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_nodepool.go b/azurerm/internal/services/containers/kubernetes_nodepool.go index 943b8da4a85f..ffe0a0d22cb4 100644 --- a/azurerm/internal/services/containers/kubernetes_nodepool.go +++ b/azurerm/internal/services/containers/kubernetes_nodepool.go @@ -149,15 +149,11 @@ func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedC } raw := input[0].(map[string]interface{}) - - availabilityZonesRaw := raw["availability_zones"].([]interface{}) - availabilityZones := utils.ExpandStringSlice(availabilityZonesRaw) enableAutoScaling := raw["enable_auto_scaling"].(bool) nodeTaintsRaw := raw["node_taints"].([]interface{}) nodeTaints := utils.ExpandStringSlice(nodeTaintsRaw) profile := containerservice.ManagedClusterAgentPoolProfile{ - AvailabilityZones: availabilityZones, EnableAutoScaling: utils.Bool(enableAutoScaling), EnableNodePublicIP: utils.Bool(raw["enable_node_public_ip"].(bool)), Name: utils.String(raw["name"].(string)), @@ -176,6 +172,14 @@ func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedC // ScaleSetPriority: "", } + availabilityZonesRaw := raw["availability_zones"].([]interface{}) + availabilityZones := utils.ExpandStringSlice(availabilityZonesRaw) + + // otherwise: Standard Load Balancer is required for availability zone. 
+ if len(*availabilityZones) > 0 { + profile.AvailabilityZones = availabilityZones + } + if maxPods := int32(raw["max_pods"].(int)); maxPods > 0 { profile.MaxPods = utils.Int32(maxPods) } From d928e22e5273e5b6afaf1d566452c17a215f98b9 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 20 Nov 2019 13:18:05 +0200 Subject: [PATCH 45/45] r/kubernetes_cluster: documentation fixes --- website/docs/r/kubernetes_cluster.html.markdown | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 44d15afafefa..f02a0c1a7803 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -18,7 +18,6 @@ Manages a Managed Kubernetes Cluster (also known as AKS / Azure Kubernetes Servi This example provisions a basic Managed Kubernetes Cluster. Other examples of the `azurerm_kubernetes_cluster` resource can be found in [the `./examples/kubernetes` directory within the Github Repository](https://github.com/terraform-providers/terraform-provider-azurerm/tree/master/examples/kubernetes) ```hcl -<<<<<<< HEAD resource "azurerm_resource_group" "example" { name = "example-resources" location = "West Europe" @@ -226,13 +225,12 @@ A `default_node_pool` block supports the following: -> **NOTE:** This requires that the `type` is set to `VirtualMachineScaleSets`. +-> **NOTE:** If you're using AutoScaling, you may wish to use [Terraform's `ignore_changes` functionality](https://www.terraform.io/docs/configuration/resources.html#ignore_changes) to ignore changes to the `node_count` field. + * `enable_node_public_ip` - (Optional) Should nodes in this Node Pool have a Public IP Address? Defaults to `false`. * `max_pods` - (Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created. --> **NOTE:** If you're using AutoScaling, you may wish to use [Terraform's `ignore_changes` functionality](https://www.terraform.io/docs/configuration/resources.html#ignore_changes) to ignore changes to the `node_count` field. - - * `node_taints` - (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g `key=value:NoSchedule`). * `os_disk_size_gb` - (Optional) The size of the OS Disk which should be used for each agent in the Node Pool. Changing this forces a new resource to be created.
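
As a companion to the `ignore_changes` note added in the documentation update above, here is a minimal configuration sketch for an auto-scaled `default_node_pool`. The resource group reference, the service principal variables, and the exact `ignore_changes` syntax are illustrative assumptions (they are not part of the patches above) and may need adjusting for the Terraform version in use.

```hcl
resource "azurerm_kubernetes_cluster" "example" {
  name                = "example-aks"
  location            = azurerm_resource_group.example.location
  resource_group_name = azurerm_resource_group.example.name
  dns_prefix          = "exampleaks"

  default_node_pool {
    name                = "default"
    vm_size             = "Standard_DS2_v2"
    enable_auto_scaling = true
    min_count           = 2
    max_count           = 4
    # node_count can be omitted for auto-scaled pools; the initial size falls back to min_count
  }

  service_principal {
    client_id     = var.client_id
    client_secret = var.client_secret
  }

  lifecycle {
    # the cluster auto-scaler changes the node count outside of Terraform,
    # so ignore drift on this field rather than reverting it on every plan
    ignore_changes = [default_node_pool[0].node_count]
  }
}
```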