From c30c39953c10b296ef4d978a7bd246dbc4fa7b96 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Mon, 30 Sep 2019 20:25:58 +0200 Subject: [PATCH 01/11] r/kubernetes_cluster: adding a separate update method --- azurerm/resource_arm_kubernetes_cluster.go | 95 ++++++++++++++++++++-- 1 file changed, 90 insertions(+), 5 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index 6e38c8e2da18..bab71b8f8c6d 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -20,9 +20,9 @@ import ( func resourceArmKubernetesCluster() *schema.Resource { return &schema.Resource{ - Create: resourceArmKubernetesClusterCreateUpdate, + Create: resourceArmKubernetesClusterCreate, Read: resourceArmKubernetesClusterRead, - Update: resourceArmKubernetesClusterCreateUpdate, + Update: resourceArmKubernetesClusterUpdate, Delete: resourceArmKubernetesClusterDelete, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, @@ -624,17 +624,17 @@ func resourceArmKubernetesCluster() *schema.Resource { } } -func resourceArmKubernetesClusterCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceArmKubernetesClusterCreate(d *schema.ResourceData, meta interface{}) error { client := meta.(*ArmClient).containers.KubernetesClustersClient ctx := meta.(*ArmClient).StopContext tenantId := meta.(*ArmClient).tenantId - log.Printf("[INFO] preparing arguments for Managed Kubernetes Cluster create/update.") + log.Printf("[INFO] preparing arguments for Managed Kubernetes Cluster create.") resGroup := d.Get("resource_group_name").(string) name := d.Get("name").(string) - if features.ShouldResourcesBeImported() && d.IsNewResource() { + if features.ShouldResourcesBeImported() { existing, err := client.Get(ctx, resGroup, name) if err != nil { if !utils.ResponseWasNotFound(existing.Response) { @@ -717,6 +717,91 @@ func resourceArmKubernetesClusterCreateUpdate(d *schema.ResourceData, meta inter return resourceArmKubernetesClusterRead(d, meta) } +func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).containers.KubernetesClustersClient + ctx := meta.(*ArmClient).StopContext + tenantId := meta.(*ArmClient).tenantId + + log.Printf("[INFO] preparing arguments for Managed Kubernetes Cluster update.") + + id, err := azure.ParseAzureResourceID(d.Id()) + if err != nil { + return err + } + + resourceGroup := id.ResourceGroup + name := id.Path["managedClusters"] + + location := azure.NormalizeLocation(d.Get("location").(string)) + dnsPrefix := d.Get("dns_prefix").(string) + kubernetesVersion := d.Get("kubernetes_version").(string) + + linuxProfile := expandKubernetesClusterLinuxProfile(d) + agentProfiles, err := expandKubernetesClusterAgentPoolProfiles(d) + if err != nil { + return err + } + windowsProfile := expandKubernetesClusterWindowsProfile(d) + networkProfile := expandKubernetesClusterNetworkProfile(d) + servicePrincipalProfile := expandAzureRmKubernetesClusterServicePrincipal(d) + addonProfiles := expandKubernetesClusterAddonProfiles(d) + + t := d.Get("tags").(map[string]interface{}) + + rbacRaw := d.Get("role_based_access_control").([]interface{}) + rbacEnabled, azureADProfile := expandKubernetesClusterRoleBasedAccessControl(rbacRaw, tenantId) + + apiServerAuthorizedIPRangesRaw := d.Get("api_server_authorized_ip_ranges").(*schema.Set).List() + apiServerAuthorizedIPRanges := utils.ExpandStringSlice(apiServerAuthorizedIPRangesRaw) + + 
nodeResourceGroup := d.Get("node_resource_group").(string) + + enablePodSecurityPolicy := d.Get("enable_pod_security_policy").(bool) + + parameters := containerservice.ManagedCluster{ + Name: &name, + Location: &location, + ManagedClusterProperties: &containerservice.ManagedClusterProperties{ + APIServerAuthorizedIPRanges: apiServerAuthorizedIPRanges, + AadProfile: azureADProfile, + AddonProfiles: addonProfiles, + AgentPoolProfiles: &agentProfiles, + DNSPrefix: utils.String(dnsPrefix), + EnableRBAC: utils.Bool(rbacEnabled), + KubernetesVersion: utils.String(kubernetesVersion), + LinuxProfile: linuxProfile, + WindowsProfile: windowsProfile, + NetworkProfile: networkProfile, + ServicePrincipalProfile: servicePrincipalProfile, + NodeResourceGroup: utils.String(nodeResourceGroup), + EnablePodSecurityPolicy: utils.Bool(enablePodSecurityPolicy), + }, + Tags: tags.Expand(t), + } + + future, err := client.CreateOrUpdate(ctx, resourceGroup, name, parameters) + if err != nil { + return fmt.Errorf("Error creating/updating Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("Error waiting for completion of Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + read, err := client.Get(ctx, resourceGroup, name) + if err != nil { + return fmt.Errorf("Error retrieving Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + if read.ID == nil { + return fmt.Errorf("Cannot read ID for Managed Kubernetes Cluster %q (Resource Group %q)", name, resourceGroup) + } + + d.SetId(*read.ID) + + return resourceArmKubernetesClusterRead(d, meta) +} + func resourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{}) error { client := meta.(*ArmClient).containers.KubernetesClustersClient ctx := meta.(*ArmClient).StopContext From 690055797da4ab8aeef6253213a745547d3fadee Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Mon, 30 Sep 2019 20:32:09 +0200 Subject: [PATCH 02/11] r/kubernetes_cluster: conditionally updating the sp --- azurerm/resource_arm_kubernetes_cluster.go | 27 +++++++++++++++++-- .../docs/r/kubernetes_cluster.html.markdown | 9 +++---- 2 files changed, 29 insertions(+), 7 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index bab71b8f8c6d..de393835712a 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -223,13 +223,11 @@ func resourceArmKubernetesCluster() *schema.Resource { "client_id": { Type: schema.TypeString, Required: true, - ForceNew: true, ValidateFunc: validate.NoEmptyStrings, }, "client_secret": { Type: schema.TypeString, - ForceNew: true, Required: true, Sensitive: true, ValidateFunc: validate.NoEmptyStrings, @@ -732,6 +730,29 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} resourceGroup := id.ResourceGroup name := id.Path["managedClusters"] + if d.HasChange("service_principal") { + log.Printf("[DEBUG] Updating the Service Principal for Kubernetes Cluster %q (Resource Group %q)..", name, resourceGroup) + servicePrincipals := d.Get("service_principal").([]interface{}) + servicePrincipalRaw := servicePrincipals[0].(map[string]interface{}) + + clientId := servicePrincipalRaw["client_id"].(string) + clientSecret := servicePrincipalRaw["client_secret"].(string) + + params := containerservice.ManagedClusterServicePrincipalProfile{ + 
ClientID: utils.String(clientId), + Secret: utils.String(clientSecret), + } + future, err := client.ResetServicePrincipalProfile(ctx, resourceGroup, name, params) + if err != nil { + return fmt.Errorf("Error updating Service Principal for Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("Error waiting for update of Service Principal for Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + log.Printf("[DEBUG] Updated the Service Principal for Kubernetes Cluster %q (Resource Group %q).", name, resourceGroup) + } + location := azure.NormalizeLocation(d.Get("location").(string)) dnsPrefix := d.Get("dns_prefix").(string) kubernetesVersion := d.Get("kubernetes_version").(string) @@ -758,10 +779,12 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} enablePodSecurityPolicy := d.Get("enable_pod_security_policy").(bool) + // TODO: should these values be conditionally updated? parameters := containerservice.ManagedCluster{ Name: &name, Location: &location, ManagedClusterProperties: &containerservice.ManagedClusterProperties{ + // TODO: should this be conditionally updated APIServerAuthorizedIPRanges: apiServerAuthorizedIPRanges, AadProfile: azureADProfile, AddonProfiles: addonProfiles, diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 2f021d740d87..e70f4ae123bc 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -24,8 +24,8 @@ resource "azurerm_resource_group" "test" { resource "azurerm_kubernetes_cluster" "test" { name = "acctestaks1" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestagent1" agent_pool_profile { @@ -261,9 +261,9 @@ A `role_based_access_control` block supports the following: A `service_principal` block supports the following: -* `client_id` - (Required) The Client ID for the Service Principal. Changing this forces a new resource to be created. +* `client_id` - (Required) The Client ID for the Service Principal. -* `client_secret` - (Required) The Client Secret for the Service Principal. Changing this forces a new resource to be created. +* `client_secret` - (Required) The Client Secret for the Service Principal. --- @@ -271,7 +271,6 @@ A `ssh_key` block supports the following: * `key_data` - (Required) The Public SSH Key used to access the cluster. Changing this forces a new resource to be created. 
- ## Attributes Reference The following attributes are exported: From dc5d00a0aa84b00b68e5ecc07790e34893ffa807 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Tue, 1 Oct 2019 08:23:30 +0200 Subject: [PATCH 03/11] r/kubernetes_cluster: ensuring all fields are set --- azurerm/resource_arm_kubernetes_cluster.go | 304 ++++++++++++--------- 1 file changed, 169 insertions(+), 135 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index de393835712a..73737e12da41 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -784,7 +784,6 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} Name: &name, Location: &location, ManagedClusterProperties: &containerservice.ManagedClusterProperties{ - // TODO: should this be conditionally updated APIServerAuthorizedIPRanges: apiServerAuthorizedIPRanges, AadProfile: azureADProfile, AddonProfiles: addonProfiles, @@ -1203,61 +1202,69 @@ func flattenKubernetesClusterAgentPoolProfiles(profiles *[]containerservice.Mana agentPoolProfiles := make([]interface{}, 0) for _, profile := range *profiles { - agentPoolProfile := make(map[string]interface{}) - - if profile.Type != "" { - agentPoolProfile["type"] = string(profile.Type) - } - + count := 0 if profile.Count != nil { - agentPoolProfile["count"] = int(*profile.Count) - } - - if profile.MinCount != nil { - agentPoolProfile["min_count"] = int(*profile.MinCount) - } - - if profile.MaxCount != nil { - agentPoolProfile["max_count"] = int(*profile.MaxCount) + count = int(*profile.Count) } + enableAutoScaling := false if profile.EnableAutoScaling != nil { - agentPoolProfile["enable_auto_scaling"] = *profile.EnableAutoScaling + enableAutoScaling = *profile.EnableAutoScaling } - agentPoolProfile["availability_zones"] = utils.FlattenStringSlice(profile.AvailabilityZones) - + fqdnVal := "" if fqdn != nil { // temporarily persist the parent FQDN here until `fqdn` is removed from the `agent_pool_profile` - agentPoolProfile["fqdn"] = *fqdn + fqdnVal = *fqdn } - if profile.Name != nil { - agentPoolProfile["name"] = *profile.Name + maxCount := 0 + if profile.MaxCount != nil { + maxCount = int(*profile.MaxCount) } - if profile.VMSize != "" { - agentPoolProfile["vm_size"] = string(profile.VMSize) + maxPods := 0 + if profile.MaxPods != nil { + maxPods = int(*profile.MaxPods) } - if profile.OsDiskSizeGB != nil { - agentPoolProfile["os_disk_size_gb"] = int(*profile.OsDiskSizeGB) + minCount := 0 + if profile.MinCount != nil { + minCount = int(*profile.MinCount) } - if profile.VnetSubnetID != nil { - agentPoolProfile["vnet_subnet_id"] = *profile.VnetSubnetID + name := "" + if profile.Name != nil { + name = *profile.Name } - if profile.OsType != "" { - agentPoolProfile["os_type"] = string(profile.OsType) + osDiskSizeGB := 0 + if profile.OsDiskSizeGB != nil { + osDiskSizeGB = int(*profile.OsDiskSizeGB) } - if profile.MaxPods != nil { - agentPoolProfile["max_pods"] = int(*profile.MaxPods) + subnetId := "" + if profile.VnetSubnetID != nil { + subnetId = *profile.VnetSubnetID } - if profile.NodeTaints != nil { - agentPoolProfile["node_taints"] = *profile.NodeTaints + agentPoolProfile := map[string]interface{}{ + "availability_zones": utils.FlattenStringSlice(profile.AvailabilityZones), + "count": count, + "enable_auto_scaling": enableAutoScaling, + "max_count": maxCount, + "max_pods": maxPods, + "min_count": minCount, + "name": name, + "node_taints": utils.FlattenStringSlice(profile.NodeTaints), 
+ "os_disk_size_gb": osDiskSizeGB, + "os_type": string(profile.OsType), + "type": string(profile.Type), + "vm_size": string(profile.VMSize), + "vnet_subnet_id": subnetId, + + // TODO: remove in 2.0 + "fqdn": fqdnVal, } agentPoolProfiles = append(agentPoolProfiles, agentPoolProfile) @@ -1282,20 +1289,50 @@ func expandKubernetesClusterLinuxProfile(d *schema.ResourceData) *containerservi if key, ok := linuxKeys[0].(map[string]interface{}); ok { keyData = key["key_data"].(string) } - sshPublicKey := containerservice.SSHPublicKey{ - KeyData: &keyData, - } - - sshPublicKeys := []containerservice.SSHPublicKey{sshPublicKey} - profile := containerservice.LinuxProfile{ + return &containerservice.LinuxProfile{ AdminUsername: &adminUsername, SSH: &containerservice.SSHConfiguration{ - PublicKeys: &sshPublicKeys, + PublicKeys: &[]containerservice.SSHPublicKey{ + { + KeyData: &keyData, + }, + }, }, } +} - return &profile +func flattenKubernetesClusterLinuxProfile(profile *containerservice.LinuxProfile) []interface{} { + if profile == nil { + return []interface{}{} + } + + adminUsername := "" + if username := profile.AdminUsername; username != nil { + adminUsername = *username + } + + sshKeys := make([]interface{}, 0) + if ssh := profile.SSH; ssh != nil { + if keys := ssh.PublicKeys; keys != nil { + for _, sshKey := range *keys { + keyData := "" + if kd := sshKey.KeyData; kd != nil { + keyData = *kd + } + sshKeys = append(sshKeys, map[string]interface{}{ + "key_data": keyData, + }) + } + } + } + + return []interface{}{ + map[string]interface{}{ + "username": adminUsername, + "ssh_key": sshKeys, + }, + } } func expandKubernetesClusterWindowsProfile(d *schema.ResourceData) *containerservice.ManagedClusterWindowsProfile { @@ -1318,52 +1355,28 @@ func expandKubernetesClusterWindowsProfile(d *schema.ResourceData) *containerser return &profile } -func flattenKubernetesClusterLinuxProfile(profile *containerservice.LinuxProfile) []interface{} { - if profile == nil { - return []interface{}{} - } - - values := make(map[string]interface{}) - - if username := profile.AdminUsername; username != nil { - values["admin_username"] = *username - } - - sshKeys := make([]interface{}, 0) - if ssh := profile.SSH; ssh != nil { - if keys := ssh.PublicKeys; keys != nil { - for _, sshKey := range *keys { - outputs := make(map[string]interface{}) - if keyData := sshKey.KeyData; keyData != nil { - outputs["key_data"] = *keyData - } - sshKeys = append(sshKeys, outputs) - } - } - } - - values["ssh_key"] = sshKeys - - return []interface{}{values} -} - func flattenKubernetesClusterWindowsProfile(profile *containerservice.ManagedClusterWindowsProfile, d *schema.ResourceData) []interface{} { if profile == nil { return []interface{}{} } - values := make(map[string]interface{}) - + adminUsername := "" if username := profile.AdminUsername; username != nil { - values["admin_username"] = *username + adminUsername = *username } // admin password isn't returned, so let's look it up + adminPassword := "" if v, ok := d.GetOk("windows_profile.0.admin_password"); ok { - values["admin_password"] = v.(string) + adminPassword = v.(string) } - return []interface{}{values} + return []interface{}{ + map[string]interface{}{ + "admin_password": adminPassword, + "admin_username": adminUsername, + }, + } } func expandKubernetesClusterNetworkProfile(d *schema.ResourceData) *containerservice.NetworkProfileType { @@ -1375,9 +1388,7 @@ func expandKubernetesClusterNetworkProfile(d *schema.ResourceData) *containerser config := configs[0].(map[string]interface{}) 
networkPlugin := config["network_plugin"].(string) - networkPolicy := config["network_policy"].(string) - loadBalancerSku := config["load_balancer_sku"].(string) networkProfile := containerservice.NetworkProfileType{ @@ -1414,35 +1425,37 @@ func flattenKubernetesClusterNetworkProfile(profile *containerservice.NetworkPro return []interface{}{} } - values := make(map[string]interface{}) - - values["network_plugin"] = profile.NetworkPlugin - - if profile.NetworkPolicy != "" { - values["network_policy"] = string(profile.NetworkPolicy) - } - - if profile.ServiceCidr != nil { - values["service_cidr"] = *profile.ServiceCidr - } - + dnsServiceIP := "" if profile.DNSServiceIP != nil { - values["dns_service_ip"] = *profile.DNSServiceIP + dnsServiceIP = *profile.DNSServiceIP } + dockerBridgeCidr := "" if profile.DockerBridgeCidr != nil { - values["docker_bridge_cidr"] = *profile.DockerBridgeCidr + dockerBridgeCidr = *profile.DockerBridgeCidr } - if profile.PodCidr != nil { - values["pod_cidr"] = *profile.PodCidr + serviceCidr := "" + if profile.ServiceCidr != nil { + serviceCidr = *profile.ServiceCidr } - if profile.LoadBalancerSku != "" { - values["load_balancer_sku"] = string(profile.LoadBalancerSku) + podCidr := "" + if profile.PodCidr != nil { + podCidr = *profile.PodCidr } - return []interface{}{values} + return []interface{}{ + map[string]interface{}{ + "dns_service_ip": dnsServiceIP, + "docker_bridge_cidr": dockerBridgeCidr, + "load_balancer_sku": string(profile.LoadBalancerSku), + "network_plugin": string(profile.NetworkPlugin), + "network_policy": string(profile.NetworkPolicy), + "pod_cidr": podCidr, + "service_cidr": serviceCidr, + }, + } } func expandKubernetesClusterRoleBasedAccessControl(input []interface{}, providerTenantId string) (bool, *containerservice.ManagedClusterAADProfile) { @@ -1487,16 +1500,18 @@ func flattenKubernetesClusterRoleBasedAccessControl(input *containerservice.Mana results := make([]interface{}, 0) if profile := input.AadProfile; profile != nil { - output := make(map[string]interface{}) + clientAppId := "" if profile.ClientAppID != nil { - output["client_app_id"] = *profile.ClientAppID + clientAppId = *profile.ClientAppID } + serverAppId := "" if profile.ServerAppID != nil { - output["server_app_id"] = *profile.ServerAppID + serverAppId = *profile.ServerAppID } + serverAppSecret := "" // since input.ServerAppSecret isn't returned we're pulling this out of the existing state (which won't work for Imports) // role_based_access_control.0.azure_active_directory.0.server_app_secret if existing, ok := d.GetOk("role_based_access_control"); ok { @@ -1507,17 +1522,23 @@ func flattenKubernetesClusterRoleBasedAccessControl(input *containerservice.Mana azureADVal := azureADVals[0].(map[string]interface{}) v := azureADVal["server_app_secret"] if v != nil { - output["server_app_secret"] = v.(string) + serverAppSecret = v.(string) } } } } + tenantId := "" if profile.TenantID != nil { - output["tenant_id"] = *profile.TenantID + tenantId = *profile.TenantID } - results = append(results, output) + results = append(results, map[string]interface{}{ + "client_app_id": clientAppId, + "server_app_id": serverAppId, + "server_app_secret": serverAppSecret, + "tenant_id": tenantId, + }) } return []interface{}{ @@ -1541,12 +1562,10 @@ func expandAzureRmKubernetesClusterServicePrincipal(d *schema.ResourceData) *con clientId := config["client_id"].(string) clientSecret := config["client_secret"].(string) - principal := containerservice.ManagedClusterServicePrincipalProfile{ + return 
&containerservice.ManagedClusterServicePrincipalProfile{ ClientID: &clientId, Secret: &clientSecret, } - - return &principal } func flattenAzureRmKubernetesClusterServicePrincipalProfile(profile *containerservice.ManagedClusterServicePrincipalProfile, d *schema.ResourceData) []interface{} { @@ -1554,53 +1573,68 @@ func flattenAzureRmKubernetesClusterServicePrincipalProfile(profile *containerse return []interface{}{} } - values := make(map[string]interface{}) - - if clientId := profile.ClientID; clientId != nil { - values["client_id"] = *clientId + clientId := "" + if v := profile.ClientID; v != nil { + clientId = *v } // client secret isn't returned by the API so pass the existing value along - if v, ok := d.GetOk("service_principal.0.client_secret"); ok { - values["client_secret"] = v.(string) + clientSecret := "" + if sp, ok := d.GetOk("service_principal"); ok { + var val []interface{} + + // prior to 1.34 this was a *schema.Set, now it's a List - try both + if v, ok := sp.([]interface{}); ok { + val = v + } else if v, ok := sp.(*schema.Set); ok { + val = v.List() + } + + if len(val) > 0 { + raw := val[0].(map[string]interface{}) + clientSecret = raw["client_secret"].(string) + } } - return []interface{}{values} + return []interface{}{ + map[string]interface{}{ + "client_id": clientId, + "client_secret": clientSecret, + }, + } } func flattenKubernetesClusterKubeConfig(config kubernetes.KubeConfig) []interface{} { - values := make(map[string]interface{}) - // we don't size-check these since they're validated in the Parse method cluster := config.Clusters[0].Cluster user := config.Users[0].User name := config.Users[0].Name - values["host"] = cluster.Server - values["username"] = name - values["password"] = user.Token - values["client_certificate"] = user.ClientCertificteData - values["client_key"] = user.ClientKeyData - values["cluster_ca_certificate"] = cluster.ClusterAuthorityData - - return []interface{}{values} + return []interface{}{ + map[string]interface{}{ + "client_certificate": user.ClientCertificteData, + "client_key": user.ClientKeyData, + "cluster_ca_certificate": cluster.ClusterAuthorityData, + "host": cluster.Server, + "password": user.Token, + "username": name, + }, + } } func flattenKubernetesClusterKubeConfigAAD(config kubernetes.KubeConfigAAD) []interface{} { - values := make(map[string]interface{}) - // we don't size-check these since they're validated in the Parse method cluster := config.Clusters[0].Cluster name := config.Users[0].Name - values["host"] = cluster.Server - values["username"] = name - - values["password"] = "" - values["client_certificate"] = "" - values["client_key"] = "" - - values["cluster_ca_certificate"] = cluster.ClusterAuthorityData - - return []interface{}{values} + return []interface{}{ + map[string]interface{}{ + "client_certificate": "", + "client_key": "", + "cluster_ca_certificate": cluster.ClusterAuthorityData, + "host": cluster.Server, + "password": "", + "username": name, + }, + } } From 5c0bd224a2406f284d8972ebbc3079e5e8a34f86 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Tue, 1 Oct 2019 08:30:35 +0200 Subject: [PATCH 04/11] r/kubernetes_cluster: fixing TestAccAzureRMKubernetesCluster_advancedNetworkingKubenet --- azurerm/resource_arm_kubernetes_cluster_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/azurerm/resource_arm_kubernetes_cluster_test.go b/azurerm/resource_arm_kubernetes_cluster_test.go index c675b68f3e65..138c121aad41 100644 --- a/azurerm/resource_arm_kubernetes_cluster_test.go +++ 
b/azurerm/resource_arm_kubernetes_cluster_test.go @@ -1583,6 +1583,9 @@ resource "azurerm_subnet" "test" { resource_group_name = "${azurerm_resource_group.test.name}" virtual_network_name = "${azurerm_virtual_network.test.name}" address_prefix = "10.1.0.0/24" + + # required until 2.0 + ignore_changes = ["route_table_id"] } resource "azurerm_kubernetes_cluster" "test" { From 355228264d537153d64f2a6085f46cb9e7343420 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Tue, 1 Oct 2019 08:38:06 +0200 Subject: [PATCH 05/11] r/kubernetes_cluster: using constant versions of kubernetes --- .../resource_arm_kubernetes_cluster_test.go | 23 +++++++++++-------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster_test.go b/azurerm/resource_arm_kubernetes_cluster_test.go index 138c121aad41..a11cf23981f0 100644 --- a/azurerm/resource_arm_kubernetes_cluster_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_test.go @@ -12,6 +12,9 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features" ) +var olderKubernetesVersion = "1.13.10" +var currentKubernetesVersion = "1.14.6" + func TestAccAzureRMKubernetesCluster_basic(t *testing.T) { resourceName := "azurerm_kubernetes_cluster.test" ri := tf.AccRandTimeInt() @@ -294,17 +297,17 @@ func TestAccAzureRMKubernetesCluster_upgradeConfig(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMKubernetesCluster_upgrade(ri, location, clientId, clientSecret, "1.12.7"), + Config: testAccAzureRMKubernetesCluster_upgrade(ri, location, clientId, clientSecret, olderKubernetesVersion), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "kubernetes_version", "1.12.7"), + resource.TestCheckResourceAttr(resourceName, "kubernetes_version", olderKubernetesVersion), ), }, { - Config: testAccAzureRMKubernetesCluster_upgrade(ri, location, clientId, clientSecret, "1.13.5"), + Config: testAccAzureRMKubernetesCluster_upgrade(ri, location, clientId, clientSecret, currentKubernetesVersion), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "kubernetes_version", "1.13.5"), + resource.TestCheckResourceAttr(resourceName, "kubernetes_version", currentKubernetesVersion), ), }, }, @@ -1871,7 +1874,7 @@ resource "azurerm_kubernetes_cluster" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" dns_prefix = "acctestaks%d" - kubernetes_version = "1.13.5" + kubernetes_version = "%s" linux_profile { @@ -1899,7 +1902,7 @@ resource "azurerm_kubernetes_cluster" "test" { load_balancer_sku = "standard" } } -`, rInt, location, rInt, rInt, rInt, rInt, rInt, clientId, clientSecret) +`, rInt, location, rInt, rInt, rInt, rInt, currentKubernetesVersion, rInt, clientId, clientSecret) } func testAccAzureRMKubernetesCluster_standardLoadBalancerComplete(rInt int, clientId string, clientSecret string, location string) string { @@ -1951,7 +1954,7 @@ resource "azurerm_kubernetes_cluster" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" dns_prefix = "acctestaks%d" - kubernetes_version = "1.13.5" + kubernetes_version = "%s" linux_profile { admin_username = "acctestuser%d" @@ -1981,7 +1984,7 @@ resource "azurerm_kubernetes_cluster" "test" { 
load_balancer_sku = "standard" } } -`, rInt, location, rInt, rInt, rInt, rInt, rInt, rInt, rInt, clientId, clientSecret) +`, rInt, location, rInt, rInt, rInt, rInt, rInt, rInt, currentKubernetesVersion, rInt, clientId, clientSecret) } func testAccAzureRMKubernetesCluster_apiServerAuthorizedIPRanges(rInt int, clientId string, clientSecret string, location string) string { @@ -2120,7 +2123,7 @@ resource "azurerm_kubernetes_cluster" "test" { location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" dns_prefix = "acctestaks%d" - kubernetes_version = "1.13.5" + kubernetes_version = "%s" agent_pool_profile { name = "pool1" @@ -2142,7 +2145,7 @@ resource "azurerm_kubernetes_cluster" "test" { load_balancer_sku = "standard" } } -`, rInt, location, rInt, rInt, clientId, clientSecret) +`, rInt, location, rInt, rInt, olderKubernetesVersion, clientId, clientSecret) } func testAccAzureRMKubernetesCluster_nodeTaints(rInt int, clientId string, clientSecret string, location string) string { From 4f1d97339f163bf4c87a1b464156f7dc74614586 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Tue, 1 Oct 2019 08:39:48 +0200 Subject: [PATCH 06/11] r/kubernetes_cluster: fixing TestAccAzureRMKubernetesCluster_internalNetwork --- azurerm/resource_arm_kubernetes_cluster_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/azurerm/resource_arm_kubernetes_cluster_test.go b/azurerm/resource_arm_kubernetes_cluster_test.go index a11cf23981f0..eca5b69c19e8 100644 --- a/azurerm/resource_arm_kubernetes_cluster_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_test.go @@ -1270,6 +1270,9 @@ resource "azurerm_subnet" "test" { resource_group_name = "${azurerm_resource_group.test.name}" virtual_network_name = "${azurerm_virtual_network.test.name}" address_prefix = "172.0.2.0/24" + + # TODO: remove in 2.0 + ignore_changes = ["route_table_id"] } resource "azurerm_kubernetes_cluster" "test" { From b8e516ef51de63b845c5f41dd72b77f94da913df Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Tue, 1 Oct 2019 09:49:20 +0200 Subject: [PATCH 07/11] r/kubernetes_cluster: setting the correct field --- azurerm/resource_arm_kubernetes_cluster.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index 73737e12da41..7b4671a4bf41 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -1329,8 +1329,8 @@ func flattenKubernetesClusterLinuxProfile(profile *containerservice.LinuxProfile return []interface{}{ map[string]interface{}{ - "username": adminUsername, - "ssh_key": sshKeys, + "admin_username": adminUsername, + "ssh_key": sshKeys, }, } } From 2abc24bee89970f4d716dc7c637ac7e6bad8b42e Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Tue, 1 Oct 2019 09:53:55 +0200 Subject: [PATCH 08/11] r/kubernetes_cluster: adding an ignore_changes until 2.0 --- azurerm/resource_arm_kubernetes_cluster_test.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster_test.go b/azurerm/resource_arm_kubernetes_cluster_test.go index eca5b69c19e8..e179a8f6ac4b 100644 --- a/azurerm/resource_arm_kubernetes_cluster_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_test.go @@ -1272,7 +1272,9 @@ resource "azurerm_subnet" "test" { address_prefix = "172.0.2.0/24" # TODO: remove in 2.0 - ignore_changes = ["route_table_id"] + lifecycle { + ignore_changes = ["route_table_id"] + } } resource 
"azurerm_kubernetes_cluster" "test" { @@ -1590,8 +1592,10 @@ resource "azurerm_subnet" "test" { virtual_network_name = "${azurerm_virtual_network.test.name}" address_prefix = "10.1.0.0/24" - # required until 2.0 - ignore_changes = ["route_table_id"] + # TODO: remove in 2.0 + lifecycle { + ignore_changes = ["route_table_id"] + } } resource "azurerm_kubernetes_cluster" "test" { From c870d6ba361a593f44b8d9bf834054226472592a Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Tue, 1 Oct 2019 09:58:24 +0200 Subject: [PATCH 09/11] linting --- azurerm/resource_arm_kubernetes_cluster.go | 1 - 1 file changed, 1 deletion(-) diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index 7b4671a4bf41..054aa1926973 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -1500,7 +1500,6 @@ func flattenKubernetesClusterRoleBasedAccessControl(input *containerservice.Mana results := make([]interface{}, 0) if profile := input.AadProfile; profile != nil { - clientAppId := "" if profile.ClientAppID != nil { clientAppId = *profile.ClientAppID From 2649727baa06efe55b487ed4a10aed1e8fb11ec5 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Tue, 1 Oct 2019 10:01:22 +0200 Subject: [PATCH 10/11] r/kubernetes_cluster: fixing the log messages --- azurerm/resource_arm_kubernetes_cluster.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index 054aa1926973..9759c8f16e84 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -694,11 +694,11 @@ func resourceArmKubernetesClusterCreate(d *schema.ResourceData, meta interface{} future, err := client.CreateOrUpdate(ctx, resGroup, name, parameters) if err != nil { - return fmt.Errorf("Error creating/updating Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resGroup, err) + return fmt.Errorf("Error creating Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resGroup, err) } if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for completion of Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resGroup, err) + return fmt.Errorf("Error waiting for creation of Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resGroup, err) } read, err := client.Get(ctx, resGroup, name) @@ -803,11 +803,11 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} future, err := client.CreateOrUpdate(ctx, resourceGroup, name, parameters) if err != nil { - return fmt.Errorf("Error creating/updating Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("Error updating Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for completion of Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("Error waiting for update of Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } read, err := client.Get(ctx, resourceGroup, name) From 838eca980feb602a98da44d606ddf0e3a7a1dc39 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Tue, 1 Oct 2019 10:40:07 +0200 Subject: [PATCH 11/11] r/kubernetes_cluster: fixing the casing for the sku --- 
azurerm/resource_arm_kubernetes_cluster_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster_test.go b/azurerm/resource_arm_kubernetes_cluster_test.go index e179a8f6ac4b..3ee78bdd04cb 100644 --- a/azurerm/resource_arm_kubernetes_cluster_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_test.go @@ -643,7 +643,7 @@ func TestAccAzureRMKubernetesCluster_standardLoadBalancer(t *testing.T) { Config: config, Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "network_profile.0.load_balancer_sku", "standard"), + resource.TestCheckResourceAttr(resourceName, "network_profile.0.load_balancer_sku", "Standard"), ), }, }, @@ -666,7 +666,7 @@ func TestAccAzureRMKubernetesCluster_standardLoadBalancerComplete(t *testing.T) Config: config, Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "network_profile.0.load_balancer_sku", "standard"), + resource.TestCheckResourceAttr(resourceName, "network_profile.0.load_balancer_sku", "Standard"), ), }, }, @@ -1906,7 +1906,7 @@ resource "azurerm_kubernetes_cluster" "test" { network_profile { network_plugin = "azure" - load_balancer_sku = "standard" + load_balancer_sku = "Standard" } } `, rInt, location, rInt, rInt, rInt, rInt, currentKubernetesVersion, rInt, clientId, clientSecret) @@ -1988,7 +1988,7 @@ resource "azurerm_kubernetes_cluster" "test" { dns_service_ip = "10.10.0.10" docker_bridge_cidr = "172.18.0.1/16" service_cidr = "10.10.0.0/16" - load_balancer_sku = "standard" + load_balancer_sku = "Standard" } } `, rInt, location, rInt, rInt, rInt, rInt, rInt, rInt, currentKubernetesVersion, rInt, clientId, clientSecret) @@ -2149,7 +2149,7 @@ resource "azurerm_kubernetes_cluster" "test" { network_profile { network_plugin = "kubenet" - load_balancer_sku = "standard" + load_balancer_sku = "Standard" } } `, rInt, location, rInt, rInt, olderKubernetesVersion, clientId, clientSecret)
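For context on the user-facing effect of patches 01 and 02: because `client_id` and `client_secret` in the `service_principal` block no longer force a new resource, the credentials of an existing cluster can be rotated in place (the update path calls ResetServicePrincipalProfile when the `service_principal` block changes). Below is a minimal illustrative configuration, not taken from the patches above; the resource names, variables, and values are hypothetical.

    variable "client_id" {}
    variable "client_secret" {}

    resource "azurerm_resource_group" "example" {
      name     = "example-aks-rg"
      location = "West Europe"
    }

    resource "azurerm_kubernetes_cluster" "example" {
      name                = "example-aks"
      location            = azurerm_resource_group.example.location
      resource_group_name = azurerm_resource_group.example.name
      dns_prefix          = "exampleaks"

      agent_pool_profile {
        name    = "default"
        count   = 1
        vm_size = "Standard_DS2_v2"
      }

      service_principal {
        client_id = var.client_id

        # With these patches applied, changing only this value should trigger
        # an in-place credential reset instead of destroying and recreating
        # the cluster.
        client_secret = var.client_secret
      }
    }

Changing `var.client_secret` and running `terraform apply` should then update the existing cluster rather than replace it.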