diff --git a/azurerm/import_arm_kubernetes_cluster_test.go b/azurerm/import_arm_kubernetes_cluster_test.go
index a90fa889a9d4..c1e8c6776da0 100644
--- a/azurerm/import_arm_kubernetes_cluster_test.go
+++ b/azurerm/import_arm_kubernetes_cluster_test.go
@@ -32,3 +32,28 @@ func TestAccAzureRMKubernetesCluster_importBasic(t *testing.T) {
 		},
 	})
 }
+
+func TestAccAzureRMKubernetesCluster_importLinuxProfile(t *testing.T) {
+	resourceName := "azurerm_kubernetes_cluster.test"
+
+	ri := acctest.RandInt()
+	clientId := os.Getenv("ARM_CLIENT_ID")
+	clientSecret := os.Getenv("ARM_CLIENT_SECRET")
+	config := testAccAzureRMKubernetesCluster_linuxProfile(ri, clientId, clientSecret, testLocation())
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testCheckAzureRMKubernetesClusterDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: config,
+			},
+			{
+				ResourceName:      resourceName,
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+		},
+	})
+}
diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go
index 2e245b57d09c..125903ef607a 100644
--- a/azurerm/resource_arm_kubernetes_cluster.go
+++ b/azurerm/resource_arm_kubernetes_cluster.go
@@ -135,7 +135,7 @@ func resourceArmKubernetesCluster() *schema.Resource {
 
 			"linux_profile": {
 				Type:     schema.TypeList,
-				Required: true,
+				Optional: true,
 				MaxItems: 1,
 				Elem: &schema.Resource{
 					Schema: map[string]*schema.Schema{
@@ -403,7 +403,7 @@ func resourceArmKubernetesClusterCreate(d *schema.ResourceData, meta interface{}
 			AgentPoolProfiles:       &agentProfiles,
 			DNSPrefix:               &dnsPrefix,
 			KubernetesVersion:       &kubernetesVersion,
-			LinuxProfile:            &linuxProfile,
+			LinuxProfile:            linuxProfile,
 			ServicePrincipalProfile: servicePrincipalProfile,
 			NetworkProfile:          networkProfile,
 		},
@@ -532,33 +532,40 @@ func resourceArmKubernetesClusterDelete(d *schema.ResourceData, meta interface{}
 	return future.WaitForCompletionRef(ctx, kubernetesClustersClient.Client)
 }
 
-func flattenAzureRmKubernetesClusterLinuxProfile(input *containerservice.LinuxProfile) []interface{} {
+func flattenAzureRmKubernetesClusterLinuxProfile(profile *containerservice.LinuxProfile) []interface{} {
+	if profile == nil {
+		return []interface{}{}
+	}
+
 	values := make(map[string]interface{})
 	sshKeys := make([]interface{}, 0)
 
-	if profile := input; profile != nil {
-		if username := profile.AdminUsername; username != nil {
-			values["admin_username"] = *username
-		}
+	if username := profile.AdminUsername; username != nil {
+		values["admin_username"] = *username
+	}
 
-		if ssh := profile.SSH; ssh != nil {
-			if keys := ssh.PublicKeys; keys != nil {
-				for _, sshKey := range *keys {
-					outputs := make(map[string]interface{}, 0)
-					if keyData := sshKey.KeyData; keyData != nil {
-						outputs["key_data"] = *keyData
-					}
-					sshKeys = append(sshKeys, outputs)
+	if ssh := profile.SSH; ssh != nil {
+		if keys := ssh.PublicKeys; keys != nil {
+			for _, sshKey := range *keys {
+				outputs := make(map[string]interface{}, 0)
+				if keyData := sshKey.KeyData; keyData != nil {
+					outputs["key_data"] = *keyData
 				}
+				sshKeys = append(sshKeys, outputs)
 			}
 		}
 	}
+
 	values["ssh_key"] = sshKeys
 
 	return []interface{}{values}
 }
 
 func flattenAzureRmKubernetesClusterAgentPoolProfiles(profiles *[]containerservice.ManagedClusterAgentPoolProfile, fqdn *string) []interface{} {
+	if profiles == nil {
+		return []interface{}{}
+	}
+
 	agentPoolProfiles := make([]interface{}, 0)
 
 	for _, profile := range *profiles {
@@ -650,6 +657,10 @@ func flattenAzureRmKubernetesClusterAccessProfile(profile *containerservice.Mana
 }
 
 func flattenAzureRmKubernetesClusterNetworkProfile(profile *containerservice.NetworkProfile) []interface{} {
+	if profile == nil {
+		return []interface{}{}
+	}
+
 	values := make(map[string]interface{})
 
 	values["network_plugin"] = profile.NetworkPlugin
@@ -690,8 +701,13 @@ func flattenKubernetesClusterKubeConfig(config kubernetes.KubeConfig) []interfac
 	return []interface{}{values}
 }
 
-func expandAzureRmKubernetesClusterLinuxProfile(d *schema.ResourceData) containerservice.LinuxProfile {
+func expandAzureRmKubernetesClusterLinuxProfile(d *schema.ResourceData) *containerservice.LinuxProfile {
 	profiles := d.Get("linux_profile").([]interface{})
+
+	if len(profiles) == 0 {
+		return nil
+	}
+
 	config := profiles[0].(map[string]interface{})
 
 	adminUsername := config["admin_username"].(string)
@@ -714,7 +730,7 @@ func expandAzureRmKubernetesClusterLinuxProfile(d *schema.ResourceData) containe
 		},
 	}
 
-	return profile
+	return &profile
 }
 
 func expandAzureRmKubernetesClusterServicePrincipal(d *schema.ResourceData) *containerservice.ServicePrincipalProfile {
diff --git a/azurerm/resource_arm_kubernetes_cluster_test.go b/azurerm/resource_arm_kubernetes_cluster_test.go
index f40e23808205..86fde1c0837a 100644
--- a/azurerm/resource_arm_kubernetes_cluster_test.go
+++ b/azurerm/resource_arm_kubernetes_cluster_test.go
@@ -98,6 +98,36 @@ func TestAccAzureRMKubernetesCluster_basic(t *testing.T) {
 	})
 }
 
+func TestAccAzureRMKubernetesCluster_linuxProfile(t *testing.T) {
+	resourceName := "azurerm_kubernetes_cluster.test"
+	ri := acctest.RandInt()
+	clientId := os.Getenv("ARM_CLIENT_ID")
+	clientSecret := os.Getenv("ARM_CLIENT_SECRET")
+	config := testAccAzureRMKubernetesCluster_linuxProfile(ri, clientId, clientSecret, testLocation())
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testCheckAzureRMKubernetesClusterDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: config,
+				Check: resource.ComposeTestCheckFunc(
+					testCheckAzureRMKubernetesClusterExists(resourceName),
+					resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_key"),
+					resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_certificate"),
+					resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.cluster_ca_certificate"),
+					resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.host"),
+					resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.username"),
+					resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.password"),
+					resource.TestCheckResourceAttrSet(resourceName, "agent_pool_profile.0.max_pods"),
+					resource.TestCheckResourceAttrSet(resourceName, "linux_profile.0.admin_username"),
+				),
+			},
+		},
+	})
+}
+
 func TestAccAzureRMKubernetesCluster_addAgent(t *testing.T) {
 	resourceName := "azurerm_kubernetes_cluster.test"
 	ri := acctest.RandInt()
@@ -150,7 +180,7 @@ func TestAccAzureRMKubernetesCluster_upgradeConfig(t *testing.T) {
 				Config: upgradeConfig,
 				Check: resource.ComposeTestCheckFunc(
 					testCheckAzureRMKubernetesClusterExists(resourceName),
-					resource.TestCheckResourceAttr(resourceName, "kubernetes_version", "1.8.1"),
+					resource.TestCheckResourceAttr(resourceName, "kubernetes_version", "1.11.2"),
 				),
 			},
 		},
@@ -336,7 +366,34 @@ resource "azurerm_kubernetes_cluster" "test" {
   location            = "${azurerm_resource_group.test.location}"
   resource_group_name = "${azurerm_resource_group.test.name}"
   dns_prefix          = "acctestaks%d"
-  kubernetes_version  = "1.7.7"
+  kubernetes_version  = "1.10.7"
+
+  agent_pool_profile {
+    name    = "default"
+    count   = "1"
+    vm_size = "Standard_DS2_v2"
+  }
+
+  service_principal {
+    client_id     = "%s"
+    client_secret = "%s"
+  }
+}
+`, rInt, location, rInt, rInt, clientId, clientSecret)
+}
+
+func testAccAzureRMKubernetesCluster_linuxProfile(rInt int, clientId string, clientSecret string, location string) string {
+	return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+  name     = "acctestRG-%d"
+  location = "%s"
+}
+
+resource "azurerm_kubernetes_cluster" "test" {
+  name                = "acctestaks%d"
+  location            = "${azurerm_resource_group.test.location}"
+  resource_group_name = "${azurerm_resource_group.test.name}"
+  dns_prefix          = "acctestaks%d"
 
   linux_profile {
     admin_username = "acctestuser%d"
@@ -372,15 +429,7 @@ resource "azurerm_kubernetes_cluster" "test" {
   location            = "${azurerm_resource_group.test.location}"
   resource_group_name = "${azurerm_resource_group.test.name}"
   dns_prefix          = "acctestaks%d"
-  kubernetes_version  = "1.7.7"
-
-  linux_profile {
-    admin_username = "acctestuser%d"
-
-    ssh_key {
-      key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld"
-    }
-  }
+  kubernetes_version  = "1.10.7"
 
   agent_pool_profile {
     name    = "default"
@@ -393,7 +442,7 @@ resource "azurerm_kubernetes_cluster" "test" {
     client_secret = "%s"
   }
 }
-`, rInt, location, rInt, rInt, rInt, clientId, clientSecret)
+`, rInt, location, rInt, rInt, clientId, clientSecret)
 }
 
 func testAccAzureRMKubernetesCluster_internalNetwork(rInt int, clientId string, clientSecret string, location string) string {
@@ -426,7 +475,6 @@ resource "azurerm_kubernetes_cluster" "test" {
   location            = "${azurerm_resource_group.test.location}"
   resource_group_name = "${azurerm_resource_group.test.name}"
   dns_prefix          = "acctestaks%d"
-  kubernetes_version  = "1.7.7"
 
   linux_profile {
     admin_username = "acctestuser%d"
@@ -471,7 +519,7 @@ resource "azurerm_kubernetes_cluster" "test" {
   location            = "${azurerm_resource_group.test.location}"
   resource_group_name = "${azurerm_resource_group.test.name}"
   dns_prefix          = "acctestaks%d"
-  kubernetes_version  = "1.7.7"
+
   linux_profile {
     admin_username = "acctestuser%d"
     ssh_key {
@@ -489,7 +537,7 @@ resource "azurerm_kubernetes_cluster" "test" {
     client_id     = "%s"
     client_secret = "%s"
   }
-  
+
   addon_profile {
     oms_agent {
       enabled = true
@@ -512,7 +560,7 @@ resource "azurerm_kubernetes_cluster" "test" {
   location            = "${azurerm_resource_group.test.location}"
  resource_group_name = "${azurerm_resource_group.test.name}"
   dns_prefix          = "acctestaks%d"
-  kubernetes_version  = "1.7.7"
+
   linux_profile {
     admin_username = "acctestuser%d"
     ssh_key {
@@ -552,7 +600,7 @@ resource "azurerm_kubernetes_cluster" "test" {
   location            = "${azurerm_resource_group.test.location}"
   resource_group_name = "${azurerm_resource_group.test.name}"
   dns_prefix          = "acctestaks%d"
-  kubernetes_version  = "1.8.1"
+  kubernetes_version  = "1.11.2"
 
   linux_profile {
     admin_username = "acctestuser%d"
@@ -606,7 +654,6 @@ resource "azurerm_kubernetes_cluster" "test" {
   location            = "${azurerm_resource_group.test.location}"
   resource_group_name = "${azurerm_resource_group.test.name}"
   dns_prefix          = "acctestaks%d"
-  kubernetes_version  = "1.7.7"
 
   linux_profile {
     admin_username = "acctestuser%d"
@@ -665,7 +712,6 @@ resource "azurerm_kubernetes_cluster" "test" {
   location            = "${azurerm_resource_group.test.location}"
   resource_group_name = "${azurerm_resource_group.test.name}"
   dns_prefix          = "acctestaks%d"
-  kubernetes_version  = "1.7.7"
 
   linux_profile {
     admin_username = "acctestuser%d"
diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown
index 49113a917c71..c34215d04c17 100644
--- a/website/docs/r/kubernetes_cluster.html.markdown
+++ b/website/docs/r/kubernetes_cluster.html.markdown
@@ -27,14 +27,6 @@ resource "azurerm_kubernetes_cluster" "test" {
   resource_group_name = "${azurerm_resource_group.test.name}"
   dns_prefix          = "acctestagent1"
 
-  linux_profile {
-    admin_username = "acctestuser1"
-
-    ssh_key {
-      key_data = "ssh-rsa ..."
-    }
-  }
-
   agent_pool_profile {
     name    = "default"
     count   = 1
@@ -180,7 +172,7 @@ The following arguments are supported:
 
 * `dns_prefix` - (Required) DNS prefix specified when creating the managed cluster.
 
-* `linux_profile` - (Required) A Linux Profile block as documented below.
+* `linux_profile` - (Optional) A Linux Profile block as documented below.
 
 * `agent_pool_profile` - (Required) One or more Agent Pool Profile's block as documented below.