From c6dc17c58b153be302bc15c06758d900f62c9216 Mon Sep 17 00:00:00 2001 From: Tom Harvey Date: Tue, 8 May 2018 07:42:12 -0700 Subject: [PATCH] New Data Source: `azurerm_kubernetes_cluster` (#1204) * New Data Source: `azurerm_kubernetes_cluster` * Documenting how to use the AKS data source with the K8S Provider * Fixing issues identified in code review * `service_principal` - setting as a List not a Set --- azurerm/data_source_kubernetes_cluster.go | 350 ++++++++++++++++++ .../data_source_kubernetes_cluster_test.go | 87 +++++ azurerm/provider.go | 1 + azurerm/resource_arm_kubernetes_cluster.go | 85 +++-- website/azurerm.erb | 4 + .../docs/d/kubernetes_cluster.html.markdown | 109 ++++++ 6 files changed, 599 insertions(+), 37 deletions(-) create mode 100644 azurerm/data_source_kubernetes_cluster.go create mode 100644 azurerm/data_source_kubernetes_cluster_test.go create mode 100644 website/docs/d/kubernetes_cluster.html.markdown diff --git a/azurerm/data_source_kubernetes_cluster.go b/azurerm/data_source_kubernetes_cluster.go new file mode 100644 index 000000000000..10cd61503d02 --- /dev/null +++ b/azurerm/data_source_kubernetes_cluster.go @@ -0,0 +1,350 @@ +package azurerm + +import ( + "fmt" + + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2017-09-30/containerservice" + "github.com/hashicorp/terraform/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/kubernetes" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func dataSourceArmKubernetesCluster() *schema.Resource { + return &schema.Resource{ + Read: dataSourceArmKubernetesClusterRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + + "resource_group_name": resourceGroupNameForDataSourceSchema(), + + "location": locationForDataSourceSchema(), + + "dns_prefix": { + Type: schema.TypeString, + Computed: true, + }, + + "fqdn": { + Type: schema.TypeString, + Computed: true, + }, + + "kubernetes_version": { + Type: schema.TypeString, + Computed: true, + }, + + "kube_config": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "host": { + Type: schema.TypeString, + Computed: true, + }, + "username": { + Type: schema.TypeString, + Computed: true, + }, + "password": { + Type: schema.TypeString, + Computed: true, + }, + "client_certificate": { + Type: schema.TypeString, + Computed: true, + }, + "client_key": { + Type: schema.TypeString, + Computed: true, + }, + "cluster_ca_certificate": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "kube_config_raw": { + Type: schema.TypeString, + Computed: true, + }, + + "linux_profile": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "admin_username": { + Type: schema.TypeString, + Computed: true, + }, + "ssh_key": { + Type: schema.TypeList, + Computed: true, + + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key_data": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + + "agent_pool_profile": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + + "count": { + Type: schema.TypeInt, + Computed: true, + }, + + "dns_prefix": { + Type: schema.TypeString, + Computed: true, + }, + + "vm_size": { + Type: schema.TypeString, + Computed: true, + }, + + 
"os_disk_size_gb": { + Type: schema.TypeInt, + Computed: true, + }, + + "vnet_subnet_id": { + Type: schema.TypeString, + Computed: true, + }, + + "os_type": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "service_principal": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "tags": tagsForDataSourceSchema(), + }, + } +} + +func dataSourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{}) error { + kubernetesClustersClient := meta.(*ArmClient).kubernetesClustersClient + client := meta.(*ArmClient) + + name := d.Get("name").(string) + resourceGroup := d.Get("resource_group_name").(string) + + ctx := client.StopContext + resp, err := kubernetesClustersClient.Get(ctx, resourceGroup, name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + d.SetId("") + return nil + } + + return fmt.Errorf("Error making Read request on AKS Managed Cluster %q (resource group %q): %+v", name, resourceGroup, err) + } + + profile, err := kubernetesClustersClient.GetAccessProfiles(ctx, resourceGroup, name, "clusterUser") + if err != nil { + return fmt.Errorf("Error getting access profile while making Read request on AKS Managed Cluster %q (resource group %q): %+v", name, resourceGroup, err) + } + + d.SetId(*resp.ID) + + d.Set("name", resp.Name) + d.Set("resource_group_name", resourceGroup) + if location := resp.Location; location != nil { + d.Set("location", azureRMNormalizeLocation(*location)) + } + + if props := resp.ManagedClusterProperties; props != nil { + d.Set("dns_prefix", props.DNSPrefix) + d.Set("fqdn", props.Fqdn) + d.Set("kubernetes_version", props.KubernetesVersion) + + linuxProfile := flattenKubernetesClusterDataSourceLinuxProfile(props.LinuxProfile) + if err := d.Set("linux_profile", linuxProfile); err != nil { + return fmt.Errorf("Error setting `linux_profile`: %+v", err) + } + + agentPoolProfiles := flattenKubernetesClusterDataSourceAgentPoolProfiles(props.AgentPoolProfiles) + if err := d.Set("agent_pool_profile", agentPoolProfiles); err != nil { + return fmt.Errorf("Error setting `agent_pool_profile`: %+v", err) + } + + servicePrincipal := flattenKubernetesClusterDataSourceServicePrincipalProfile(resp.ManagedClusterProperties.ServicePrincipalProfile) + if err := d.Set("service_principal", servicePrincipal); err != nil { + return fmt.Errorf("Error setting `service_principal`: %+v", err) + } + } + + kubeConfigRaw, kubeConfig := flattenKubernetesClusterDataSourceAccessProfile(&profile) + d.Set("kube_config_raw", kubeConfigRaw) + + if err := d.Set("kube_config", kubeConfig); err != nil { + return fmt.Errorf("Error setting `kube_config`: %+v", err) + } + + flattenAndSetTags(d, resp.Tags) + + return nil +} + +func flattenKubernetesClusterDataSourceLinuxProfile(input *containerservice.LinuxProfile) []interface{} { + values := make(map[string]interface{}) + sshKeys := make([]interface{}, 0) + + if profile := input; profile != nil { + if username := profile.AdminUsername; username != nil { + values["admin_username"] = *username + } + + if ssh := profile.SSH; ssh != nil { + if keys := ssh.PublicKeys; keys != nil { + for _, sshKey := range *keys { + if keyData := sshKey.KeyData; keyData != nil { + outputs := make(map[string]interface{}, 0) + outputs["key_data"] = *keyData + sshKeys = append(sshKeys, outputs) + } + } + } + } + } + + values["ssh_key"] = sshKeys + + return []interface{}{values} +} + +func 
flattenKubernetesClusterDataSourceAgentPoolProfiles(input *[]containerservice.AgentPoolProfile) []interface{} { + agentPoolProfiles := make([]interface{}, 0) + + if input == nil { + return agentPoolProfiles + } + + for _, profile := range *input { + agentPoolProfile := make(map[string]interface{}) + + if profile.Count != nil { + agentPoolProfile["count"] = int(*profile.Count) + } + + if profile.DNSPrefix != nil { + agentPoolProfile["dns_prefix"] = *profile.DNSPrefix + } + + if profile.Name != nil { + agentPoolProfile["name"] = *profile.Name + } + + if profile.VMSize != "" { + agentPoolProfile["vm_size"] = string(profile.VMSize) + } + + if profile.OsDiskSizeGB != nil { + agentPoolProfile["os_disk_size_gb"] = int(*profile.OsDiskSizeGB) + } + + if profile.VnetSubnetID != nil { + agentPoolProfile["vnet_subnet_id"] = *profile.VnetSubnetID + } + + if profile.OsType != "" { + agentPoolProfile["os_type"] = string(profile.OsType) + } + + agentPoolProfiles = append(agentPoolProfiles, agentPoolProfile) + } + + return agentPoolProfiles +} + +func flattenKubernetesClusterDataSourceServicePrincipalProfile(profile *containerservice.ServicePrincipalProfile) []interface{} { + if profile == nil { + return []interface{}{} + } + + values := make(map[string]interface{}) + + if clientId := profile.ClientID; clientId != nil { + values["client_id"] = *clientId + } + + return []interface{}{values} +} + +func flattenKubernetesClusterDataSourceAccessProfile(profile *containerservice.ManagedClusterAccessProfile) (*string, []interface{}) { + if profile == nil || profile.AccessProfile == nil { + return nil, []interface{}{} + } + + if kubeConfigRaw := profile.AccessProfile.KubeConfig; kubeConfigRaw != nil { + rawConfig := string(*kubeConfigRaw) + + kubeConfig, err := kubernetes.ParseKubeConfig(rawConfig) + if err != nil { + return utils.String(rawConfig), []interface{}{} + } + + flattenedKubeConfig := flattenKubernetesClusterDataSourceKubeConfig(*kubeConfig) + return utils.String(rawConfig), flattenedKubeConfig + } + + return nil, []interface{}{} +} + +func flattenKubernetesClusterDataSourceKubeConfig(config kubernetes.KubeConfig) []interface{} { + values := make(map[string]interface{}) + + cluster := config.Clusters[0].Cluster + user := config.Users[0].User + name := config.Users[0].Name + + values["host"] = cluster.Server + values["username"] = name + values["password"] = user.Token + values["client_certificate"] = user.ClientCertificteData + values["client_key"] = user.ClientKeyData + values["cluster_ca_certificate"] = cluster.ClusterAuthorityData + + return []interface{}{values} +} diff --git a/azurerm/data_source_kubernetes_cluster_test.go b/azurerm/data_source_kubernetes_cluster_test.go new file mode 100644 index 000000000000..f5cb8dfb7e42 --- /dev/null +++ b/azurerm/data_source_kubernetes_cluster_test.go @@ -0,0 +1,87 @@ +package azurerm + +import ( + "fmt" + "os" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccDataSourceAzureRMKubernetesCluster_basic(t *testing.T) { + dataSourceName := "data.azurerm_kubernetes_cluster.test" + ri := acctest.RandInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + config := testAccDataSourceAzureRMKubernetesCluster_basic(ri, clientId, clientSecret, location) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: 
testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(dataSourceName), + resource.TestCheckResourceAttrSet(dataSourceName, "kube_config.0.client_key"), + resource.TestCheckResourceAttrSet(dataSourceName, "kube_config.0.client_certificate"), + resource.TestCheckResourceAttrSet(dataSourceName, "kube_config.0.cluster_ca_certificate"), + resource.TestCheckResourceAttrSet(dataSourceName, "kube_config.0.host"), + resource.TestCheckResourceAttrSet(dataSourceName, "kube_config.0.username"), + resource.TestCheckResourceAttrSet(dataSourceName, "kube_config.0.password"), + ), + }, + }, + }) +} + +func TestAccDataSourceAzureRMKubernetesCluster_internalNetwork(t *testing.T) { + dataSourceName := "data.azurerm_kubernetes_cluster.test" + ri := acctest.RandInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + location := testLocation() + config := testAccDataSourceAzureRMKubernetesCluster_internalNetwork(ri, clientId, clientSecret, location) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(dataSourceName), + resource.TestCheckResourceAttrSet(dataSourceName, "agent_pool_profile.0.vnet_subnet_id"), + ), + }, + }, + }) +} + +func testAccDataSourceAzureRMKubernetesCluster_basic(rInt int, clientId string, clientSecret string, location string) string { + resource := testAccAzureRMKubernetesCluster_basic(rInt, clientId, clientSecret, location) + return fmt.Sprintf(` +%s + +data "azurerm_kubernetes_cluster" "test" { + name = "${azurerm_kubernetes_cluster.test.name}" + resource_group_name = "${azurerm_kubernetes_cluster.test.resource_group_name}" +} +`, resource) +} + +func testAccDataSourceAzureRMKubernetesCluster_internalNetwork(rInt int, clientId string, clientSecret string, location string) string { + resource := testAccAzureRMKubernetesCluster_internalNetwork(rInt, clientId, clientSecret, location) + return fmt.Sprintf(` +%s + +data "azurerm_kubernetes_cluster" "test" { + name = "${azurerm_kubernetes_cluster.test.name}" + resource_group_name = "${azurerm_kubernetes_cluster.test.resource_group_name}" +} +`, resource) +} diff --git a/azurerm/provider.go b/azurerm/provider.go index e4d7bd412cf1..bdc6d44bdd20 100644 --- a/azurerm/provider.go +++ b/azurerm/provider.go @@ -88,6 +88,7 @@ func Provider() terraform.ResourceProvider { "azurerm_eventhub_namespace": dataSourceEventHubNamespace(), "azurerm_image": dataSourceArmImage(), "azurerm_key_vault_access_policy": dataSourceArmKeyVaultAccessPolicy(), + "azurerm_kubernetes_cluster": dataSourceArmKubernetesCluster(), "azurerm_managed_disk": dataSourceArmManagedDisk(), "azurerm_network_interface": dataSourceArmNetworkInterface(), "azurerm_network_security_group": dataSourceArmNetworkSecurityGroup(), diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index a8765ce9a03c..e19008358af3 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -290,34 +290,36 @@ func resourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error making Read request on AKS Managed Cluster %q (resource group %q): %+v", name, resGroup, err) } + 
profile, err := kubernetesClustersClient.GetAccessProfiles(ctx, resGroup, name, "clusterUser") + if err != nil { + return fmt.Errorf("Error getting access profile while making Read request on AKS Managed Cluster %q (resource group %q): %+v", name, resGroup, err) + } + d.Set("name", resp.Name) d.Set("resource_group_name", resGroup) if location := resp.Location; location != nil { d.Set("location", azureRMNormalizeLocation(*location)) } - d.Set("dns_prefix", resp.DNSPrefix) - d.Set("fqdn", resp.Fqdn) - d.Set("kubernetes_version", resp.KubernetesVersion) - - linuxProfile := flattenAzureRmKubernetesClusterLinuxProfile(*resp.ManagedClusterProperties.LinuxProfile) - if err := d.Set("linux_profile", &linuxProfile); err != nil { - return fmt.Errorf("Error setting `linux_profile`: %+v", err) - } + if props := resp.ManagedClusterProperties; props != nil { + d.Set("dns_prefix", props.DNSPrefix) + d.Set("fqdn", props.Fqdn) + d.Set("kubernetes_version", props.KubernetesVersion) - agentPoolProfiles := flattenAzureRmKubernetesClusterAgentPoolProfiles(resp.ManagedClusterProperties.AgentPoolProfiles, resp.Fqdn) - if err := d.Set("agent_pool_profile", &agentPoolProfiles); err != nil { - return fmt.Errorf("Error setting `agent_pool_profile`: %+v", err) - } + linuxProfile := flattenAzureRmKubernetesClusterLinuxProfile(props.LinuxProfile) + if err := d.Set("linux_profile", linuxProfile); err != nil { + return fmt.Errorf("Error setting `linux_profile`: %+v", err) + } - servicePrincipal := flattenAzureRmKubernetesClusterServicePrincipalProfile(resp.ManagedClusterProperties.ServicePrincipalProfile) - if servicePrincipal != nil { - d.Set("service_principal", servicePrincipal) - } + agentPoolProfiles := flattenAzureRmKubernetesClusterAgentPoolProfiles(props.AgentPoolProfiles, resp.Fqdn) + if err := d.Set("agent_pool_profile", agentPoolProfiles); err != nil { + return fmt.Errorf("Error setting `agent_pool_profile`: %+v", err) + } - profile, err := kubernetesClustersClient.GetAccessProfiles(ctx, resGroup, name, "clusterUser") - if err != nil { - return fmt.Errorf("Error getting access profile while making Read request on AKS Managed Cluster %q (resource group %q): %+v", name, resGroup, err) + servicePrincipal := flattenAzureRmKubernetesClusterServicePrincipalProfile(resp.ManagedClusterProperties.ServicePrincipalProfile) + if err := d.Set("service_principal", servicePrincipal); err != nil { + return fmt.Errorf("Error setting `service_principal`: %+v", err) + } } kubeConfigRaw, kubeConfig := flattenAzureRmKubernetesClusterAccessProfile(&profile) @@ -352,27 +354,34 @@ func resourceArmKubernetesClusterDelete(d *schema.ResourceData, meta interface{} return future.WaitForCompletion(ctx, kubernetesClustersClient.Client) } -func flattenAzureRmKubernetesClusterLinuxProfile(profile containerservice.LinuxProfile) []interface{} { - profiles := make([]interface{}, 0) +func flattenAzureRmKubernetesClusterLinuxProfile(input *containerservice.LinuxProfile) []interface{} { values := make(map[string]interface{}) - sshKeys := make([]interface{}, 0, len(*profile.SSH.PublicKeys)) + sshKeys := make([]interface{}, 0) - for _, ssh := range *profile.SSH.PublicKeys { - keys := make(map[string]interface{}) - keys["key_data"] = *ssh.KeyData - sshKeys = append(sshKeys, keys) - } + if profile := input; profile != nil { + if username := profile.AdminUsername; username != nil { + values["admin_username"] = *username + } - values["admin_username"] = *profile.AdminUsername + if ssh := profile.SSH; ssh != nil { + if keys := ssh.PublicKeys; keys != nil 
{ + for _, sshKey := range *keys { + outputs := make(map[string]interface{}, 0) + if keyData := sshKey.KeyData; keyData != nil { + outputs["key_data"] = *keyData + } + sshKeys = append(sshKeys, outputs) + } + } + } + } values["ssh_key"] = sshKeys - profiles = append(profiles, values) - - return profiles + return []interface{}{values} } func flattenAzureRmKubernetesClusterAgentPoolProfiles(profiles *[]containerservice.AgentPoolProfile, fqdn *string) []interface{} { - agentPoolProfiles := make([]interface{}, 0, len(*profiles)) + agentPoolProfiles := make([]interface{}, 0) for _, profile := range *profiles { agentPoolProfile := make(map[string]interface{}) @@ -427,9 +436,11 @@ func flattenAzureRmKubernetesClusterServicePrincipalProfile(profile *containerse values := make(map[string]interface{}) - values["client_id"] = *profile.ClientID - if profile.Secret != nil { - values["client_secret"] = *profile.Secret + if clientId := profile.ClientID; clientId != nil { + values["client_id"] = *clientId + } + if secret := profile.Secret; secret != nil { + values["client_secret"] = *secret } servicePrincipalProfiles.Add(values) @@ -448,7 +459,7 @@ func flattenAzureRmKubernetesClusterAccessProfile(profile *containerservice.Mana return utils.String(rawConfig), []interface{}{} } - flattenedKubeConfig := flattenKubeConfig(*kubeConfig) + flattenedKubeConfig := flattenKubernetesClusterKubeConfig(*kubeConfig) return utils.String(rawConfig), flattenedKubeConfig } } @@ -456,7 +467,7 @@ func flattenAzureRmKubernetesClusterAccessProfile(profile *containerservice.Mana return nil, []interface{}{} } -func flattenKubeConfig(config kubernetes.KubeConfig) []interface{} { +func flattenKubernetesClusterKubeConfig(config kubernetes.KubeConfig) []interface{} { values := make(map[string]interface{}) cluster := config.Clusters[0].Cluster diff --git a/website/azurerm.erb b/website/azurerm.erb index 7142801abd6e..e5a91a873599 100644 --- a/website/azurerm.erb +++ b/website/azurerm.erb @@ -72,6 +72,10 @@ azurerm_key_vault_access_policy + > + azurerm_kubernetes_cluster + + > azurerm_managed_disk diff --git a/website/docs/d/kubernetes_cluster.html.markdown b/website/docs/d/kubernetes_cluster.html.markdown new file mode 100644 index 000000000000..11fd4bf06d4f --- /dev/null +++ b/website/docs/d/kubernetes_cluster.html.markdown @@ -0,0 +1,109 @@ +--- +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_kubernetes_cluster" +sidebar_current: "docs-azurerm-data-source-kubernetes-cluster" +description: |- + Gets information about a managed Kubernetes Cluster (AKS) +--- + +# Data Source: azurerm_kubernetes_cluster + +Gets information about a managed Kubernetes Cluster (AKS) + +~> **Note:** All arguments including the client secret will be stored in the raw state as plain-text. +[Read more about sensitive data in state](/docs/state/sensitive-data.html). + + +## Example Usage + +```hcl +data "azurerm_kubernetes_cluster" "test" { + name = "myakscluster" + resource_group_name = "my-example-resource-group" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the managed Kubernetes Cluster. + +* `resource_group_name` - (Required) The name of the Resource Group in which the managed Kubernetes Cluster exists. + +## Attributes Reference + +The following attributes are exported: + +* `id` - The Kubernetes Managed Cluster ID. + +* `fqdn` - The FQDN of the Azure Kubernetes Managed Cluster. + +* `kube_config_raw` - Base64 encoded Kubernetes configuration. 
+ +* `kube_config` - A `kube_config` block as defined below. + +* `location` - The Azure Region in which the managed Kubernetes Cluster exists. + +* `dns_prefix` - The DNS Prefix of the managed Kubernetes cluster. + +* `kubernetes_version` - The version of Kubernetes used on the managed Kubernetes Cluster. + +* `linux_profile` - A `linux_profile` block as documented below. + +* `agent_pool_profile` - One or more `agent_pool_profile` blocks as documented below. + +* `service_principal` - A `service_principal` block as documented below. + +* `tags` - A mapping of tags assigned to this resource. + +--- + +`kube_config` exports the following: + +* `client_key` - Base64 encoded private key used by clients to authenticate to the Kubernetes cluster. + +* `client_certificate` - Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster. + +* `cluster_ca_certificate` - Base64 encoded public CA certificate used as the root of trust for the Kubernetes cluster. + +* `host` - The Kubernetes cluster server host. + +* `username` - A username used to authenticate to the Kubernetes cluster. + +* `password` - A password or token used to authenticate to the Kubernetes cluster. + +-> **NOTE:** It's possible to use these credentials with [the Kubernetes Provider](/docs/providers/kubernetes/index.html) like so: + +```hcl +provider "kubernetes" { + host = "${data.azurerm_kubernetes_cluster.main.kube_config.0.host}" + username = "${data.azurerm_kubernetes_cluster.main.kube_config.0.username}" + password = "${data.azurerm_kubernetes_cluster.main.kube_config.0.password}" + client_certificate = "${base64decode(data.azurerm_kubernetes_cluster.main.kube_config.0.client_certificate)}" + client_key = "${base64decode(data.azurerm_kubernetes_cluster.main.kube_config.0.client_key)}" + cluster_ca_certificate = "${base64decode(data.azurerm_kubernetes_cluster.main.kube_config.0.cluster_ca_certificate)}" +} +``` + +`linux_profile` exports the following: + +* `admin_username` - The username associated with the administrator account of the managed Kubernetes Cluster. +* `ssh_key` - One or more `ssh_key` blocks as defined below. + +`ssh_key` exports the following: + +* `key_data` - The Public SSH Key used to access the cluster. + +`agent_pool_profile` exports the following: + +* `name` - The name assigned to this pool of agents. +* `count` - The number of Agents (VMs) in the Pool. +* `vm_size` - The size of each VM in the Agent Pool (e.g. `Standard_F1`). +* `os_disk_size_gb` - The size of the Agent VM's Operating System Disk in GB. +* `os_type` - The Operating System used for the Agents. +* `vnet_subnet_id` - The ID of the Subnet where the Agents in the Pool are provisioned. + +`service_principal` exports the following: + +* `client_id` - The Client ID of the Service Principal used by this Managed Kubernetes Cluster.
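The NOTE in the new documentation only shows wiring the credentials into the Kubernetes provider; the other exported attributes can be consumed the same way. A minimal sketch, reusing the placeholder cluster and resource group names from the Example Usage above and assuming the cluster has at least one agent pool (hence the `.0` index):

```hcl
data "azurerm_kubernetes_cluster" "example" {
  name                = "myakscluster"
  resource_group_name = "my-example-resource-group"
}

# FQDN of the managed control plane
output "cluster_fqdn" {
  value = "${data.azurerm_kubernetes_cluster.example.fqdn}"
}

# VM size of the first agent pool
output "agent_pool_vm_size" {
  value = "${data.azurerm_kubernetes_cluster.example.agent_pool_profile.0.vm_size}"
}

# Raw kubeconfig, marked sensitive so it is not printed in plan/apply output
output "kube_config" {
  value     = "${data.azurerm_kubernetes_cluster.example.kube_config_raw}"
  sensitive = true
}
```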
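The flatten helpers in `data_source_kubernetes_cluster.go` index straight into `config.Clusters[0].Cluster` and `config.Users[0].User`, so they rely on `kubernetes.ParseKubeConfig` rejecting kubeconfigs that define no clusters or users. That helper package is not part of this diff; the sketch below only illustrates the assumed shape — the Go field names (`Server`, `ClusterAuthorityData`, `Token`, `ClientCertificteData`, `ClientKeyData`) are taken from the calls in the flatten functions, while the YAML tags, error messages and empty-slice check are illustrative assumptions.

```go
// Sketch of the assumed helpers/kubernetes package, not the actual implementation.
package kubernetes

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// KubeConfig models only the pieces of a kubeconfig document that the
// data source reads back out of ParseKubeConfig's result.
type KubeConfig struct {
	Clusters []ClusterItem `yaml:"clusters"`
	Users    []UserItem    `yaml:"users"`
}

type ClusterItem struct {
	Name    string  `yaml:"name"`
	Cluster Cluster `yaml:"cluster"`
}

type Cluster struct {
	ClusterAuthorityData string `yaml:"certificate-authority-data"`
	Server               string `yaml:"server"`
}

type UserItem struct {
	Name string   `yaml:"name"`
	User UserInfo `yaml:"user"`
}

type UserInfo struct {
	ClientCertificteData string `yaml:"client-certificate-data"`
	ClientKeyData        string `yaml:"client-key-data"`
	Token                string `yaml:"token"`
}

// ParseKubeConfig unmarshals a raw kubeconfig and (in this sketch) insists on
// at least one cluster and one user entry, which is what would make the
// `[0]` indexing in the flatten functions safe.
func ParseKubeConfig(config string) (*KubeConfig, error) {
	if config == "" {
		return nil, fmt.Errorf("cannot parse an empty kubeconfig")
	}

	var kubeConfig KubeConfig
	if err := yaml.Unmarshal([]byte(config), &kubeConfig); err != nil {
		return nil, fmt.Errorf("cannot unmarshal kubeconfig YAML: %+v", err)
	}

	if len(kubeConfig.Clusters) == 0 || len(kubeConfig.Users) == 0 {
		return nil, fmt.Errorf("kubeconfig does not define any clusters or users")
	}

	return &kubeConfig, nil
}
```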
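The new acceptance tests build their configurations on top of `testAccAzureRMKubernetesCluster_basic` and `testAccAzureRMKubernetesCluster_internalNetwork` from the existing resource tests, which are not included in this diff. As a rough idea of what the basic variant provisions before the data source reads it back — every name, size and credential below is a placeholder, not the actual test fixture:

```hcl
resource "azurerm_resource_group" "test" {
  name     = "acctestRG-12345"
  location = "West Europe"
}

resource "azurerm_kubernetes_cluster" "test" {
  name                = "acctestaks12345"
  location            = "${azurerm_resource_group.test.location}"
  resource_group_name = "${azurerm_resource_group.test.name}"
  dns_prefix          = "acctestagent12345"

  linux_profile {
    admin_username = "acctestuser1"

    ssh_key {
      # placeholder public key
      key_data = "ssh-rsa AAAA... hello@example.com"
    }
  }

  agent_pool_profile {
    name    = "default"
    count   = 1
    vm_size = "Standard_DS2_v2"
  }

  service_principal {
    # supplied via ARM_CLIENT_ID / ARM_CLIENT_SECRET in the tests
    client_id     = "00000000-0000-0000-0000-000000000000"
    client_secret = "00000000-0000-0000-0000-000000000000"
  }
}
```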