
azurerm_kubernetes_cluster - support specifying the enable_pod_security_policy property #4098

Merged
merged 1 commit on Aug 15, 2019
10 changes: 10 additions & 0 deletions azurerm/resource_arm_kubernetes_cluster.go
@@ -596,6 +596,12 @@ func resourceArmKubernetesCluster() *schema.Resource {
ValidateFunc: validate.CIDR,
},
},

"enable_pod_security_policy": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
},
},
}
}
@@ -647,6 +653,8 @@ func resourceArmKubernetesClusterCreateUpdate(d *schema.ResourceData, meta inter

nodeResourceGroup := d.Get("node_resource_group").(string)

enablePodSecurityPolicy := d.Get("enable_pod_security_policy").(bool)

parameters := containerservice.ManagedCluster{
Name: &name,
Location: &location,
@@ -663,6 +671,7 @@ func resourceArmKubernetesClusterCreateUpdate(d *schema.ResourceData, meta inter
NetworkProfile: networkProfile,
ServicePrincipalProfile: servicePrincipalProfile,
NodeResourceGroup: utils.String(nodeResourceGroup),
EnablePodSecurityPolicy: utils.Bool(enablePodSecurityPolicy),
},
Tags: expandTags(tags),
}
@@ -728,6 +737,7 @@ func resourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{})
d.Set("fqdn", props.Fqdn)
d.Set("kubernetes_version", props.KubernetesVersion)
d.Set("node_resource_group", props.NodeResourceGroup)
d.Set("enable_pod_security_policy", props.EnablePodSecurityPolicy)

apiServerAuthorizedIPRanges := utils.FlattenStringSlice(props.APIServerAuthorizedIPRanges)
if err := d.Set("api_server_authorized_ip_ranges", apiServerAuthorizedIPRanges); err != nil {
62 changes: 61 additions & 1 deletion azurerm/resource_arm_kubernetes_cluster_test.go
@@ -852,6 +852,34 @@ func TestAccAzureRMKubernetesCluster_nodeResourceGroup(t *testing.T) {
})
}

func TestAccAzureRMKubernetesCluster_enablePodSecurityPolicy(t *testing.T) {
resourceName := "azurerm_kubernetes_cluster.test"
ri := tf.AccRandTimeInt()
clientId := os.Getenv("ARM_CLIENT_ID")
clientSecret := os.Getenv("ARM_CLIENT_SECRET")
config := testAccAzureRMKubernetesCluster_enablePodSecurityPolicy(ri, clientId, clientSecret, testLocation())

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMKubernetesClusterDestroy,
Steps: []resource.TestStep{
{
Config: config,
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMKubernetesClusterExists(resourceName),
resource.TestCheckResourceAttr(resourceName, "enable_pod_security_policy", "true"),
),
},
{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
},
},
})
}

func testCheckAzureRMKubernetesClusterExists(resourceName string) resource.TestCheckFunc {
return func(s *terraform.State) error {
// Ensure we have enough information in state to look up in API
@@ -1045,7 +1073,7 @@ resource "azurerm_kubernetes_cluster" "test" {
client_id = "%s"
client_secret = "%s"
}

network_profile {
network_plugin = "azure"
network_policy = "azure"
@@ -2096,3 +2124,35 @@ resource "azurerm_kubernetes_cluster" "test" {
}
`, rInt, location, rInt, rInt, rInt, clientId, clientSecret)
}

func testAccAzureRMKubernetesCluster_enablePodSecurityPolicy(rInt int, clientId string, clientSecret string, location string) string {
return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "%s"
}

resource "azurerm_kubernetes_cluster" "test" {
name = "acctestaks%d"
location = "${azurerm_resource_group.test.location}"
resource_group_name = "${azurerm_resource_group.test.name}"
dns_prefix = "acctestaks%d"
enable_pod_security_policy = true

role_based_access_control {
enabled = true
}

agent_pool_profile {
name = "default"
count = "1"
vm_size = "Standard_DS2_v2"
}

service_principal {
client_id = "%s"
client_secret = "%s"
}
}
`, rInt, location, rInt, rInt, clientId, clientSecret)
}
4 changes: 4 additions & 0 deletions website/docs/r/kubernetes_cluster.html.markdown
@@ -126,6 +126,10 @@ resource "azurerm_subnet" "virtual" {

* `role_based_access_control` - (Optional) A `role_based_access_control` block. Changing this forces a new resource to be created.

* `enable_pod_security_policy` - (Optional) Whether Pod Security Policies are enabled on the cluster. Note that this also requires Role Based Access Control to be enabled.

-> **NOTE:** Support for `enable_pod_security_policy` is currently in Preview on an opt-in basis. To use it, register the `PodSecurityPolicyPreview` feature for the `Microsoft.ContainerService` namespace. For an example of how to enable a Preview feature, please visit [Register scale set feature provider](https://docs.microsoft.com/en-us/azure/aks/cluster-autoscaler#register-scale-set-feature-provider).
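
For illustration, a minimal sketch of how that feature registration might be done with the Azure CLI before applying the configuration. The commands follow the standard `az feature` workflow; they are not part of this resource and should be verified against the current Azure documentation:

```shell
# Register the preview feature (assumed standard Azure CLI workflow)
az feature register --namespace Microsoft.ContainerService --name PodSecurityPolicyPreview

# Wait for the registration state to report "Registered"
az feature show --namespace Microsoft.ContainerService --name PodSecurityPolicyPreview --query properties.state

# Re-register the resource provider so the feature takes effect
az provider register --namespace Microsoft.ContainerService
```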

* `node_resource_group` - (Optional) The name of the Resource Group where the Kubernetes Nodes should exist. Changing this forces a new resource to be created.

-> **NOTE:** Azure requires that a new, non-existent Resource Group is used, as otherwise the provisioning of the Kubernetes Service will fail.