This repository has been archived by the owner on Sep 13, 2021. It is now read-only.

first set of changes. count to for_each; merging defaults to avoid lo… #1

Closed
wants to merge 5 commits into from
42 changes: 14 additions & 28 deletions aws_auth.tf
@@ -2,38 +2,24 @@ data "aws_caller_identity" "current" {}

 locals {
   auth_launch_template_worker_roles = [
-    for index in range(0, var.create_eks ? local.worker_group_launch_template_count : 0) : {
-      worker_role_arn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:role/${element(
-        coalescelist(
-          aws_iam_instance_profile.workers_launch_template.*.role,
-          data.aws_iam_instance_profile.custom_worker_group_launch_template_iam_instance_profile.*.role_name,
-          [""]
-        ),
-        index
-      )}"
-      platform = lookup(
-        var.worker_groups_launch_template[index],
-        "platform",
-        local.workers_group_defaults["platform"]
-      )
+    for key, worker_group in (var.create_eks ? local.worker_groups_launch_template_with_defaults : {}) : {
+      worker_role_arn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:role/${
+        var.manage_worker_iam_resources ?
+        aws_iam_instance_profile.workers_launch_template[key].role :
+        data.aws_iam_instance_profile.custom_worker_group_launch_template_iam_instance_profile[key].role_name
+      }"
+      platform = worker_group.platform
     }
   ]

   auth_worker_roles = [
-    for index in range(0, var.create_eks ? local.worker_group_count : 0) : {
-      worker_role_arn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:role/${element(
-        coalescelist(
-          aws_iam_instance_profile.workers.*.role,
-          data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile.*.role_name,
-          [""]
-        ),
-        index,
-      )}"
-      platform = lookup(
-        var.worker_groups[index],
-        "platform",
-        local.workers_group_defaults["platform"]
-      )
+    for key, worker_group in (var.create_eks ? local.worker_groups_with_defaults : {}) : {
+      worker_role_arn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:role/${
+        var.manage_worker_iam_resources ?
+        aws_iam_instance_profile.workers[key].role :
+        data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile[key].role_name
+      }"
+      platform = worker_group.platform
     }
   ]
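Note: the hunk above replaces index-based `count` iteration with `for_each` keyed by worker-group name, so entries feeding the aws-auth ConfigMap no longer shift when a group is added or removed mid-list. A minimal standalone sketch of the same migration, using hypothetical variable and resource names rather than the module's own:

variable "worker_groups" {
  type = list(object({
    name        = string
    iam_role_id = string
  }))
  default = []
}

# count-based (old pattern): list positions are the resource addresses, so
# removing one group shifts every later index and forces needless churn.
resource "aws_iam_instance_profile" "workers_by_index" {
  count = length(var.worker_groups)
  name  = "worker-${count.index}"
  role  = var.worker_groups[count.index].iam_role_id
}

# for_each-based (new pattern): addresses are keyed by group name, so a plan
# only touches the group that actually changed.
resource "aws_iam_instance_profile" "workers_by_key" {
  for_each = { for g in var.worker_groups : g.name => g }
  name     = "worker-${each.key}"
  role     = each.value.iam_role_id
}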
122 changes: 38 additions & 84 deletions data.tf
@@ -56,94 +56,56 @@ data "aws_iam_policy_document" "cluster_assume_role_policy" {
 }

 data "template_file" "userdata" {
-  count = var.create_eks ? local.worker_group_count : 0
-  template = lookup(
-    var.worker_groups[count.index],
-    "userdata_template_file",
-    file(
-      lookup(var.worker_groups[count.index], "platform", local.workers_group_defaults["platform"]) == "windows"
+  for_each = var.create_eks ? local.worker_groups_with_defaults : {}
+
+  template = (each.value.userdata_template_file != ""
+    ? each.value.userdata_template_file
+    : file(
+      each.value.platform == "windows"
       ? "${path.module}/templates/userdata_windows.tpl"
       : "${path.module}/templates/userdata.sh.tpl"
     )
   )

-  vars = merge({
-    platform = lookup(var.worker_groups[count.index], "platform", local.workers_group_defaults["platform"])
-    cluster_name = coalescelist(aws_eks_cluster.this[*].name, [""])[0]
-    endpoint = coalescelist(aws_eks_cluster.this[*].endpoint, [""])[0]
-    cluster_auth_base64 = coalescelist(aws_eks_cluster.this[*].certificate_authority[0].data, [""])[0]
-    pre_userdata = lookup(
-      var.worker_groups[count.index],
-      "pre_userdata",
-      local.workers_group_defaults["pre_userdata"],
-    )
-    additional_userdata = lookup(
-      var.worker_groups[count.index],
-      "additional_userdata",
-      local.workers_group_defaults["additional_userdata"],
-    )
-    bootstrap_extra_args = lookup(
-      var.worker_groups[count.index],
-      "bootstrap_extra_args",
-      local.workers_group_defaults["bootstrap_extra_args"],
-    )
-    kubelet_extra_args = lookup(
-      var.worker_groups[count.index],
-      "kubelet_extra_args",
-      local.workers_group_defaults["kubelet_extra_args"],
-    )
+  vars = merge(
+    {
+      platform = each.value.platform
+      cluster_name = coalescelist(aws_eks_cluster.this[*].name, [""])[0]
+      endpoint = coalescelist(aws_eks_cluster.this[*].endpoint, [""])[0]
+      cluster_auth_base64 = coalescelist(aws_eks_cluster.this[*].certificate_authority[0].data, [""])[0]
+      pre_userdata = each.value.pre_userdata
+      additional_userdata = each.value.additional_userdata
+      bootstrap_extra_args = each.value.bootstrap_extra_args
+      kubelet_extra_args = each.value.kubelet_extra_args
     },
-    lookup(
-      var.worker_groups[count.index],
-      "userdata_template_extra_args",
-      local.workers_group_defaults["userdata_template_extra_args"]
-    )
+    each.value.userdata_template_extra_args
   )
 }

 data "template_file" "launch_template_userdata" {
-  count = var.create_eks ? local.worker_group_launch_template_count : 0
-  template = lookup(
-    var.worker_groups_launch_template[count.index],
-    "userdata_template_file",
-    file(
-      lookup(var.worker_groups_launch_template[count.index], "platform", local.workers_group_defaults["platform"]) == "windows"
+  for_each = var.create_eks ? local.worker_groups_launch_template_with_defaults : {}
+
+  template = (each.value.userdata_template_file != ""
+    ? each.value.userdata_template_file
+    : file(
+      each.value.platform == "windows"
       ? "${path.module}/templates/userdata_windows.tpl"
      : "${path.module}/templates/userdata.sh.tpl"
     )
   )

-  vars = merge({
-    platform = lookup(var.worker_groups_launch_template[count.index], "platform", local.workers_group_defaults["platform"])
-    cluster_name = coalescelist(aws_eks_cluster.this[*].name, [""])[0]
-    endpoint = coalescelist(aws_eks_cluster.this[*].endpoint, [""])[0]
-    cluster_auth_base64 = coalescelist(aws_eks_cluster.this[*].certificate_authority[0].data, [""])[0]
-    pre_userdata = lookup(
-      var.worker_groups_launch_template[count.index],
-      "pre_userdata",
-      local.workers_group_defaults["pre_userdata"],
-    )
-    additional_userdata = lookup(
-      var.worker_groups_launch_template[count.index],
-      "additional_userdata",
-      local.workers_group_defaults["additional_userdata"],
-    )
-    bootstrap_extra_args = lookup(
-      var.worker_groups_launch_template[count.index],
-      "bootstrap_extra_args",
-      local.workers_group_defaults["bootstrap_extra_args"],
-    )
-    kubelet_extra_args = lookup(
-      var.worker_groups_launch_template[count.index],
-      "kubelet_extra_args",
-      local.workers_group_defaults["kubelet_extra_args"],
-    )
+  vars = merge(
+    {
+      platform = each.value.platform
+      cluster_name = coalescelist(aws_eks_cluster.this[*].name, [""])[0]
+      endpoint = coalescelist(aws_eks_cluster.this[*].endpoint, [""])[0]
+      cluster_auth_base64 = coalescelist(aws_eks_cluster.this[*].certificate_authority[0].data, [""])[0]
+      pre_userdata = each.value.pre_userdata
+      additional_userdata = each.value.additional_userdata
+      bootstrap_extra_args = each.value.bootstrap_extra_args
+      kubelet_extra_args = each.value.kubelet_extra_args
     },
-    lookup(
-      var.worker_groups_launch_template[count.index],
-      "userdata_template_extra_args",
-      local.workers_group_defaults["userdata_template_extra_args"]
-    )
+    each.value.userdata_template_extra_args
   )
 }
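Note: because the merged maps guarantee `userdata_template_file` always exists, the old `lookup(..., file(...))` fallback becomes an explicit empty-string check that only reads the bundled template when no override is supplied. A minimal sketch of that selection logic, with a hypothetical variable and an illustrative template path:

variable "userdata_template_file" {
  type    = string
  default = "" # Empty string means "render the bundled template".
}

locals {
  # Pick the caller-supplied template content if any, else read the default.
  userdata_template = (
    var.userdata_template_file != ""
    ? var.userdata_template_file
    : file("${path.module}/templates/userdata.sh.tpl")
  )
}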

@@ -153,21 +115,13 @@ data "aws_iam_role" "custom_cluster_iam_role" {
 }

 data "aws_iam_instance_profile" "custom_worker_group_iam_instance_profile" {
-  count = var.manage_worker_iam_resources ? 0 : local.worker_group_count
-  name = lookup(
-    var.worker_groups[count.index],
-    "iam_instance_profile_name",
-    local.workers_group_defaults["iam_instance_profile_name"],
-  )
+  for_each = var.manage_worker_iam_resources ? {} : local.worker_groups_with_defaults
+  name = each.value.iam_instance_profile_name
 }

 data "aws_iam_instance_profile" "custom_worker_group_launch_template_iam_instance_profile" {
-  count = var.manage_worker_iam_resources ? 0 : local.worker_group_launch_template_count
-  name = lookup(
-    var.worker_groups_launch_template[count.index],
-    "iam_instance_profile_name",
-    local.workers_group_defaults["iam_instance_profile_name"],
-  )
+  for_each = var.manage_worker_iam_resources ? {} : local.worker_groups_launch_template_with_defaults
+  name = each.value.iam_instance_profile_name
 }

 data "aws_partition" "current" {}
63 changes: 54 additions & 9 deletions local.tf
@@ -25,6 +25,16 @@ locals {
   sts_principal = "sts.${data.aws_partition.current.dns_suffix}"

   policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
+  volume_defaults = {
+    volume_size = "100"           # Root volume size of workers instances.
+    volume_type = "gp3"           # Root volume type of workers instances; can be "standard", "gp3", "gp2", or "io1".
+    iops = "0"                    # The amount of provisioned IOPS. This must be set with a volume_type of "io1".
+    volume_throughput = null      # The amount of throughput to provision for a gp3 volume.
+    delete_on_termination = true
+    block_device_name = data.aws_ami.eks_worker.root_device_name # Root device name for workers. If none is provided, the default AMI is assumed.
+    kms_key_id = ""               # The KMS key to use when encrypting the root storage device.
+    encrypted = false             # Whether the volume should be encrypted or not.
+  }
   workers_group_defaults_defaults = {
     name = "count.index" # Name of the worker group. Literal count.index will never be used, but if name is not set, the count.index interpolation will be used.
     tags = []            # A list of maps defining extra tags to be applied to the worker group autoscaling group.
@@ -42,10 +52,11 @@ locals {
     instance_type = "m4.large"  # Size of the workers instances.
     spot_price = ""             # Cost of spot instance.
     placement_tenancy = ""      # The tenancy of the instance. Valid values are "default" or "dedicated".
-    root_volume_size = "100"       # root volume size of workers instances.
-    root_volume_type = "gp3"       # root volume type of workers instances, can be "standard", "gp3", "gp2", or "io1"
-    root_iops = "0"                # The amount of provisioned IOPS. This must be set with a volume_type of "io1".
-    root_volume_throughput = null  # The amount of throughput to provision for a gp3 volume.
+    root_volume_size = local.volume_defaults.volume_size             # root volume size of workers instances.
+    root_volume_type = local.volume_defaults.volume_type             # root volume type of workers instances, can be "standard", "gp3", "gp2", or "io1"
+    root_iops = local.volume_defaults.iops                           # The amount of provisioned IOPS. This must be set with a volume_type of "io1".
+    root_volume_throughput = local.volume_defaults.volume_throughput # The amount of throughput to provision for a gp3 volume.
+    root_delete_on_termination = local.volume_defaults.delete_on_termination
     key_name = ""               # The key pair name that should be used for the instances in the autoscaling group
     pre_userdata = ""           # userdata to pre-append to the default userdata.
     userdata_template_file = "" # alternate template to use for userdata
@@ -61,7 +72,7 @@ locals {
     additional_security_group_ids = [] # A list of additional security group ids to include in worker launch config
     protect_from_scale_in = false      # Prevent AWS from scaling in, so that cluster-autoscaler is solely responsible.
     iam_instance_profile_name = ""     # A custom IAM instance profile name. Used when manage_worker_iam_resources is set to false. Incompatible with iam_role_id.
-    iam_role_id = "local.default_iam_role_id" # A custom IAM role id. Incompatible with iam_instance_profile_name. Literal local.default_iam_role_id will never be used but if iam_role_id is not set, the local.default_iam_role_id interpolation will be used.
+    iam_role_id = local.default_iam_role_id   # A custom IAM role id. Incompatible with iam_instance_profile_name.
     suspended_processes = ["AZRebalance"] # A list of processes to suspend, i.e. ["AZRebalance", "HealthCheck", "ReplaceUnhealthy"]
     target_group_arns = null # A list of Application LoadBalancer (ALB) target group ARNs to be associated to the autoscaling group
     load_balancers = null    # A list of Classic LoadBalancer (CLB) names to be associated to the autoscaling group
@@ -72,13 +83,13 @@ locals {
     platform = "linux"          # Platform of workers. Either "linux" or "windows".
     additional_ebs_volumes = [] # A list of additional volumes to be attached to the instances on this Auto Scaling group. Each volume should be an object with the following: block_device_name (required), volume_size, volume_type, iops, encrypted, kms_key_id (only on launch-template), delete_on_termination. Optional values are grabbed from root volume or from defaults
     # Settings for launch templates
-    root_block_device_name = data.aws_ami.eks_worker.root_device_name # Root device name for workers. If none is provided, the default AMI is assumed.
-    root_kms_key_id = ""                                              # The KMS key to use when encrypting the root storage device
+    root_block_device_name = local.volume_defaults.block_device_name  # Root device name for workers. If none is provided, the default AMI is assumed.
+    root_kms_key_id = local.volume_defaults.kms_key_id                # The KMS key to use when encrypting the root storage device
     launch_template_id = null                     # The id of the launch template used for managed node_groups
     launch_template_version = "$Latest"           # The latest version of the launch template to use in the autoscaling group
     launch_template_placement_tenancy = "default" # The placement tenancy for instances
     launch_template_placement_group = null        # The name of the placement group into which to launch the instances, if any.
-    root_encrypted = false                           # Whether the volume should be encrypted or not
+    root_encrypted = local.volume_defaults.encrypted # Whether the volume should be encrypted or not
     eni_delete = true        # Delete the Elastic Network Interface (ENI) on termination (if set to false you will have to manually delete before destroying)
     cpu_credits = "standard" # T2/T3 unlimited mode, can be 'standard' or 'unlimited'. Used 'standard' mode as default to avoid paying higher costs
     market_type = null
@@ -87,7 +98,7 @@ locals {
     metadata_http_put_response_hop_limit = null # The desired HTTP PUT response hop limit for instance metadata requests.
     # Settings for launch templates with mixed instances policy
     override_instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"] # A list of override instance types for mixed instances policy
-    on_demand_allocation_strategy = null          # Strategy to use when launching on-demand instances. Valid values: prioritized.
+    on_demand_allocation_strategy = "prioritized" # Strategy to use when launching on-demand instances. Valid values: prioritized.
     on_demand_base_capacity = "0"                  # Absolute minimum amount of desired capacity that must be fulfilled by on-demand instances
     on_demand_percentage_above_base_capacity = "0" # Percentage split between on-demand and Spot instances above the base on-demand capacity
     spot_allocation_strategy = "lowest-price"      # Valid options are 'lowest-price' and 'capacity-optimized'. If 'lowest-price', the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools. If 'capacity-optimized', the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity.
@@ -101,6 +112,40 @@ locals {
     var.workers_group_defaults,
   )

+  #worker_groups_with_defaults_2 = { for key, group in var.worker_groups: key => merge(local.workers_group_defaults, group) }
+
+  worker_groups_launch_template_with_defaults = {
+    for key, template in var.worker_groups_launch_template : key => merge(
+      local.workers_group_defaults,
+      merge(
+        template,
+        {
+          additional_ebs_volumes = [
+            for volume_definition in template.additional_ebs_volumes : merge(
+              local.volume_defaults,
+              volume_definition)
+          ]
+        }
+      )
+    )
+  }
+
+  worker_groups_with_defaults = {
+    for group in var.worker_groups : group.name => merge(
+      local.workers_group_defaults,
+      merge(
+        group,
+        {
+          additional_ebs_volumes = [
+            for volume_definition in group.additional_ebs_volumes : merge(
+              local.volume_defaults,
+              volume_definition)
+          ]
+        }
+      )
+    )
+  }
+
   ebs_optimized_not_supported = [
     "c1.medium",
     "c3.8xlarge",
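Note: the two new locals above merge defaults at two levels — group-level keys come from `workers_group_defaults`, and each entry of `additional_ebs_volumes` is additionally filled from `volume_defaults` — so callers specify only the fields that differ. A standalone sketch of the nested merge, with hypothetical values:

locals {
  # Hypothetical per-volume defaults, mirroring the volume_defaults pattern.
  volume_defaults = {
    volume_size = "100"
    volume_type = "gp3"
    encrypted   = false
  }

  worker_groups_input = [
    {
      name = "general"
      additional_ebs_volumes = [
        { block_device_name = "/dev/xvdb", volume_size = "500" },
      ]
    },
  ]

  worker_groups = {
    for g in local.worker_groups_input : g.name => merge(
      g,
      {
        # Each volume keeps its explicit settings and inherits the rest.
        additional_ebs_volumes = [
          for v in g.additional_ebs_volumes : merge(local.volume_defaults, v)
        ]
      }
    )
  }
}

# local.worker_groups["general"].additional_ebs_volumes[0] is now
# { block_device_name = "/dev/xvdb", volume_size = "500", volume_type = "gp3", encrypted = false }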