Commit

Initial commit

goruha committed Nov 14, 2024
1 parent 81c9d27 commit dd308f8
Showing 10 changed files with 656 additions and 58 deletions.
8 changes: 2 additions & 6 deletions .github/settings.yml
@@ -1,11 +1,7 @@
# Upstream changes from _extends are only recognized when modifications are made to this file in the default branch.
_extends: .github
repository:
-  name: template
-  description: Template for Terraform Components
+  name: aws-eks-promtail
+  description: Promtail is an agent which ships the contents of local logs to a Loki instance
  homepage: https://cloudposse.com/accelerate
  topics: terraform, terraform-component

177 changes: 130 additions & 47 deletions README.yaml

Large diffs are not rendered by default.

128 changes: 128 additions & 0 deletions src/main.tf
@@ -1,8 +1,136 @@
locals {
enabled = module.this.enabled
name = length(module.this.name) > 0 ? module.this.name : "promtail"

# Assume basic auth is enabled if the loki component has a basic auth username output
basic_auth_enabled = local.enabled && length(module.loki.outputs.basic_auth_username) > 0

# These are the default values required to connect to eks/loki in the same namespace
loki_write_chart_values = {
config = {
clients = [
{
# Intentionally target the loki-write service rather than loki-gateway, since the Loki gateway is disabled
url = "http://loki-write:3100/loki/api/v1/push"
tenant_id = "1"
basic_auth = local.basic_auth_enabled ? {
username = module.loki.outputs.basic_auth_username
password = data.aws_ssm_parameter.basic_auth_password[0].value
} : {}
}
]
}
}

# These are optional values used to expose an endpoint for the Push API
# https://grafana.com/docs/loki/latest/send-data/promtail/configuration/#loki_push_api
push_api_enabled = local.enabled && var.push_api.enabled
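  # Host takes the form <name>.<environment>.<default_domain_name>, e.g. promtail.ue1.example.com (example values only)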
ingress_host_name = local.push_api_enabled ? format("%s.%s.%s", local.name, module.this.environment, module.dns_gbl_delegated[0].outputs.default_domain_name) : ""
ingress_group_name = local.push_api_enabled ? module.alb_controller_ingress_group[0].outputs.group_name : ""
default_push_api_scrape_config = <<-EOT
  - job_name: push
    loki_push_api:
      server:
        http_listen_port: 3500
        grpc_listen_port: 3600
      labels:
        push: default
EOT
push_api_chart_values = {
config = {
snippets = {
extraScrapeConfigs = length(var.push_api.scrape_config) > 0 ? var.push_api.scrape_config : local.default_push_api_scrape_config
}
}
extraPorts = {
push = {
name = "push"
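      # containerPort must match http_listen_port (3500) in the push API scrape config above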
containerPort = "3500"
protocol = "TCP"
service = {
type = "ClusterIP"
port = "3500"
}
ingress = {
annotations = {
"kubernetes.io/ingress.class" = "alb"
"external-dns.alpha.kubernetes.io/hostname" = local.ingress_host_name
"alb.ingress.kubernetes.io/group.name" = local.ingress_group_name
"alb.ingress.kubernetes.io/backend-protocol" = "HTTP"
"alb.ingress.kubernetes.io/listen-ports" = "[{\"HTTP\": 80},{\"HTTPS\":443}]"
"alb.ingress.kubernetes.io/ssl-redirect" = "443"
"alb.ingress.kubernetes.io/target-type" = "ip"
}
hosts = [
local.ingress_host_name
]
tls = [
{
secretName = "${module.this.id}-tls"
hosts = [local.ingress_host_name]
}
]
}
}
}
}

scrape_config = join("\n", [for scrape_config_file in var.scrape_configs : file("${path.module}/${scrape_config_file}")])
scrape_config_chart_values = {
config = {
snippets = {
scrapeConfigs = local.scrape_config
}
}
}
}

data "aws_ssm_parameter" "basic_auth_password" {
count = local.basic_auth_enabled ? 1 : 0

name = module.loki.outputs.ssm_path_basic_auth_password
}

module "chart_values" {
source = "cloudposse/config/yaml//modules/deepmerge"
version = "1.0.2"

count = local.enabled ? 1 : 0

maps = [
local.loki_write_chart_values,
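    # The jsonencode/jsondecode round-trip erases the object type so both branches of the conditional below are type-compatible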
jsondecode(local.push_api_enabled ? jsonencode(local.push_api_chart_values) : jsonencode({})),
local.scrape_config_chart_values,
var.chart_values
]
}

module "promtail" {
source = "cloudposse/helm-release/aws"
version = "0.10.1"

enabled = local.enabled

name = local.name
chart = var.chart
description = var.chart_description
repository = var.chart_repository
chart_version = var.chart_version

kubernetes_namespace = var.kubernetes_namespace
create_namespace = var.create_namespace

verify = var.verify
wait = var.wait
atomic = var.atomic
cleanup_on_fail = var.cleanup_on_fail
timeout = var.timeout

eks_cluster_oidc_issuer_url = replace(module.eks.outputs.eks_cluster_identity_oidc_issuer, "https://", "")

values = compact([
yamlencode(module.chart_values[0].merged),
])

context = module.this.context
}
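
For context, here is a minimal sketch of how this component might be configured in a stack. Only the variable names (push_api, scrape_configs, chart_values, kubernetes_namespace) come from main.tf above; the component path, namespace, and scrape-config file name are illustrative assumptions, not part of this commit.

components:
  terraform:
    eks/promtail:
      vars:
        enabled: true
        name: promtail
        kubernetes_namespace: monitoring   # assumed namespace, for illustration
        push_api:
          enabled: false
          scrape_config: ""                # leave empty to use the default push scrape config
        scrape_configs:
          - "scrape_config/default.yaml"   # hypothetical file path, resolved relative to the component
        chart_values: {}

When push_api.enabled is true, the ingress host name and ALB group built in the locals above are layered onto these values by the deepmerge in module.chart_values.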
4 changes: 0 additions & 4 deletions src/outputs.tf
@@ -1,4 +0,0 @@
output "mock" {
description = "Mock output example for the Cloud Posse Terraform component template"
value = local.enabled ? "hello ${basename(abspath(path.module))}" : ""
}
166 changes: 166 additions & 0 deletions src/provider-helm.tf
@@ -0,0 +1,166 @@
##################
#
# This file is a drop-in to provide a helm provider.
#
# It depends on 2 standard Cloud Posse data source modules to be already
# defined in the same component:
#
# 1. module.iam_roles to provide the AWS profile or Role ARN to use to access the cluster
# 2. module.eks to provide the EKS cluster information
#
# All the following variables are just about configuring the Kubernetes provider
# so it can modify the EKS cluster. The reason there are so many options is
# that at various times each one of them has had problems, so we give you a choice.
#
# The reason there are so many "enabled" inputs, rather than automatically
# detecting whether they are enabled based on the value of the input,
# is that any logic based on input values requires those values to be known during
# Terraform's "plan" phase, and often they are not, which causes problems.
#
variable "kubeconfig_file_enabled" {
type = bool
default = false
description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster"
}

variable "kubeconfig_file" {
type = string
default = ""
description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`"
}

variable "kubeconfig_context" {
type = string
default = ""
description = "Context to choose from the Kubernetes kube config file"
}

variable "kube_data_auth_enabled" {
type = bool
default = false
description = <<-EOT
If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`.
EOT
}

variable "kube_exec_auth_enabled" {
type = bool
default = true
description = <<-EOT
If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`.
EOT
}

variable "kube_exec_auth_role_arn" {
type = string
default = ""
description = "The role ARN for `aws eks get-token` to use"
}

variable "kube_exec_auth_role_arn_enabled" {
type = bool
default = true
description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`"
}

variable "kube_exec_auth_aws_profile" {
type = string
default = ""
description = "The AWS config profile for `aws eks get-token` to use"
}

variable "kube_exec_auth_aws_profile_enabled" {
type = bool
default = false
description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`"
}

variable "kubeconfig_exec_auth_api_version" {
type = string
default = "client.authentication.k8s.io/v1beta1"
description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin"
}

variable "helm_manifest_experiment_enabled" {
type = bool
default = false
description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can be seen in the plan"
}

locals {
kubeconfig_file_enabled = var.kubeconfig_file_enabled
kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled
kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled

# Eventually we might try to get this from an environment variable
kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version

exec_profile = local.kube_exec_auth_enabled && var.kube_exec_auth_aws_profile_enabled ? [
"--profile", var.kube_exec_auth_aws_profile
] : []

kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn)
exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [
"--role-arn", local.kube_exec_auth_role_arn
] : []

# Provide dummy configuration for the case where the EKS cluster is not available.
certificate_authority_data = try(module.eks.outputs.eks_cluster_certificate_authority_data, "")
# Use coalesce+try to handle both the case where the output is missing and the case where it is empty.
eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing")
eks_cluster_endpoint = try(module.eks.outputs.eks_cluster_endpoint, "")
}

data "aws_eks_cluster_auth" "eks" {
count = local.kube_data_auth_enabled ? 1 : 0
name = local.eks_cluster_id
}

provider "helm" {
kubernetes {
host = local.eks_cluster_endpoint
cluster_ca_certificate = base64decode(local.certificate_authority_data)
token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null
# The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster
# in KUBECONFIG is some other cluster, this will cause problems, so we override it always.
config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : ""
config_context = var.kubeconfig_context

dynamic "exec" {
for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : []
content {
api_version = local.kubeconfig_exec_auth_api_version
command = "aws"
args = concat(local.exec_profile, [
"eks", "get-token", "--cluster-name", local.eks_cluster_id
], local.exec_role)
}
}
}
experiments {
manifest = var.helm_manifest_experiment_enabled && module.this.enabled
}
}

provider "kubernetes" {
host = local.eks_cluster_endpoint
cluster_ca_certificate = base64decode(local.certificate_authority_data)
token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null
# The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster
# in KUBECONFIG is some other cluster, this will cause problems, so we override it always.
config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : ""
config_context = var.kubeconfig_context

dynamic "exec" {
for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : []
content {
api_version = local.kubeconfig_exec_auth_api_version
command = "aws"
args = concat(local.exec_profile, [
"eks", "get-token", "--cluster-name", local.eks_cluster_id
], local.exec_role)
}
}
}
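
Similarly, the provider authentication inputs defined above could be overridden per environment; a rough sketch with placeholder values (only the variable names come from provider-helm.tf):

vars:
  kube_exec_auth_enabled: true
  kube_exec_auth_role_arn_enabled: true
  kube_exec_auth_role_arn: "arn:aws:iam::111111111111:role/example-terraform"  # placeholder ARN
  kubeconfig_file_enabled: false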
19 changes: 19 additions & 0 deletions src/providers.tf
@@ -0,0 +1,19 @@
provider "aws" {
region = var.region

# Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null.
profile = module.iam_roles.terraform_profile_name

dynamic "assume_role" {
# module.iam_roles.terraform_role_arn may be null, in which case do not assume a role.
for_each = compact([module.iam_roles.terraform_role_arn])
content {
role_arn = assume_role.value
}
}
}

module "iam_roles" {
source = "../../account-map/modules/iam-roles"
context = module.this.context
}